From 55c242f449273da61886552c175c834ef1c69790 Mon Sep 17 00:00:00 2001 From: Artur Troian Date: Wed, 20 Jul 2022 15:25:01 -0400 Subject: [PATCH] refactor(_run): reuse kustomize across kube and single examples update ingress-nginx image simplify kind cluster start refs ovrclk/engineering#381 refs ovrclk/engineering#356 refs ovrclk/engineering#413 Signed-off-by: Artur Troian --- .env | 4 +- .envrc | 4 +- .github/workflows/k8s-integration.yaml | 10 +- ...cker.yaml => .goreleaser-docker-amd64.yaml | 19 +- .goreleaser-docker-arm64.yaml | 34 + Makefile | 19 + _docs/kustomize/akash-node/kustomization.yaml | 12 +- .../cluster_role.yaml | 0 .../deployment.yaml | 10 +- .../ingress.yaml | 0 .../kustomization.yaml | 9 +- .../rbac.yaml | 0 .../service.yaml | 0 .../service_account.yaml | 0 .../cluster_role.yaml | 64 ++ .../akash-operator-inventory/deployment.yaml | 46 + .../kustomization.yaml | 10 + .../rbac.yaml | 0 .../role-binding.yaml | 17 + .../akash-operator-inventory/service.yaml | 21 + .../service_account.yaml | 11 + .../cluster_role.yaml | 0 .../deployment.yaml | 0 .../ingress.yaml | 0 .../kustomization.yaml | 10 +- _docs/kustomize/akash-operator-ip/rbac.yaml | 38 + .../service.yaml | 0 .../service_account.yaml | 0 .../kustomize/akash-provider/deployment.yaml | 32 +- _docs/kustomize/akash-provider/ingress.yaml | 3 +- .../akash-provider/kustomization.yaml | 15 +- _docs/kustomize/akash-provider/run.sh | 11 +- .../akash-services/kustomization.yaml | 5 +- _docs/kustomize/kind/kind-metrics-server.yaml | 2 + .../templates/akash-node}/docker-image.yaml | 0 .../templates}/akash-node/gateway-host.yaml | 0 .../templates}/akash-node/kustomization.yaml | 13 +- .../docker-image.yaml | 2 +- .../kustomization.yaml | 8 +- .../docker-image.yaml | 2 +- .../kustomization.yaml | 13 + .../akash-operator-ip}/docker-image.yaml | 0 .../akash-operator-ip/kustomization.yaml | 12 + .../akash-provider/docker-image.yaml | 6 + .../akash-provider/gateway-host.yaml | 0 
.../akash-provider/kustomization.yaml | 11 +- _docs/provider/kube/metallb-service.yaml | 14 +- _run/common-base.mk | 5 +- _run/common-commands.mk | 36 +- _run/common-helm.mk | 28 + _run/common-kind.mk | 193 +++-- _run/common-kustomize.mk | 82 ++ _run/common-minikube.mk | 2 +- _run/common.mk | 76 +- _run/ingress-nginx-class.yaml | 8 - _run/ingress-nginx.yaml | 784 +++++++++--------- _run/kind-config-metal-lb-ip.yaml | 12 + _run/kube/.envrc | 2 +- _run/kube/Makefile | 35 +- _run/kube/README.md | 36 +- _run/lite/.envrc | 2 +- _run/metallb.yaml | 452 ++++++++++ _run/minikube/.envrc | 2 +- _run/single/.envrc | 2 +- _run/single/.gitignore | 2 - _run/single/Makefile | 67 +- _run/single/README.md | 34 +- .../kustomization.yaml | 12 - _run/single/kustomize/akash-node/.gitignore | 1 - .../kustomize/akash-provider/.gitignore | 1 - .../hostname_operator_client.go | 2 +- cluster/operatorclients/ip_operator_client.go | 7 +- cluster/util/ip_sharing_key.go | 2 +- cmd/provider-services/cmd/root.go | 4 +- go.mod | 4 +- go.sum | 12 +- integration/e2e_test.go | 25 +- integration/escrow_monitor_test.go | 2 + integration/persistentstorage_test.go | 4 +- integration/test_helpers.go | 2 + make/init.mk | 22 +- make/releasing.mk | 4 +- make/setup-cache.mk | 26 +- make/test-integration.mk | 14 +- manifest/manager.go | 107 +-- operator/waiter/waiter.go | 3 +- script/setup-kind.sh | 132 ++- .../deployment/deployment-v2-ip-endpoint.yaml | 2 +- version/version.go | 147 ++++ 89 files changed, 1944 insertions(+), 938 deletions(-) rename .goreleaser-docker.yaml => .goreleaser-docker-amd64.yaml (60%) create mode 100644 .goreleaser-docker-arm64.yaml rename _docs/kustomize/{akash-hostname-operator => akash-operator-hostname}/cluster_role.yaml (100%) rename _docs/kustomize/{akash-hostname-operator => akash-operator-hostname}/deployment.yaml (90%) rename _docs/kustomize/{akash-hostname-operator => akash-operator-hostname}/ingress.yaml (100%) rename _docs/kustomize/{akash-hostname-operator => 
akash-operator-hostname}/kustomization.yaml (88%) rename _docs/kustomize/{akash-hostname-operator => akash-operator-hostname}/rbac.yaml (100%) rename _docs/kustomize/{akash-hostname-operator => akash-operator-hostname}/service.yaml (100%) rename _docs/kustomize/{akash-hostname-operator => akash-operator-hostname}/service_account.yaml (100%) create mode 100644 _docs/kustomize/akash-operator-inventory/cluster_role.yaml create mode 100644 _docs/kustomize/akash-operator-inventory/deployment.yaml create mode 100644 _docs/kustomize/akash-operator-inventory/kustomization.yaml rename _docs/kustomize/{akash-ip-operator => akash-operator-inventory}/rbac.yaml (100%) create mode 100644 _docs/kustomize/akash-operator-inventory/role-binding.yaml create mode 100644 _docs/kustomize/akash-operator-inventory/service.yaml create mode 100644 _docs/kustomize/akash-operator-inventory/service_account.yaml rename _docs/kustomize/{akash-ip-operator => akash-operator-ip}/cluster_role.yaml (100%) rename _docs/kustomize/{akash-ip-operator => akash-operator-ip}/deployment.yaml (100%) rename _docs/kustomize/{akash-ip-operator => akash-operator-ip}/ingress.yaml (100%) rename _docs/kustomize/{akash-ip-operator => akash-operator-ip}/kustomization.yaml (88%) create mode 100644 _docs/kustomize/akash-operator-ip/rbac.yaml rename _docs/kustomize/{akash-ip-operator => akash-operator-ip}/service.yaml (100%) rename _docs/kustomize/{akash-ip-operator => akash-operator-ip}/service_account.yaml (100%) rename {_run/single/kustomize/akash-hostname-operator => _docs/kustomize/templates/akash-node}/docker-image.yaml (100%) rename {_run/single/kustomize => _docs/kustomize/templates}/akash-node/gateway-host.yaml (100%) rename {_run/single/kustomize => _docs/kustomize/templates}/akash-node/kustomization.yaml (86%) rename {_run/single/kustomize/akash-provider => _docs/kustomize/templates/akash-operator-hostname}/docker-image.yaml (55%) rename {_run/kube/kustomize/akash-hostname-operator => 
_docs/kustomize/templates/akash-operator-hostname}/kustomization.yaml (57%) rename {_run/kube/kustomize/akash-hostname-operator => _docs/kustomize/templates/akash-operator-inventory}/docker-image.yaml (52%) create mode 100644 _docs/kustomize/templates/akash-operator-inventory/kustomization.yaml rename {_run/single/kustomize/akash-node => _docs/kustomize/templates/akash-operator-ip}/docker-image.yaml (100%) create mode 100644 _docs/kustomize/templates/akash-operator-ip/kustomization.yaml create mode 100644 _docs/kustomize/templates/akash-provider/docker-image.yaml rename {_run/single/kustomize => _docs/kustomize/templates}/akash-provider/gateway-host.yaml (100%) rename {_run/single/kustomize => _docs/kustomize/templates}/akash-provider/kustomization.yaml (87%) create mode 100644 _run/common-helm.mk create mode 100644 _run/common-kustomize.mk delete mode 100644 _run/ingress-nginx-class.yaml create mode 100644 _run/kind-config-metal-lb-ip.yaml create mode 100644 _run/metallb.yaml delete mode 100644 _run/single/.gitignore delete mode 100644 _run/single/kustomize/akash-hostname-operator/kustomization.yaml delete mode 100644 _run/single/kustomize/akash-node/.gitignore delete mode 100644 _run/single/kustomize/akash-provider/.gitignore create mode 100644 version/version.go diff --git a/.env b/.env index 2c5beeae4..668195459 100644 --- a/.env +++ b/.env @@ -1,6 +1,6 @@ GOLANG_VERSION=1.18.3 -KIND_VERSION=0.11.1 -KINDEST_VERSION=v1.21.1 +KIND_VERSION=0.14.0 +KINDEST_VERSION=v1.22.2 GORELEASER_VERSION=v1.6.3 GO111MODULE=on ROOT_DIR=${AP_ROOT} diff --git a/.envrc b/.envrc index 5dc788613..5922b4b54 100644 --- a/.envrc +++ b/.envrc @@ -3,11 +3,11 @@ export AP_ROOT dotenv +make cache + PATH_add "$AP_DEVCACHE_NODE_BIN" PATH_add "$AP_DEVCACHE_BIN" -make cache - PROVIDER_SERVICES=$AP_DEVCACHE_BIN/provider-services AKASH=$AP_DEVCACHE_BIN/akash diff --git a/.github/workflows/k8s-integration.yaml b/.github/workflows/k8s-integration.yaml index d9847255f..e28208675 100644 --- 
a/.github/workflows/k8s-integration.yaml +++ b/.github/workflows/k8s-integration.yaml @@ -15,12 +15,14 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: c-py/action-dotenv-to-setenv@v3 - with: - env-file: .env + - uses: HatsuneMiku3939/direnv-action@v1 - uses: actions/setup-go@v3 with: go-version: "${{ env.GOLANG_VERSION }}" + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Prepare images + run: make -s -C _run/kube kind-prepare-images - uses: engineerd/setup-kind@v0.5.0 with: version: "v${{ env.KIND_VERSION }}" @@ -29,7 +31,7 @@ jobs: - name: Docker Status run: docker ps -a - name: Setup Ingress K8S - run: make -s -C _run/kube kind-ingress-setup + run: KUSTOMIZE_INSTALLS=akash-operator-inventory make -s -C _run/kube kind-cluster-setup-e2e-ci - name: k8s-ingress run: make -s -C _run/kube kind-k8s-ip - name: Kube Environment diff --git a/.goreleaser-docker.yaml b/.goreleaser-docker-amd64.yaml similarity index 60% rename from .goreleaser-docker.yaml rename to .goreleaser-docker-amd64.yaml index 89cb81a74..1da4cfe53 100644 --- a/.goreleaser-docker.yaml +++ b/.goreleaser-docker-amd64.yaml @@ -7,7 +7,6 @@ builds: main: ./cmd/provider-services goarch: - amd64 - - arm64 goos: - linux flags: @@ -17,12 +16,11 @@ builds: ldflags: - "{{ .Env.BUILD_VARS }}" - "{{ .Env.STRIP_FLAGS }}" - dockers: - dockerfile: Dockerfile use: buildx - goarch: amd64 goos: linux + goarch: amd64 build_flag_templates: - --platform=linux/amd64 - --label=org.opencontainers.image.title={{ .ProjectName }} @@ -34,18 +32,3 @@ dockers: - --label=org.opencontainers.image.revision={{ .FullCommit }} image_templates: - 'ghcr.io/ovrclk/{{ .ProjectName }}:latest-amd64' - - dockerfile: Dockerfile - use: buildx - goarch: arm64 - goos: linux - build_flag_templates: - - --platform=linux/arm64 - - --label=org.opencontainers.image.title={{ .ProjectName }} - - --label=org.opencontainers.image.description={{ .ProjectName }} - - 
--label=org.opencontainers.image.url={{.GitURL}} - - --label=org.opencontainers.image.source={{.GitURL}} - - --label=org.opencontainers.image.version={{ .Version }} - - --label=org.opencontainers.image.created={{ time "2006-01-02T15:04:05Z07:00" }} - - --label=org.opencontainers.image.revision={{ .FullCommit }} - image_templates: - - 'ghcr.io/ovrclk/{{ .ProjectName }}:latest-arm64' diff --git a/.goreleaser-docker-arm64.yaml b/.goreleaser-docker-arm64.yaml new file mode 100644 index 000000000..f44beea0d --- /dev/null +++ b/.goreleaser-docker-arm64.yaml @@ -0,0 +1,34 @@ +env: + - GO111MODULE=on + - DOCKER_CLI_EXPERIMENTAL="enabled" +builds: + - id: provider-services-linux + binary: provider-services + main: ./cmd/provider-services + goarch: + - arm64 + goos: + - linux + flags: + - "-mod={{ .Env.MOD }}" + - "-tags={{ .Env.BUILD_TAGS }}" + - -trimpath + ldflags: + - "{{ .Env.BUILD_VARS }}" + - "{{ .Env.STRIP_FLAGS }}" +dockers: + - dockerfile: Dockerfile + use: buildx + goos: linux + goarch: arm64 + build_flag_templates: + - --platform=linux/arm64 + - --label=org.opencontainers.image.title={{ .ProjectName }} + - --label=org.opencontainers.image.description={{ .ProjectName }} + - --label=org.opencontainers.image.url={{.GitURL}} + - --label=org.opencontainers.image.source={{.GitURL}} + - --label=org.opencontainers.image.version={{ .Version }} + - --label=org.opencontainers.image.created={{ time "2006-01-02T15:04:05Z07:00" }} + - --label=org.opencontainers.image.revision={{ .FullCommit }} + image_templates: + - 'ghcr.io/ovrclk/{{ .ProjectName }}:latest-arm64' diff --git a/Makefile b/Makefile index d46268508..50ad8cdff 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,25 @@ GO_MOD ?= readonly BUILD_TAGS ?= osusergo,netgo,static_build GORELEASER_STRIP_FLAGS ?= +GORELEASER_BUILD_VARS := \ +-X github.com/ovrclk/provider-services/version.Name=provider-services \ +-X github.com/ovrclk/provider-services/version.AppName=provider-services \ +-X 
github.com/ovrclk/provider-services/version.BuildTags=\"$(BUILD_TAGS)\" \ +-X github.com/ovrclk/provider-services/version.Version=$(RELEASE_TAG) \ +-X github.com/ovrclk/provider-services/version.Commit=$(GIT_HEAD_COMMIT_LONG) + +ldflags = -linkmode=$(GO_LINKMODE) -X github.com/ovrclk/provider-services/version.Name=provider-services \ +-X github.com/ovrclk/provider-services/version.AppName=provider-services \ +-X github.com/ovrclk/provider-services/version.BuildTags="$(BUILD_TAGS)" \ +-X github.com/ovrclk/provider-services/version.Version=$(shell git describe --tags | sed 's/^v//') \ +-X github.com/ovrclk/provider-services/version.Commit=$(GIT_HEAD_COMMIT_LONG) + +# check for nostrip option +ifeq (,$(findstring nostrip,$(BUILD_OPTIONS))) + ldflags += -s -w + GORELEASER_STRIP_FLAGS += -s -w +endif + ldflags += $(LDFLAGS) ldflags := $(strip $(ldflags)) diff --git a/_docs/kustomize/akash-node/kustomization.yaml b/_docs/kustomize/akash-node/kustomization.yaml index 05004c6da..ea2e1830f 100644 --- a/_docs/kustomize/akash-node/kustomization.yaml +++ b/_docs/kustomize/akash-node/kustomization.yaml @@ -1,3 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: akash-services resources: - deployment.yaml - service.yaml @@ -5,11 +8,7 @@ resources: commonLabels: app: akash-node akash.network/component: akash-node - -namespace: akash-services - configMapGenerator: - - name: akash-boot files: - run.sh @@ -155,15 +154,14 @@ configMapGenerator: ## # required files ## - files: + files: [] # - config/genesis.json - secretGenerator: - name: akash-keys ## # required files ## - files: + files: [] # - config/node_key.json # - config/priv_validator_key.json # - data/priv_validator_state.json diff --git a/_docs/kustomize/akash-hostname-operator/cluster_role.yaml b/_docs/kustomize/akash-operator-hostname/cluster_role.yaml similarity index 100% rename from _docs/kustomize/akash-hostname-operator/cluster_role.yaml rename to 
_docs/kustomize/akash-operator-hostname/cluster_role.yaml diff --git a/_docs/kustomize/akash-hostname-operator/deployment.yaml b/_docs/kustomize/akash-operator-hostname/deployment.yaml similarity index 90% rename from _docs/kustomize/akash-hostname-operator/deployment.yaml rename to _docs/kustomize/akash-operator-hostname/deployment.yaml index a67053aa9..8bc436dd5 100644 --- a/_docs/kustomize/akash-hostname-operator/deployment.yaml +++ b/_docs/kustomize/akash-operator-hostname/deployment.yaml @@ -9,6 +9,7 @@ spec: matchLabels: app: akash-hostname-operator replicas: 1 + revisionHistoryLimit: 1 template: metadata: labels: @@ -60,12 +61,3 @@ spec: configMapKeyRef: name: akash-hostname-operator-config key: event-failure-limit - - volumeMounts: - - name: boot - mountPath: /boot - readOnly: true - volumes: - - name: boot - configMap: - name: akash-hostname-operator-boot diff --git a/_docs/kustomize/akash-hostname-operator/ingress.yaml b/_docs/kustomize/akash-operator-hostname/ingress.yaml similarity index 100% rename from _docs/kustomize/akash-hostname-operator/ingress.yaml rename to _docs/kustomize/akash-operator-hostname/ingress.yaml diff --git a/_docs/kustomize/akash-hostname-operator/kustomization.yaml b/_docs/kustomize/akash-operator-hostname/kustomization.yaml similarity index 88% rename from _docs/kustomize/akash-hostname-operator/kustomization.yaml rename to _docs/kustomize/akash-operator-hostname/kustomization.yaml index 324d79eae..14fb6533a 100644 --- a/_docs/kustomize/akash-hostname-operator/kustomization.yaml +++ b/_docs/kustomize/akash-operator-hostname/kustomization.yaml @@ -1,3 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: akash-services resources: - deployment.yaml - service.yaml @@ -5,13 +8,7 @@ resources: - rbac.yaml - service_account.yaml - cluster_role.yaml - -namespace: akash-services - configMapGenerator: - - name: akash-hostname-operator-boot - files: - - run.sh - name: akash-provider-config literals: - 
k8s-manifest-ns=lease diff --git a/_docs/kustomize/akash-hostname-operator/rbac.yaml b/_docs/kustomize/akash-operator-hostname/rbac.yaml similarity index 100% rename from _docs/kustomize/akash-hostname-operator/rbac.yaml rename to _docs/kustomize/akash-operator-hostname/rbac.yaml diff --git a/_docs/kustomize/akash-hostname-operator/service.yaml b/_docs/kustomize/akash-operator-hostname/service.yaml similarity index 100% rename from _docs/kustomize/akash-hostname-operator/service.yaml rename to _docs/kustomize/akash-operator-hostname/service.yaml diff --git a/_docs/kustomize/akash-hostname-operator/service_account.yaml b/_docs/kustomize/akash-operator-hostname/service_account.yaml similarity index 100% rename from _docs/kustomize/akash-hostname-operator/service_account.yaml rename to _docs/kustomize/akash-operator-hostname/service_account.yaml diff --git a/_docs/kustomize/akash-operator-inventory/cluster_role.yaml b/_docs/kustomize/akash-operator-inventory/cluster_role.yaml new file mode 100644 index 000000000..fe972d199 --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/cluster_role.yaml @@ -0,0 +1,64 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: inventory-operator + labels: + akash.network: "true" + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator +rules: + - apiGroups: + - '' + resources: + - namespaces + - nodes + - pods + - events + - persistentvolumes + - persistentvolumeclaims + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - pods/exec + verbs: + - create + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - ceph.rook.io + resources: + - cephclusters + - cephblockpools + verbs: + - get + - list + - watch + - apiGroups: + - akash.network + resources: + - inventoryrequests + verbs: + - get + - list + - watch + - apiGroups: + - akash.network + resources: + - 
inventories + verbs: + - create + - patch + - get + - list + - watch diff --git a/_docs/kustomize/akash-operator-inventory/deployment.yaml b/_docs/kustomize/akash-operator-inventory/deployment.yaml new file mode 100644 index 000000000..061825408 --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/deployment.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: inventory-operator + namespace: akash-services + labels: + akash.network: "true" + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator +spec: + selector: + matchLabels: + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator + replicas: 1 + revisionHistoryLimit: 1 + template: + metadata: + labels: + app: inventory-operator + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator + spec: + serviceAccountName: inventory-operator + containers: + - name: inventory-operator + image: ghcr.io/ovrclk/provider-services + args: + - "provider-services" + - "operator" + - "inventory" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + ports: + - containerPort: 8080 + name: api + protocol: TCP diff --git a/_docs/kustomize/akash-operator-inventory/kustomization.yaml b/_docs/kustomize/akash-operator-inventory/kustomization.yaml new file mode 100644 index 000000000..fd79eef61 --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/kustomization.yaml @@ -0,0 +1,10 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: akash-services +resources: + - deployment.yaml + - service.yaml + - rbac.yaml + - service_account.yaml + - cluster_role.yaml + - role-binding.yaml diff --git a/_docs/kustomize/akash-ip-operator/rbac.yaml b/_docs/kustomize/akash-operator-inventory/rbac.yaml similarity index 100% rename from 
_docs/kustomize/akash-ip-operator/rbac.yaml rename to _docs/kustomize/akash-operator-inventory/rbac.yaml diff --git a/_docs/kustomize/akash-operator-inventory/role-binding.yaml b/_docs/kustomize/akash-operator-inventory/role-binding.yaml new file mode 100644 index 000000000..49766396c --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/role-binding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: inventory-operator + labels: + akash.network: "true" + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: inventory-operator +subjects: + - kind: ServiceAccount + name: inventory-operator + namespace: akash-services diff --git a/_docs/kustomize/akash-operator-inventory/service.yaml b/_docs/kustomize/akash-operator-inventory/service.yaml new file mode 100644 index 000000000..97926187f --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + akash.network: "true" + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator + name: inventory-operator + namespace: akash-services +spec: + type: ClusterIP + ports: + - name: api + port: 8080 + targetPort: api + appProtocol: http + selector: + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator diff --git a/_docs/kustomize/akash-operator-inventory/service_account.yaml b/_docs/kustomize/akash-operator-inventory/service_account.yaml new file mode 100644 index 000000000..9be2d98bc --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/service_account.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: inventory-operator + namespace: akash-services + labels: + akash.network: "true" + app.kubernetes.io/name: 
akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator +automountServiceAccountToken: true diff --git a/_docs/kustomize/akash-ip-operator/cluster_role.yaml b/_docs/kustomize/akash-operator-ip/cluster_role.yaml similarity index 100% rename from _docs/kustomize/akash-ip-operator/cluster_role.yaml rename to _docs/kustomize/akash-operator-ip/cluster_role.yaml diff --git a/_docs/kustomize/akash-ip-operator/deployment.yaml b/_docs/kustomize/akash-operator-ip/deployment.yaml similarity index 100% rename from _docs/kustomize/akash-ip-operator/deployment.yaml rename to _docs/kustomize/akash-operator-ip/deployment.yaml diff --git a/_docs/kustomize/akash-ip-operator/ingress.yaml b/_docs/kustomize/akash-operator-ip/ingress.yaml similarity index 100% rename from _docs/kustomize/akash-ip-operator/ingress.yaml rename to _docs/kustomize/akash-operator-ip/ingress.yaml diff --git a/_docs/kustomize/akash-ip-operator/kustomization.yaml b/_docs/kustomize/akash-operator-ip/kustomization.yaml similarity index 88% rename from _docs/kustomize/akash-ip-operator/kustomization.yaml rename to _docs/kustomize/akash-operator-ip/kustomization.yaml index b26655808..88844482e 100644 --- a/_docs/kustomize/akash-ip-operator/kustomization.yaml +++ b/_docs/kustomize/akash-operator-ip/kustomization.yaml @@ -1,6 +1,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1 - kind: Kustomization - +kind: Kustomization +namespace: akash-services resources: - deployment.yaml - service.yaml @@ -8,13 +8,7 @@ resources: - rbac.yaml - service_account.yaml - cluster_role.yaml - -namespace: akash-services - configMapGenerator: - - name: akash-ip-operator-boot - files: - - run.sh - name: akash-provider-config literals: - k8s-manifest-ns=lease diff --git a/_docs/kustomize/akash-operator-ip/rbac.yaml b/_docs/kustomize/akash-operator-ip/rbac.yaml new file mode 100644 index 000000000..0ff593515 --- /dev/null +++ b/_docs/kustomize/akash-operator-ip/rbac.yaml @@ -0,0 +1,38 @@ +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: akash-ip-operator-manage-service +subjects: + - kind: ServiceAccount + name: akash-ip-operator + namespace: akash-services +roleRef: + kind: ClusterRole + name: akash-ip-op-manage-service + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: akash-ip-operator-watch-providerleasedip +subjects: + - kind: ServiceAccount + name: akash-ip-operator + namespace: akash-services +roleRef: + kind: ClusterRole + name: akash-ip-op-watch-providerleasedip + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: akash-ip-operator-watch-configmaps +subjects: + - kind: ServiceAccount + name: akash-ip-operator + namespace: akash-services +roleRef: + kind: ClusterRole + name: akash-ip-op-watch-configmaps + apiGroup: rbac.authorization.k8s.io diff --git a/_docs/kustomize/akash-ip-operator/service.yaml b/_docs/kustomize/akash-operator-ip/service.yaml similarity index 100% rename from _docs/kustomize/akash-ip-operator/service.yaml rename to _docs/kustomize/akash-operator-ip/service.yaml diff --git a/_docs/kustomize/akash-ip-operator/service_account.yaml b/_docs/kustomize/akash-operator-ip/service_account.yaml similarity index 100% rename from _docs/kustomize/akash-ip-operator/service_account.yaml rename to _docs/kustomize/akash-operator-ip/service_account.yaml diff --git a/_docs/kustomize/akash-provider/deployment.yaml b/_docs/kustomize/akash-provider/deployment.yaml index f035f28fe..483fbfc07 100644 --- a/_docs/kustomize/akash-provider/deployment.yaml +++ b/_docs/kustomize/akash-provider/deployment.yaml @@ -3,6 +3,7 @@ kind: Deployment metadata: name: akash-provider spec: + revisionHistoryLimit: 1 selector: matchLabels: app: akash-provider @@ -31,17 +32,17 @@ spec: - name: AKASH_HOME valueFrom: configMapKeyRef: - name: akash-client-config + name: 
akash-provider-client-config key: home - name: AKASH_FROM valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: from - name: AKASH_KEYRING_BACKEND valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: keyring-backend volumeMounts: - name: boot @@ -50,11 +51,14 @@ spec: - name: keys mountPath: /boot-keys readOnly: true + - name: home + mountPath: /home containers: - name: akash-provider image: ghcr.io/ovrclk/provider-services:stable imagePullPolicy: IfNotPresent args: + - provider-services - run - --cluster-k8s env: @@ -66,42 +70,42 @@ spec: - name: AP_HOME valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: home # --from - name: AP_FROM valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: from # --node - name: AP_NODE valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: node # --chain-id - name: AP_CHAIN_ID valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: chain-id # --keyring-backend - name: AP_KEYRING_BACKEND valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: keyring-backend # --trust-node - name: AP_TRUST_NODE valueFrom: configMapKeyRef: - name: akash-client-config + name: akash-provider-client-config key: trust-node ## @@ -361,12 +365,8 @@ spec: - name: gateway containerPort: 8443 volumeMounts: - - name: boot - mountPath: /boot - readOnly: true - - name: keys - mountPath: /boot-keys - readOnly: true + - name: home + mountPath: /home volumes: - name: boot configMap: @@ -374,3 +374,5 @@ spec: - name: keys secret: secretName: akash-provider-keys + - name: home + emptyDir: {} diff --git a/_docs/kustomize/akash-provider/ingress.yaml b/_docs/kustomize/akash-provider/ingress.yaml index d7b13aeeb..5a17e5428 100644 --- 
a/_docs/kustomize/akash-provider/ingress.yaml +++ b/_docs/kustomize/akash-provider/ingress.yaml @@ -4,13 +4,14 @@ metadata: name: akash-provider annotations: nginx.ingress.kubernetes.io/ssl-passthrough: "true" - nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: "https" nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" spec: ingressClassName: "akash-ingress-class" rules: - host: akash-provider.localhost + http: paths: - path: / diff --git a/_docs/kustomize/akash-provider/kustomization.yaml b/_docs/kustomize/akash-provider/kustomization.yaml index 9d7e7c0af..cea038f64 100644 --- a/_docs/kustomize/akash-provider/kustomization.yaml +++ b/_docs/kustomize/akash-provider/kustomization.yaml @@ -1,32 +1,31 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: akash-services resources: - rbac.yaml - deployment.yaml - service.yaml - ingress.yaml - service_account.yaml - -namespace: akash-services - commonLabels: app: akash-provider akash.network/component: akash-provider - configMapGenerator: - name: akash-provider-boot files: - run.sh - - name: akash-client-config + - name: akash-provider-client-config literals: - home=/home - - from=main + - from=provider - node=http://akash-node:26657 - chain-id=local - keyring-backend=test - trust-node=true - name: akash-provider-config - literals: + literals: [] ## # override-able defaults ## @@ -63,6 +62,6 @@ secretGenerator: # required values: ## - name: akash-provider-keys - files: + files: [] # - key.txt # - key-pass.txt diff --git a/_docs/kustomize/akash-provider/run.sh b/_docs/kustomize/akash-provider/run.sh index ce4980ea9..6e400f1ba 100644 --- a/_docs/kustomize/akash-provider/run.sh +++ b/_docs/kustomize/akash-provider/run.sh @@ -1,6 +1,6 @@ #!/bin/sh -set -e +set -xe ## # Configuration sanity check @@ -8,14 +8,13 @@ set -e # shellcheck disable=SC2015 [ -f "$AKASH_BOOT_KEYS/key.txt" ] && [ -f 
"$AKASH_BOOT_KEYS/key-pass.txt" ] || { - echo "Key information not found; AKASH_BOOT_KEYS is not configured properly" - exit 1 + echo "Key information not found; AKASH_BOOT_KEYS is not configured properly" + exit 1 } env | sort ## -# Import key +# Import key. AKASH_FROM contains key name ## -/bin/akash --home="$AKASH_HOME" keys import --keyring-backend="$AKASH_KEYRING_BACKEND" "$AKASH_FROM" \ - "$AKASH_BOOT_KEYS/key.txt" < "$AKASH_BOOT_KEYS/key-pass.txt" +/bin/akash keys import "$AKASH_FROM" "$AKASH_BOOT_KEYS/key.txt" < "$AKASH_BOOT_KEYS/key-pass.txt" diff --git a/_docs/kustomize/akash-services/kustomization.yaml b/_docs/kustomize/akash-services/kustomization.yaml index d8038238d..0b51f85a8 100644 --- a/_docs/kustomize/akash-services/kustomization.yaml +++ b/_docs/kustomize/akash-services/kustomization.yaml @@ -1,4 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: akash-services resources: - network-policies.yaml - -namespace: akash-services diff --git a/_docs/kustomize/kind/kind-metrics-server.yaml b/_docs/kustomize/kind/kind-metrics-server.yaml index e9d8cf673..3ef711467 100644 --- a/_docs/kustomize/kind/kind-metrics-server.yaml +++ b/_docs/kustomize/kind/kind-metrics-server.yaml @@ -119,6 +119,7 @@ metadata: name: metrics-server namespace: kube-system spec: + revisionHistoryLimit: 0 selector: matchLabels: k8s-app: metrics-server @@ -137,6 +138,7 @@ spec: - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --metric-resolution=15s + - --kubelet-insecure-tls image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/_run/single/kustomize/akash-hostname-operator/docker-image.yaml b/_docs/kustomize/templates/akash-node/docker-image.yaml similarity index 100% rename from _run/single/kustomize/akash-hostname-operator/docker-image.yaml rename to _docs/kustomize/templates/akash-node/docker-image.yaml diff --git 
a/_run/single/kustomize/akash-node/gateway-host.yaml b/_docs/kustomize/templates/akash-node/gateway-host.yaml similarity index 100% rename from _run/single/kustomize/akash-node/gateway-host.yaml rename to _docs/kustomize/templates/akash-node/gateway-host.yaml diff --git a/_run/single/kustomize/akash-node/kustomization.yaml b/_docs/kustomize/templates/akash-node/kustomization.yaml similarity index 86% rename from _run/single/kustomize/akash-node/kustomization.yaml rename to _docs/kustomize/templates/akash-node/kustomization.yaml index 2b91087cd..32178e5f6 100644 --- a/_run/single/kustomize/akash-node/kustomization.yaml +++ b/_docs/kustomize/templates/akash-node/kustomization.yaml @@ -1,17 +1,16 @@ -bases: - # - github.com/ovrclk/akash/_docs/kustomize/akashd?ref=boz/kustomize - - ../../../../_docs/kustomize/akash-node - +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization namespace: akash-services +resources: + - ../../../../../_docs/kustomize/akash-node configMapGenerator: - ## # cosmos-sdk app config (app.toml) overrides ## - name: akash-app-config behavior: merge - literals: + literals: [] # - pruning=syncable ## @@ -19,7 +18,7 @@ configMapGenerator: ## - name: akash-config behavior: merge - literals: + literals: [] # - moniker=node0 - name: akash-data diff --git a/_run/single/kustomize/akash-provider/docker-image.yaml b/_docs/kustomize/templates/akash-operator-hostname/docker-image.yaml similarity index 55% rename from _run/single/kustomize/akash-provider/docker-image.yaml rename to _docs/kustomize/templates/akash-operator-hostname/docker-image.yaml index 66ef73543..8d6693a8c 100644 --- a/_run/single/kustomize/akash-provider/docker-image.yaml +++ b/_docs/kustomize/templates/akash-operator-hostname/docker-image.yaml @@ -1,3 +1,3 @@ - op: replace path: /spec/template/spec/containers/0/image - value: ghcr.io/ovrclk/akash:stable + value: ghcr.io/ovrclk/provider-services:stable diff --git a/_run/kube/kustomize/akash-hostname-operator/kustomization.yaml 
b/_docs/kustomize/templates/akash-operator-hostname/kustomization.yaml similarity index 57% rename from _run/kube/kustomize/akash-hostname-operator/kustomization.yaml rename to _docs/kustomize/templates/akash-operator-hostname/kustomization.yaml index bd9b5976e..b09c4de6a 100644 --- a/_run/kube/kustomize/akash-hostname-operator/kustomization.yaml +++ b/_docs/kustomize/templates/akash-operator-hostname/kustomization.yaml @@ -1,8 +1,8 @@ -bases: - - ../../../../_docs/kustomize/akash-hostname-operator - +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization namespace: akash-services - +resources: + - ../../../../../_docs/kustomize/akash-operator-hostname patchesJson6902: - path: docker-image.yaml target: diff --git a/_run/kube/kustomize/akash-hostname-operator/docker-image.yaml b/_docs/kustomize/templates/akash-operator-inventory/docker-image.yaml similarity index 52% rename from _run/kube/kustomize/akash-hostname-operator/docker-image.yaml rename to _docs/kustomize/templates/akash-operator-inventory/docker-image.yaml index bd3cf28e3..8d6693a8c 100644 --- a/_run/kube/kustomize/akash-hostname-operator/docker-image.yaml +++ b/_docs/kustomize/templates/akash-operator-inventory/docker-image.yaml @@ -1,3 +1,3 @@ - op: replace path: /spec/template/spec/containers/0/image - value: ghcr.io/ovrclk/provider-services:latest-arm64 + value: ghcr.io/ovrclk/provider-services:stable diff --git a/_docs/kustomize/templates/akash-operator-inventory/kustomization.yaml b/_docs/kustomize/templates/akash-operator-inventory/kustomization.yaml new file mode 100644 index 000000000..df87579dc --- /dev/null +++ b/_docs/kustomize/templates/akash-operator-inventory/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: akash-services +resources: + - ../../../../../_docs/kustomize/akash-operator-inventory + +patchesJson6902: + - path: docker-image.yaml + target: + kind: Deployment + group: apps + name: inventory-operator + 
version: v1 diff --git a/_run/single/kustomize/akash-node/docker-image.yaml b/_docs/kustomize/templates/akash-operator-ip/docker-image.yaml similarity index 100% rename from _run/single/kustomize/akash-node/docker-image.yaml rename to _docs/kustomize/templates/akash-operator-ip/docker-image.yaml diff --git a/_docs/kustomize/templates/akash-operator-ip/kustomization.yaml b/_docs/kustomize/templates/akash-operator-ip/kustomization.yaml new file mode 100644 index 000000000..eaf18ab54 --- /dev/null +++ b/_docs/kustomize/templates/akash-operator-ip/kustomization.yaml @@ -0,0 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: akash-services +resources: + - ../../../../../_docs/kustomize/akash-operator-ip +patchesJson6902: + - path: docker-image.yaml + target: + kind: Deployment + group: apps + name: akash-ip-operator + version: v1 diff --git a/_docs/kustomize/templates/akash-provider/docker-image.yaml b/_docs/kustomize/templates/akash-provider/docker-image.yaml new file mode 100644 index 000000000..62362f370 --- /dev/null +++ b/_docs/kustomize/templates/akash-provider/docker-image.yaml @@ -0,0 +1,6 @@ +- op: replace + path: /spec/template/spec/initContainers/0/image + value: ghcr.io/ovrclk/akash:stable +- op: replace + path: /spec/template/spec/containers/0/image + value: ghcr.io/ovrclk/provider-services:stable diff --git a/_run/single/kustomize/akash-provider/gateway-host.yaml b/_docs/kustomize/templates/akash-provider/gateway-host.yaml similarity index 100% rename from _run/single/kustomize/akash-provider/gateway-host.yaml rename to _docs/kustomize/templates/akash-provider/gateway-host.yaml diff --git a/_run/single/kustomize/akash-provider/kustomization.yaml b/_docs/kustomize/templates/akash-provider/kustomization.yaml similarity index 87% rename from _run/single/kustomize/akash-provider/kustomization.yaml rename to _docs/kustomize/templates/akash-provider/kustomization.yaml index 7c221dc35..93304009b 100644 --- 
a/_run/single/kustomize/akash-provider/kustomization.yaml +++ b/_docs/kustomize/templates/akash-provider/kustomization.yaml @@ -1,15 +1,14 @@ -bases: - # - github.com/ovrclk/akash/_docs/kustomize/akash-provider?ref=boz/kustomize - - ../../../../_docs/kustomize/akash-provider - +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization namespace: akash-services - +resources: + - ../../../../../_docs/kustomize/akash-provider configMapGenerator: ## # cosmos-sdk client (config.toml) options ## - - name: akash-client-config + - name: akash-provider-client-config behavior: merge literals: - node=http://akash-node:26657 diff --git a/_docs/provider/kube/metallb-service.yaml b/_docs/provider/kube/metallb-service.yaml index 44b7eaa6e..0bd0a804e 100644 --- a/_docs/provider/kube/metallb-service.yaml +++ b/_docs/provider/kube/metallb-service.yaml @@ -21,15 +21,15 @@ metadata: namespace: metallb-system spec: podSelector: - matchLabels: + matchLabels: app: metallb component: controller policyTypes: - - Ingress + - Ingress ingress: - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: akash-services - podSelector: - matchLabels: {} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: akash-services + podSelector: + matchLabels: {} diff --git a/_run/common-base.mk b/_run/common-base.mk index 9cafb3255..e01d27830 100644 --- a/_run/common-base.mk +++ b/_run/common-base.mk @@ -1,8 +1,11 @@ include $(abspath $(dir $(lastword $(MAKEFILE_LIST)))/../make/init.mk) AP_RUN_NAME := $(notdir $(CURDIR)) -AKASH_HOME ?= $(DEVCACHE_RUN)/$(AP_RUN_NAME) +AP_RUN_DIR := $(DEVCACHE_RUN)/$(AP_RUN_NAME) +AKASH_HOME ?= $(DEVCACHE_RUN)/$(AP_RUN_NAME)/.akash .PHONY: bins bins: +ifneq ($(SKIP_BUILD), true) make -C $(AP_ROOT) bins +endif diff --git a/_run/common-commands.mk b/_run/common-commands.mk index 20938431f..edce2f48d 100644 --- a/_run/common-commands.mk +++ b/_run/common-commands.mk @@ -46,33 +46,31 @@ multisig-send: .PHONY: provider-create 
provider-create: - $(AKASH) tx provider create "$(PROVIDER_CONFIG_PATH)" \ - --from "$(PROVIDER_KEY_NAME)" + $(AKASH) tx provider create "$(PROVIDER_CONFIG_PATH)" --from "$(PROVIDER_KEY_NAME)" .PHONY: provider-update provider-update: - $(AKASH) tx provider update "$(PROVIDER_CONFIG_PATH)" \ - --from "$(PROVIDER_KEY_NAME)" + $(AKASH) tx provider update "$(PROVIDER_CONFIG_PATH)" --from "$(PROVIDER_KEY_NAME)" .PHONY: provider-status provider-status: - $(PROVIDER_SERVICES) provider status $(PROVIDER_ADDRESS) + $(PROVIDER_SERVICES) status $(PROVIDER_ADDRESS) .PHONY: authenticate authenticate: - $(PROVIDER_SERVICES) provider authenticate \ + $(PROVIDER_SERVICES) authenticate \ --from "$(KEY_ADDRESS)" \ --provider "$(PROVIDER_ADDRESS)" .PHONY: auth-server auth-server: - $(PROVIDER_SERVICES) provider auth-server \ + $(PROVIDER_SERVICES) auth-server \ --from "$(PROVIDER_KEY_NAME)" \ --jwt-auth-listen-address "$(JWT_AUTH_HOST)" \ .PHONY: run-resource-server run-resource-server: - $(PROVIDER_SERVICES) provider run-resource-server \ + $(PROVIDER_SERVICES) run-resource-server \ --from "$(PROVIDER_KEY_NAME)" \ --resource-server-listen-address "$(RESOURCE_SERVER_HOST)" \ --loki-gateway-listen-address localhost:3100 \ @@ -274,7 +272,7 @@ events-run: .PHONY: provider-lease-logs provider-lease-logs: - $(AKASH) provider lease-logs \ + $(PROVIDER_SERVICES) lease-logs \ -f \ --service="$(LEASE_SERVICES)" \ --dseq "$(DSEQ)" \ @@ -283,8 +281,26 @@ provider-lease-logs: .PHONY: provider-lease-events provider-lease-events: - $(AKASH) provider lease-events \ + $(PROVIDER_SERVICES) lease-events \ -f \ --dseq "$(DSEQ)" \ --from "$(KEY_NAME)" \ --provider "$(PROVIDER_ADDRESS)" + +PHONY: provider-lease-status +provider-lease-status: + $(PROVIDER_SERVICES) lease-status \ + --dseq "$(DSEQ)" \ + --gseq "$(GSEQ)" \ + --oseq "$(OSEQ)" \ + --from "$(KEY_NAME)" \ + --provider "$(PROVIDER_ADDRESS)" + +.PHONY: provider-service-status +provider-service-status: + $(PROVIDER_SERVICES) lease-status \ + 
--dseq "$(DSEQ)" \ + --gseq "$(GSEQ)" \ + --oseq "$(OSEQ)" \ + --from "$(KEY_NAME)" \ + --provider "$(PROVIDER_ADDRESS)" diff --git a/_run/common-helm.mk b/_run/common-helm.mk new file mode 100644 index 000000000..a5516a2f5 --- /dev/null +++ b/_run/common-helm.mk @@ -0,0 +1,28 @@ +HELM_CHARTS ?= + +LOKI_VERSION ?= 2.9.1 +PROMTAIL_VERSION ?= 3.11.0 +GRAFANA_VERSION ?= 6.21.2 + +.PHONY: kind-install-helm-charts +kind-install-helm-charts: $(patsubst %, kind-install-helm-chart-%,$(HELM_CHARTS)) + +# Create a kubernetes cluster with multi-tenant loki, promtail and grafana integrated for logging. +# See: https://www.scaleway.com/en/docs/tutorials/manage-k8s-logging-loki/ for more info. +.PHONY: kind-install-helm-chart-loki +kind-install-helm-chart-loki: + helm repo add grafana https://grafana.github.io/helm-charts + helm repo update + helm upgrade --install loki grafana/loki \ + --version $(LOKI_VERSION) \ + --create-namespace \ + --namespace loki-stack \ + --set persistence.enabled=true,persistence.size=10Gi,config.auth_enabled=true + helm upgrade --install promtail grafana/promtail \ + --version $(PROMTAIL_VERSION) \ + --namespace loki-stack \ + -f ../promtail-values.yaml + helm upgrade --install grafana grafana/grafana \ + --version $(GRAFANA_VERSION) \ + --namespace loki-stack \ + --set persistence.enabled=true,persistence.type=pvc,persistence.size=10Gi diff --git a/_run/common-kind.mk b/_run/common-kind.mk index 70ad2340b..dccf2fa27 100644 --- a/_run/common-kind.mk +++ b/_run/common-kind.mk @@ -1,41 +1,61 @@ # KIND_NAME NOTE: 'kind' string literal is default for the GH actions # KinD, it's fine to use other names locally, however in GH container name -# is configured by engineered/setup-kind. `kind-control-plane` is the docker +# is configured by engineerd/setup-kind. `kind-control-plane` is the docker # image's name in GH Actions. 
export KIND_NAME ?= $(shell basename $$PWD) +KIND_CREATE := $(AP_RUN_DIR)/.kind-create + KINDEST_VERSION ?= v1.22.2 KIND_IMG ?= kindest/node:$(KINDEST_VERSION) K8S_CONTEXT ?= $(shell kubectl config current-context) -KIND_HTTP_PORT ?= $(shell docker inspect \ + +KIND_HTTP_PORT = $(shell docker inspect \ --type container "$(KIND_NAME)-control-plane" \ --format '{{index .NetworkSettings.Ports "80/tcp" 0 "HostPort"}}') -KIND_HTTP_IP ?= $(shell docker inspect \ +KIND_HTTP_IP = $(shell docker inspect \ --type container "$(KIND_NAME)-control-plane" \ --format '{{index .NetworkSettings.Ports "80/tcp" 0 "HostIp"}}') -KIND_K8S_IP ?= $(shell docker inspect \ +KIND_K8S_IP = $(shell docker inspect \ --type container "$(KIND_NAME)-control-plane" \ --format '{{index .NetworkSettings.Ports "6443/tcp" 0 "HostIp"}}') -KIND_PORT_BINDINGS ?= $(shell docker inspect "$(KIND_NAME)-control-plane" \ - --format '{{index .NetworkSettings.Ports "80/tcp" 0 "HostPort"}}') +# KIND_PORT_BINDINGS deliberately redirects stderr to /dev/null +# in order to suppress message Error: No such object: kube-control-plane +# during cluster setup +# it is a bit doggy way but seems to do the job +KIND_PORT_BINDINGS = $(shell docker inspect "$(KIND_NAME)-control-plane" \ + --format '{{index .NetworkSettings.Ports "80/tcp" 0 "HostPort"}}' 2> /dev/null) + +SETUP_KIND := "$(AP_ROOT)/script/setup-kind.sh" +KIND_CONFIG ?= default +KIND_CONFIG_FILE := $(shell "$(SETUP_KIND)" config-file $$PWD $(KIND_CONFIG)) + +include ../common-kustomize.mk -KIND_CONFIG ?= kind-config.yaml -KIND_CONFIG_CALICO ?= ../kind-config-calico.yaml +# certain targets need to use bash +# detect where bash is installed +# use akash-node-ready target as example +BASH_PATH := $(shell which bash) + +INGRESS_CONFIG_PATH ?= ../ingress-nginx.yaml +CALICO_MANIFEST ?= https://docs.projectcalico.org/v3.8/manifests/calico.yaml AKASH_DOCKER_IMAGE ?= ghcr.io/ovrclk/akash:latest-$(UNAME_ARCH) DOCKER_IMAGE ?= 
ghcr.io/ovrclk/provider-services:latest-$(UNAME_ARCH) PROVIDER_HOSTNAME ?= localhost -PROVIDER_HOST ?= $(PROVIDER_HOSTNAME):$(KIND_HTTP_PORT) -PROVIDER_ENDPOINT ?= http://$(PROVIDER_HOST) +PROVIDER_HOST = $(PROVIDER_HOSTNAME):$(KIND_HTTP_PORT) +PROVIDER_ENDPOINT = http://$(PROVIDER_HOST) -INGRESS_CONFIG_PATH ?= ../ingress-nginx.yaml -INGRESS_CLASS_CONFIG_PATH ?= ../ingress-nginx-class.yaml -CALICO_MANIFEST ?= https://docs.projectcalico.org/v3.8/manifests/calico.yaml +METALLB_CONFIG_PATH ?= ../metallb.yaml +METALLB_IP_CONFIG_PATH ?= ../kind-config-metal-lb-ip.yaml +METALLB_SERVICE_PATH ?= ../../_docs/provider/kube/metallb-service.yaml + +KIND_ROLLOUT_TIMEOUT ?= 90 .PHONY: app-http-port app-http-port: @@ -47,91 +67,110 @@ kind-k8s-ip: .PHONY: kind-prepare-images kind-prepare-images: +ifneq ($(SKIP_BUILD), true) make -C $(AP_ROOT) docker-image ifeq ($(AKASH_SRC_IS_LOCAL), true) make -C $(AKASH_LOCAL_PATH) docker-image else docker pull $(AKASH_DOCKER_IMAGE) endif - -.PHONY: kind-configure-image -kind-configure-image: -ifeq ($(KIND_NAME), single) - echo "- op: replace\n path: /spec/template/spec/containers/0/image\n value: $(AKASH_DOCKER_IMAGE)" > ./kustomize/akash-node/docker-image.yaml - echo "- op: replace\n path: /spec/template/spec/containers/0/image\n value: $(DOCKER_IMAGE)" > ./kustomize/akash-provider/docker-image.yaml endif - echo "- op: replace\n path: /spec/template/spec/containers/0/image\n value: $(DOCKER_IMAGE)" > ./kustomize/akash-hostname-operator/docker-image.yaml -.PHONY: kind-upload-image -kind-upload-image: $(KIND) +.PHONY: kind-upload-images +kind-upload-images: $(KIND) ifeq ($(KIND_NAME), single) - $(KIND) --name "$(KIND_NAME)" load docker-image "${AKASH_DOCKER_IMAGE}" + $(KIND) --name "$(KIND_NAME)" load docker-image "$(AKASH_DOCKER_IMAGE)" endif - $(KIND) --name "$(KIND_NAME)" load docker-image "${DOCKER_IMAGE}" + $(KIND) --name "$(KIND_NAME)" load docker-image "$(DOCKER_IMAGE)" .PHONY: kind-port-bindings kind-port-bindings: $(KIND) @echo 
$(KIND_PORT_BINDINGS) -.PHONY: kind-cluster-create -kind-cluster-create: $(KIND) - $(KIND) create cluster \ - --config "$(KIND_CONFIG)" \ - --name "$(KIND_NAME)" \ - --image "$(KIND_IMG)" - kubectl label nodes $(KIND_NAME)-control-plane akash.network/role=ingress - kubectl apply -f "$(INGRESS_CONFIG_PATH)" - kubectl apply -f "$(INGRESS_CLASS_CONFIG_PATH)" - "$(AP_ROOT)/script/setup-kind.sh" - -.PHONY: kind-install-operators -kind-install-operators: - kubectl apply -f "$(ROOT_DIR)/" - -# Create a kubernetes cluster with multi-tenant loki, promtail and grafana integrated for logging. -# See: https://www.scaleway.com/en/docs/tutorials/manage-k8s-logging-loki/ for more info. -.PHONY: kind-cluster-loki-create -kind-cluster-loki-create: kind-cluster-create - helm repo add grafana https://grafana.github.io/helm-charts - helm repo update - helm upgrade --install loki grafana/loki \ - --version 2.9.1 \ - --create-namespace \ - --namespace loki-stack \ - --set persistence.enabled=true,persistence.size=10Gi,config.auth_enabled=true - helm upgrade --install promtail grafana/promtail \ - --version 3.11.0 \ - --namespace loki-stack \ - -f ../promtail-values.yaml - helm upgrade --install grafana grafana/grafana \ - --version 6.21.2 \ - --namespace loki-stack \ - --set persistence.enabled=true,persistence.type=pvc,persistence.size=10Gi - -.PHONY: kind-cluster-calico-create -kind-cluster-calico-create: $(KIND) - $(KIND) create cluster \ - --config "$(KIND_CONFIG_CALICO)" \ - --name "$(KIND_NAME)" \ - --image "$(KIND_IMG)" +.PHONY: kind-cluster-setup +kind-cluster-setup: init \ + kind-prepare-images \ + kind-cluster-create \ + kind-setup-ingress \ + kind-upload-images \ + kustomize-init \ + kustomize-deploy-services \ + kind-deployments-rollout \ + kind-setup-$(KIND_NAME) + +.PHONY: kind-cluster-setup-e2e +kind-cluster-setup-e2e: kind-cluster-create kind-cluster-setup-e2e-ci + +.PHONY: kind-cluster-setup-e2e-ci +kind-cluster-setup-e2eci: +kind-cluster-setup-e2e-ci: \ + 
kind-setup-ingress \ + kind-upload-images \ + kustomize-init \ + kustomize-deploy-services \ + kind-deployments-rollout + +$(KIND_CREATE): $(KIND) $(AP_RUN_DIR) + $(KIND) create cluster --config "$(KIND_CONFIG_FILE)" --name "$(KIND_NAME)" --image "$(KIND_IMG)" + touch $@ + +.INTERMEDIATE: kind-cluster-create +kind-cluster-create: $(KIND_CREATE) + +.PHONY: kind-setup-ingress +kind-setup-ingress: kind-setup-ingress-$(KIND_CONFIG) + +.PHONY: kind-setup-ingress-calico +kind-setup-ingress-calico: kubectl apply -f "$(CALICO_MANIFEST)" - kubectl -n kube-system set env daemonset/calico-node FELIX_IGNORELOOSERPF=true # Calico needs to be managing networking before finishing setup kubectl apply -f "$(INGRESS_CONFIG_PATH)" - $(AP_ROOT)/script/setup-kind.sh calico-metrics - -.PHONY: kind-ingress-setup -kind-ingress-setup: + kubectl rollout status deployment -n ingress-nginx ingress-nginx-controller --timeout=$(KIND_ROLLOUT_TIMEOUT)s + kubectl apply -f "$(METALLB_CONFIG_PATH)" + kubectl apply -f "$(METALLB_IP_CONFIG_PATH)" + kubectl apply -f "$(METALLB_SERVICE_PATH)" + "$(SETUP_KIND)" calico-metrics + +.PHONY: kind-setup-ingress-default +kind-setup-ingress-default: kubectl label nodes $(KIND_NAME)-control-plane akash.network/role=ingress kubectl apply -f "$(INGRESS_CONFIG_PATH)" - kubectl apply -f "$(INGRESS_CLASS_CONFIG_PATH)" - "$(AP_ROOT)/script/setup-kind.sh" - + kubectl rollout status deployment -n ingress-nginx ingress-nginx-controller --timeout=$(KIND_ROLLOUT_TIMEOUT)s + kubectl apply -f "$(METALLB_CONFIG_PATH)" + kubectl apply -f "$(METALLB_IP_CONFIG_PATH)" + kubectl apply -f "$(METALLB_SERVICE_PATH)" + "$(SETUP_KIND)" .PHONY: kind-cluster-delete kind-cluster-delete: $(KIND) $(KIND) delete cluster --name "$(KIND_NAME)" - -.PHONY: kind-cluster-clean -kind-cluster-clean: - kubectl delete ns -l akash.network + rm -rf $(KIND_CREATE) + +.PHONY: kind-status-ingress-% +kind-status-ingress-%: + kubectl rollout status -n akash-services ingress $* 
--timeout=$(KIND_ROLLOUT_TIMEOUT)s + +.PHONY: kind-deployment-rollout-% +kind-deployment-rollout-%: + kubectl -n akash-services rollout status deployment $* --timeout=$(KIND_ROLLOUT_TIMEOUT)s + kubectl -n akash-services wait pods -l akash.network/component=$* --for condition=Ready --timeout=$(KIND_ROLLOUT_TIMEOUT)s + +.PHONY: akash-node-ready +akash-node-ready: SHELL=$(BASH_PATH) +akash-node-ready: + @( \ + max_retry=15; \ + counter=0; \ + while [[ counter -lt max_retry ]]; do \ + read block < <(curl -s $(AKASH_NODE)/status | jq -r '.result.sync_info.latest_block_height' 2> /dev/null); \ + if [[ $$? -ne 0 || $$block -lt 1 ]]; then \ + echo "unable to get node status. sleep for 1s"; \ + ((counter++)); \ + sleep 1; \ + else \ + echo "latest block height: $${block}"; \ + exit 0; \ + fi \ + done; \ + exit 1 \ + ) diff --git a/_run/common-kustomize.mk b/_run/common-kustomize.mk new file mode 100644 index 000000000..fdd85eaca --- /dev/null +++ b/_run/common-kustomize.mk @@ -0,0 +1,82 @@ +KUSTOMIZE_ROOT ?= $(AP_ROOT)/_docs/kustomize +KUSTOMIZE_DIR := $(DEVCACHE_RUN)/$(KIND_NAME)/kustomize +KUSTOMIZE_PROVIDER := $(KUSTOMIZE_DIR)/akash-provider +KUSTOMIZE_AKASH := $(KUSTOMIZE_DIR)/akash-node +KUSTOMIZE_OPERATOR_HOSTNAME := $(KUSTOMIZE_DIR)/akash-operator-hostname +KUSTOMIZE_OPERATOR_INVENTORY := $(KUSTOMIZE_DIR)/akash-operator-inventory +KUSTOMIZE_OPERATOR_IP := $(KUSTOMIZE_DIR)/akash-operator-ip + +CLIENT_EXPORT_PASSWORD ?= 12345678 + +$(KUSTOMIZE_DIR): + mkdir -p $(KUSTOMIZE_DIR) + +.PHONY: kustomize-init +kustomize-init: $(KUSTOMIZE_DIR) kustomize-templates kustomize-set-images kustomize-configure-services + +#### Kustomize init templates +.PHONY: kustomize-templates +kustomize-templates: $(patsubst %, kustomize-template-%,$(KUSTOMIZE_INSTALLS)) + +.PHONY: kustomize-template-% +kustomize-template-%: + cp -r $(ROOT_DIR)/_docs/kustomize/templates/$* $(KUSTOMIZE_DIR)/ + + +#### Kustomize configure images +.PHONY: kustomize-set-images +kustomize-set-images: $(patsubst %, 
kustomize-set-image-%,$(KUSTOMIZE_INSTALLS)) + +.PHONY: kustomize-set-image-akash-node +kustomize-set-image-akash-node: + echo "- op: replace\n path: /spec/template/spec/containers/0/image\n value: $(AKASH_DOCKER_IMAGE)" > $(KUSTOMIZE_DIR)/akash-node/docker-image.yaml + +.PHONY: kustomize-set-image-akash-provider +kustomize-set-image-akash-provider: + echo "- op: replace\n path: /spec/template/spec/initContainers/0/image\n value: $(AKASH_DOCKER_IMAGE)" > $(KUSTOMIZE_DIR)/akash-provider/docker-image.yaml + echo "- op: replace\n path: /spec/template/spec/containers/0/image\n value: $(DOCKER_IMAGE)" >> $(KUSTOMIZE_DIR)/akash-provider/docker-image.yaml + +.PHONY: kustomize-set-image-akash-operator-% +kustomize-set-image-akash-operator-%: + echo "- op: replace\n path: /spec/template/spec/containers/0/image\n value: $(DOCKER_IMAGE)" > "$(KUSTOMIZE_DIR)/akash-operator-$*/docker-image.yaml" + +#### Kustomize configurations +.PHONY: kustomize-configure-services +kustomize-configure-services: $(patsubst %, kustomize-configure-%,$(KUSTOMIZE_INSTALLS)) + +.PHONY: kustomize-configure-akash-node +kustomize-configure-akash-node: + mkdir -p "$(KUSTOMIZE_AKASH)/cache" + cp -r "$(AKASH_HOME)/"* "$(KUSTOMIZE_AKASH)/cache/" + +.PHONY: kustomize-configure-akash-provider +kustomize-configure-akash-provider: + mkdir -p "$(KUSTOMIZE_PROVIDER)/cache" + cp -r "$(AKASH_HOME)/config" "$(KUSTOMIZE_PROVIDER)/cache/" + echo "$(CLIENT_EXPORT_PASSWORD)" > "$(KUSTOMIZE_PROVIDER)/cache/key-pass.txt" + cat "$(AKASH_HOME)/$(PROVIDER_ADDRESS).pem" > "$(KUSTOMIZE_PROVIDER)/cache/provider-cert.pem" + ( \ + cat "$(KUSTOMIZE_PROVIDER)/cache/key-pass.txt" ; \ + cat "$(KUSTOMIZE_PROVIDER)/cache/key-pass.txt" \ + ) | $(AKASH) keys export provider 1> "$(KUSTOMIZE_PROVIDER)/cache/key.txt" + +.PHONY: kustomize-configure-akash-operator-hostname +kustomize-configure-akash-operator-hostname: + +.PHONY: kustomize-configure-akash-operator-ip +kustomize-init-akash-operator-ip: + +.PHONY: 
kustomize-configure-akash-operator-inventory +kustomize-init-configure-operator-inventory: + +#### Kustomize installations +.PHONY: kustomize-deploy-services +kustomize-deploy-services: $(patsubst %, kustomize-deploy-%,$(KUSTOMIZE_INSTALLS)) + +.PHONY: kustomize-deploy-% +kustomize-deploy-%: + kubectl kustomize $(KUSTOMIZE_DIR)/$* | kubectl apply -f- + +.PHONY: clean-kustomize +clean-kustomize: + rm -rf $(KUSTOMIZE_DIR) diff --git a/_run/common-minikube.mk b/_run/common-minikube.mk index d0407d9b1..57652a01e 100644 --- a/_run/common-minikube.mk +++ b/_run/common-minikube.mk @@ -5,7 +5,7 @@ MINIKUBE_IP = $(shell minikube ip) MINIKUBE_INVOKE = VM_DRIVER=$(MINIKUBE_VM_DRIVER) ROOK_PATH=$(AP_ROOT)/_docs/rook/test $(AP_ROOT)/script/setup-minikube.sh .PHONY: minikube-cluster-create -minikube-cluster-create: init-dirs +minikube-cluster-create: $(MINIKUBE_INVOKE) up $(MINIKUBE_INVOKE) akash-setup kubectl apply -f ../ingress-nginx-class.yaml diff --git a/_run/common.mk b/_run/common.mk index bdcef78a5..34f7388bc 100644 --- a/_run/common.mk +++ b/_run/common.mk @@ -6,6 +6,8 @@ null := space := $(null) # comma := , +SKIP_BUILD ?= false + ifndef AKASH_HOME $(error AKASH_HOME is not set) endif @@ -16,16 +18,18 @@ export AKASH_CHAIN_ID = local export AKASH_YES = true export AKASH_GAS_PRICES = 0.025uakt export AKASH_GAS = auto +export AKASH_NODE = http://akash.localhost:$(KIND_PORT_BINDINGS) -export AP_KEYRING_BACKEND = test -export AP_GAS_ADJUSTMENT = 2 -export AP_CHAIN_ID = local -export AP_YES = true -export AP_GAS_PRICES = 0.025uakt -export AP_GAS = auto +export AP_HOME = $(AKASH_HOME) +export AP_KEYRING_BACKEND = $(AKASH_KEYRING_BACKEND) +export AP_GAS_ADJUSTMENT = $(AKASH_GAS_ADJUSTMENT) +export AP_CHAIN_ID = $(AKASH_CHAIN_ID) +export AP_YES = $(AKASH_YES) +export AP_GAS_PRICES = $(AKASH_GAS_PRICES) +export AP_GAS = $(AKASH_GAS) +export AP_NODE = $(AKASH_NODE) -AKASH := $(AKASH) --home $(AKASH_HOME) -PROVIDER_SERVICES := $(PROVIDER_SERVICES) --home $(AKASH_HOME) +AKASH_INIT 
:= $(AP_RUN_DIR)/.akash-init KEY_OPTS := --keyring-backend=$(AKASH_KEYRING_BACKEND) GENESIS_PATH := $(AKASH_HOME)/config/genesis.json @@ -45,37 +49,44 @@ GENESIS_ACCOUNTS := $(KEY_NAMES) $(MULTISIG_KEY) CLIENT_CERTS := main validator other SERVER_CERTS := provider - .PHONY: init +init: bins akash-init + + +$(AP_RUN_DIR): + mkdir -p $@ + +$(AKASH_HOME): + mkdir -p $@ -init: bins client-init node-init +$(AKASH_INIT): $(AKASH_HOME) client-init node-init + touch $@ -.PHONY: client-init -client-init: init-dirs client-init-keys +.INTERMEDIATE: akash-init +akash-init: $(AKASH_INIT) -.PHONY: init-dirs -init-dirs: - mkdir -p "$(AKASH_HOME)" +.INTERMEDIATE: client-init +client-init: client-init-keys -.PHONY: client-init-keys -client-init-keys: $(patsubst %,client-init-key-%,$(KEY_NAMES)) client-init-multisig-key +.INTERMEDIATE: client-init-keys +client-init-keys: $(patsubst %,client-init-key-%,$(KEY_NAMES)) client-init-key-multisig -.PHONY: client-init-key-% +.INTERMEDIATE: $(patsubst %,client-init-key-%,$(KEY_NAMES)) client-init-key-%: $(AKASH) keys add "$(@:client-init-key-%=%)" -.PHONY: client-init-multisig-key -client-init-multisig-key: +.INTERMEDIATE: client-init-key-multisig +client-init-key-multisig: $(AKASH) keys add \ "$(MULTISIG_KEY)" \ --multisig "$(subst $(space),$(comma),$(strip $(MULTISIG_SIGNERS)))" \ --multisig-threshold 2 -.PHONY: node-init +.INTERMEDIATE: node-init node-init: node-init-genesis node-init-genesis-accounts node-init-genesis-certs node-init-gentx node-init-finalize -.PHONY: node-init-genesis -node-init-genesis: init-dirs +.INTERMEDIATE: node-init-genesis +node-init-genesis: $(AKASH) init node0 cp "$(GENESIS_PATH)" "$(GENESIS_PATH).orig" cat "$(GENESIS_PATH).orig" | \ @@ -85,36 +96,35 @@ node-init-genesis: init-dirs jq -rM '(..|objects|select(has("mint_denom"))).mint_denom |= "$(CHAIN_TOKEN_DENOM)"' > \ "$(GENESIS_PATH)" -.PHONY: node-init-genesis-certs +.INTERMEDIATE: node-init-genesis-certs node-init-genesis-certs: $(patsubst 
%,node-init-genesis-client-cert-%,$(CLIENT_CERTS)) $(patsubst %,node-init-genesis-server-cert-%,$(SERVER_CERTS)) -.PHONY: node-init-genesis-client-cert-% +.INTERMEDIATE: $(patsubst %,node-init-genesis-client-cert-%,$(CLIENT_CERTS)) node-init-genesis-client-cert-%: $(AKASH) tx cert generate client --from=$* $(AKASH) tx cert publish client --to-genesis=true --from=$* -.PHONY: node-init-genesis-server-cert-% +.INTERMEDIATE: $(patsubst %,node-init-genesis-server-cert-%,$(SERVER_CERTS)) node-init-genesis-server-cert-%: $(AKASH) tx cert generate server localhost akash-provider.localhost --from=$* $(AKASH) tx cert publish server --to-genesis=true --from=$* -.PHONY: node-init-genesis-accounts +.INTERMEDIATE: node-init-genesis-accounts node-init-genesis-accounts: $(patsubst %,node-init-genesis-account-%,$(GENESIS_ACCOUNTS)) $(AKASH) validate-genesis -.PHONY: node-init-genesis-account-% +.INTERMEDIATE: $(patsubst %,node-init-genesis-account-%,$(GENESIS_ACCOUNTS)) node-init-genesis-account-%: $(AKASH) add-genesis-account \ "$(shell $(AKASH) $(KEY_OPTS) keys show "$(@:node-init-genesis-account-%=%)" -a)" \ "$(CHAIN_MIN_DEPOSIT)$(CHAIN_TOKEN_DENOM)" -.PHONY: node-init-gentx +.INTERMEDIATE: node-init-gentx node-init-gentx: AKASH_GAS='' AKASH_GAS_PRICES='' node-init-gentx: - $(AKASH) gentx validator \ - "$(CHAIN_VALIDATOR_DELEGATE)$(CHAIN_TOKEN_DENOM)" + $(AKASH) gentx validator "$(CHAIN_VALIDATOR_DELEGATE)$(CHAIN_TOKEN_DENOM)" -.PHONY: node-init-finalize +.INTERMEDIATE: node-init-finalize node-init-finalize: $(AKASH) collect-gentxs $(AKASH) validate-genesis @@ -133,7 +143,7 @@ rest-server-run: .PHONY: clean clean: clean-$(AP_RUN_NAME) - rm -rf "$(AKASH_HOME)" + rm -rf "$(DEVCACHE_RUN)/$(AP_RUN_NAME)" .PHONY: rosetta-run rosetta-run: diff --git a/_run/ingress-nginx-class.yaml b/_run/ingress-nginx-class.yaml deleted file mode 100644 index 94756324c..000000000 --- a/_run/ingress-nginx-class.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: "networking.k8s.io/v1" -kind: "IngressClass" 
-metadata: - name: "akash-ingress-class" - labels: - akash.network: "true" -spec: - controller: "k8s.io/ingress-nginx" diff --git a/_run/ingress-nginx.yaml b/_run/ingress-nginx.yaml index cb892f56f..0e1a2fc3a 100644 --- a/_run/ingress-nginx.yaml +++ b/_run/ingress-nginx.yaml @@ -1,87 +1,67 @@ apiVersion: v1 kind: Namespace metadata: - name: ingress-nginx labels: - app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + name: ingress-nginx --- -# Source: ingress-nginx/templates/controller-serviceaccount.yaml apiVersion: v1 +automountServiceAccountToken: true kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 name: ingress-nginx namespace: ingress-nginx -automountServiceAccountToken: true --- -# Source: ingress-nginx/templates/controller-configmap.yaml apiVersion: v1 -kind: ConfigMap +kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/component: admission-webhook app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: controller - name: ingress-nginx-controller - namespace: ingress-nginx ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - helm.sh/chart: ingress-nginx-4.0.1 app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: controller - name: ingress-nginx-tcp + app.kubernetes.io/part-of: ingress-nginx + 
app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission namespace: ingress-nginx -data: - proxy-body-size: 10m - 8443: "default/akash-provider:8443" --- -# Source: ingress-nginx/templates/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole +kind: Role metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/component: controller app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 name: ingress-nginx + namespace: ingress-nginx rules: - apiGroups: - - '' + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" resources: - configmaps - - endpoints - - nodes - pods - secrets + - endpoints verbs: + - get - list - watch - apiGroups: - - '' - resources: - - nodes - verbs: - - get - - apiGroups: - - '' + - "" resources: - services verbs: @@ -96,13 +76,6 @@ rules: - get - list - watch - - apiGroups: - - '' - resources: - - events - verbs: - - create - - patch - apiGroups: - networking.k8s.io resources: @@ -117,60 +90,101 @@ rules: - get - list - watch + - apiGroups: + - "" + resourceNames: + - ingress-controller-leader + resources: + - configmaps + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - ingress-controller-leader + resources: + - leases + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch --- -# Source: ingress-nginx/templates/clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: Role metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx + 
app.kubernetes.io/component: admission-webhook app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - name: ingress-nginx -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: ingress-nginx -subjects: - - kind: ServiceAccount - name: ingress-nginx - namespace: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create --- -# Source: ingress-nginx/templates/controller-role.yaml apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: controller + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 name: ingress-nginx - namespace: ingress-nginx rules: - apiGroups: - - '' + - "" resources: + - configmaps + - endpoints + - nodes + - pods + - secrets - namespaces verbs: - - get + - list + - watch - apiGroups: - - '' + - coordination.k8s.io resources: - - configmaps - - pods - - secrets - - endpoints + - leases verbs: - - get - list - watch - apiGroups: - - '' + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" resources: - services verbs: @@ -185,6 +199,13 @@ rules: - get - list - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch - apiGroups: - networking.k8s.io resources: @@ -199,40 +220,35 @@ rules: - get - list - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission +rules: - apiGroups: - - '' + - admissionregistration.k8s.io resources: - - configmaps - resourceNames: - - ingress-controller-leader + - validatingwebhookconfigurations verbs: - get - update - - apiGroups: - - '' - resources: - - configmaps - verbs: - - create - - apiGroups: - - '' - resources: - - events - verbs: - - create - - patch --- -# Source: ingress-nginx/templates/controller-rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -244,120 +260,170 @@ subjects: name: ingress-nginx namespace: ingress-nginx --- -# Source: ingress-nginx/templates/controller-service-webhook.yaml -apiVersion: v1 -kind: Service +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/component: admission-webhook app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: controller - name: ingress-nginx-controller-admission + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission namespace: ingress-nginx -spec: - type: ClusterIP - ports: - - name: https-webhook - port: 443 - targetPort: webhook - appProtocol: https - selector: +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: v1 +data: + allow-snippet-annotations: "true" +kind: ConfigMap +metadata: + labels: app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-controller + namespace: ingress-nginx --- -# Source: ingress-nginx/templates/controller-service.yaml apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + 
app.kubernetes.io/version: 1.3.0 name: ingress-nginx-controller namespace: ingress-nginx spec: - type: NodePort ports: - - name: http + - appProtocol: http + name: http port: 80 protocol: TCP targetPort: http - appProtocol: http - - name: https + - appProtocol: https + name: https port: 443 protocol: TCP targetPort: https - appProtocol: https - - name: provider-status - port: 8443 - protocol: TCP selector: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: NodePort --- -# Source: ingress-nginx/templates/controller-deployment.yaml -apiVersion: apps/v1 -kind: Deployment +apiVersion: v1 +kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller - name: ingress-nginx-controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-controller-admission namespace: ingress-nginx spec: + ports: + - appProtocol: https + name: https-webhook + port: 443 + targetPort: webhook selector: - matchLabels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/component: controller - revisionHistoryLimit: 10 + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: 
ingress-nginx-controller + namespace: ingress-nginx +spec: minReadySeconds: 0 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate template: metadata: labels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx spec: - dnsPolicy: ClusterFirst containers: - - name: controller - image: k8s.gcr.io/ingress-nginx/controller:v1.0.0@sha256:0851b34f69f69352bf168e6ccf30e1e20714a264ab1ecd1933e4d8c0fc3215c6 - imagePullPolicy: IfNotPresent - lifecycle: - preStop: - exec: - command: - - /wait-shutdown - args: + - args: - /nginx-ingress-controller - --election-id=ingress-controller-leader - --controller-class=k8s.io/ingress-nginx - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller - - --tcp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-tcp - - --validating-webhook=:7443 + - --validating-webhook=:8443 - --validating-webhook-certificate=/usr/local/certificates/cert - --validating-webhook-key=/usr/local/certificates/key - securityContext: - capabilities: - drop: - - ALL - add: - - NET_BIND_SERVICE - runAsUser: 101 - allowPrivilegeEscalation: true + - --publish-status-address=localhost + - --enable-ssl-passthrough env: - name: POD_NAME valueFrom: @@ -369,6 +435,13 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so + image: registry.k8s.io/ingress-nginx/controller:v1.3.0@sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown livenessProbe: failureThreshold: 5 httpGet: @@ -379,6 +452,19 @@ spec: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 + name: controller + ports: 
+ - containerPort: 80 + hostPort: 80 + name: http + protocol: TCP + - containerPort: 443 + hostPort: 443 + name: https + protocol: TCP + - containerPort: 8443 + name: webhook + protocol: TCP readinessProbe: failureThreshold: 3 httpGet: @@ -389,243 +475,64 @@ spec: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - ports: - - name: http - containerPort: 80 - protocol: TCP - hostPort: 80 - - name: https - containerPort: 443 - protocol: TCP - hostPort: 443 - - name: webhook - containerPort: 7443 - protocol: TCP - - name: provider-status - containerPort: 8443 - hostPort: 8443 - protocol: TCP - volumeMounts: - - name: webhook-cert - mountPath: /usr/local/certificates/ - readOnly: true resources: requests: cpu: 100m memory: 90Mi + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true + dnsPolicy: ClusterFirst nodeSelector: + ingress-ready: "true" kubernetes.io/os: linux - akash.network/role: ingress serviceAccountName: ingress-nginx - terminationGracePeriodSeconds: 300 + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Equal volumes: - name: webhook-cert secret: secretName: ingress-nginx-admission --- -# Source: ingress-nginx/templates/controller-ingressclass.yaml -# We don't support namespaced ingressClass yet -# So a ClusterRole and a ClusterRoleBinding is required -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: controller - name: nginx - namespace: ingress-nginx -spec: - controller: 
k8s.io/ingress-nginx ---- -# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml -# before changing this value, check the required kubernetes version -# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: admission-webhook - name: ingress-nginx-admission -webhooks: - - name: validate.nginx.ingress.kubernetes.io - matchPolicy: Equivalent - rules: - - apiGroups: - - networking.k8s.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - ingresses - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - clientConfig: - service: - namespace: ingress-nginx - name: ingress-nginx-controller-admission - path: /networking/v1/ingresses ---- -# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ingress-nginx-admission - namespace: ingress-nginx - annotations: - helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: admission-webhook ---- -# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: ingress-nginx-admission - annotations: - helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade - helm.sh/hook-delete-policy: 
before-hook-creation,hook-succeeded - labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: admission-webhook -rules: - - apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - update ---- -# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +apiVersion: batch/v1 +kind: Job metadata: - name: ingress-nginx-admission - annotations: - helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: ingress-nginx-admission -subjects: - - kind: ServiceAccount - name: ingress-nginx-admission - namespace: ingress-nginx ---- -# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: ingress-nginx-admission - namespace: ingress-nginx - annotations: - helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: admission-webhook -rules: - - apiGroups: - - '' - resources: - - secrets - verbs: - - get - - create ---- -# Source: 
ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: ingress-nginx-admission - namespace: ingress-nginx - annotations: - helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - labels: - helm.sh/chart: ingress-nginx-4.0.1 app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: admission-webhook -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: ingress-nginx-admission -subjects: - - kind: ServiceAccount - name: ingress-nginx-admission - namespace: ingress-nginx ---- -# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml -apiVersion: batch/v1 -kind: Job -metadata: + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 name: ingress-nginx-admission-create namespace: ingress-nginx - annotations: - helm.sh/hook: pre-install,pre-upgrade - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: admission-webhook spec: template: metadata: - name: ingress-nginx-admission-create labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission-create spec: containers: - - name: create - image: 
k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068 - imagePullPolicy: IfNotPresent - args: + - args: - create - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc - --namespace=$(POD_NAMESPACE) @@ -635,47 +542,44 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - restartPolicy: OnFailure - serviceAccountName: ingress-nginx-admission + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660 + imagePullPolicy: IfNotPresent + name: create + securityContext: + allowPrivilegeEscalation: false nodeSelector: kubernetes.io/os: linux + restartPolicy: OnFailure securityContext: + fsGroup: 2000 runAsNonRoot: true runAsUser: 2000 + serviceAccountName: ingress-nginx-admission --- -# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml apiVersion: batch/v1 kind: Job metadata: - name: ingress-nginx-admission-patch - namespace: ingress-nginx - annotations: - helm.sh/hook: post-install,post-upgrade - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission-patch + namespace: ingress-nginx spec: template: metadata: - name: ingress-nginx-admission-patch labels: - helm.sh/chart: ingress-nginx-4.0.1 - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook + 
app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission-patch spec: containers: - - name: patch - image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068 - imagePullPolicy: IfNotPresent - args: + - args: - patch - --webhook-name=ingress-nginx-admission - --namespace=$(POD_NAMESPACE) @@ -687,10 +591,60 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - restartPolicy: OnFailure - serviceAccountName: ingress-nginx-admission + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660 + imagePullPolicy: IfNotPresent + name: patch + securityContext: + allowPrivilegeEscalation: false nodeSelector: kubernetes.io/os: linux + restartPolicy: OnFailure securityContext: + fsGroup: 2000 runAsNonRoot: true runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.3.0 + name: ingress-nginx-admission +webhooks: + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: ingress-nginx-controller-admission + namespace: ingress-nginx + path: /networking/v1/ingresses + failurePolicy: Fail + matchPolicy: Equivalent + name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: akash-ingress-class + labels: + akash.network: "true" + annotations: + 
ingressclass.kubernetes.io/is-default-class: "true" +spec: + controller: k8s.io/ingress-nginx diff --git a/_run/kind-config-metal-lb-ip.yaml b/_run/kind-config-metal-lb-ip.yaml new file mode 100644 index 000000000..ba9554a16 --- /dev/null +++ b/_run/kind-config-metal-lb-ip.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - 24.0.0.1-24.0.0.100 diff --git a/_run/kube/.envrc b/_run/kube/.envrc index b3acc6414..7c97c29ad 100644 --- a/_run/kube/.envrc +++ b/_run/kube/.envrc @@ -1,3 +1,3 @@ source_up .envrc -export AKASH_HOME=$DEVCACHE_RUN/kube +export AKASH_HOME=$DEVCACHE_RUN/kube/.akash diff --git a/_run/kube/Makefile b/_run/kube/Makefile index 978939f03..84e8d0925 100644 --- a/_run/kube/Makefile +++ b/_run/kube/Makefile @@ -1,3 +1,9 @@ +KIND_SETUP_PREREQUISITES ?= \ + +KUSTOMIZE_INSTALLS ?= \ + akash-operator-hostname \ + akash-operator-inventory + include ../common.mk include ../common-commands.mk include ../common-kind.mk @@ -21,24 +27,6 @@ provider-run: --bid-price-strategy "randomRange" \ --deployment-runtime-class "none" -.PHONY: provider-lease-status -provider-lease-status: - $(PROVIDER_SERVICES) lease-status \ - --dseq "$(DSEQ)" \ - --gseq "$(GSEQ)" \ - --oseq "$(OSEQ)" \ - --from "$(KEY_NAME)" \ - --provider "$(PROVIDER_ADDRESS)" - -.PHONY: provider-service-status -provider-service-status: - $(PROVIDER_SERVICES) lease-status \ - --dseq "$(DSEQ)" \ - --gseq "$(GSEQ)" \ - --oseq "$(OSEQ)" \ - --from "$(KEY_NAME)" \ - --provider "$(PROVIDER_ADDRESS)" - .PHONY: provider-lease-ping provider-lease-ping: curl -sIH "Host: hello.localhost" localhost:$(KIND_HTTP_PORT) @@ -47,10 +35,11 @@ provider-lease-ping: hostname-operator: $(PROVIDER_SERVICES) hostname-operator -.PHONY: kustomize-install-hostname-operator -kustomize-install-hostname-operator: - kubectl kustomize ./kustomize/akash-hostname-operator | kubectl apply -f - - 
.PHONY: clean-kube clean-kube: - # noop + +.PHONY: kind-deployments-rollout +kind-deployments-rollout: + +.PHONY: kind-setup-kube +kind-setup-kube: diff --git a/_run/kube/README.md b/_run/kube/README.md index 975eeca37..81caa088c 100644 --- a/_run/kube/README.md +++ b/_run/kube/README.md @@ -21,25 +21,25 @@ The [instructions](#runbook) below will illustrate how to run a network with a s Four keys and accounts are created. The key names are: -|Key Name|Use| -|---|---| -|`main`|Primary account (creating deployments, etc...)| -|`provider`|The provider account (bidding on orders, etc...)| -|`validator`|The sole validator for the created network| -|`other`|Misc. account to (receives tokens, etc...)| +| Key Name | Use | +|-------------|--------------------------------------------------| +| `main` | Primary account (creating deployments, etc...) | +| `provider` | The provider account (bidding on orders, etc...) | +| `validator` | The sole validator for the created network | +| `other` | Misc. account to (receives tokens, etc...) | Most `make` commands are configurable and have defaults to make it such that you don't need to override them for a simple pass-through of this example. -|Name|Default|Description| -|---|---|---| -|`KEY_NAME`|`main`|standard key name| -|`PROVIDER_KEY_NAME`|`provider`|name of key to use for provider| -|`DSEQ`|1|deployment sequence| -|`GSEQ`|1|group sequence| -|`OSEQ`|1|order sequence| -|`PRICE`|10uakt|price to bid| +| Name | Default | Description | +|---------------------|------------|---------------------------------| +| `KEY_NAME` | `main` | standard key name | +| `PROVIDER_KEY_NAME` | `provider` | name of key to use for provider | +| `DSEQ` | 1 | deployment sequence | +| `GSEQ` | 1 | group sequence | +| `OSEQ` | 1 | order sequence | +| `PRICE` | 10uakt | price to bid | # Runbook @@ -71,10 +71,10 @@ The counter on the left side of the messages is regularly in the 120 range. If there may be a problem. 
-| Option | __t1 Step: 1__ | Explanation | -|---|---|---| -| Map random local port to port 80 of your workload | `make kind-cluster-create` | This is less error-prone, but makes it difficult to access your app through the browser. | -| Map localhost port 80 to workload | `KIND_CONFIG=kind-config-80.yaml make kind-cluster-create` | If anything else is listening on port 80 (any other web server), this method will fail. If it does succeed, you will be able to browse your app from the browser. | +| Option | __t1 Step: 1__ | Explanation | +|---------------------------------------------------|------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Map random local port to port 80 of your workload | `make kind-cluster-setup` | This is less error-prone, but makes it difficult to access your app through the browser. | +| Map localhost port 80 to workload | `KIND_CONFIG=kind-config-80.yaml make kind-cluster-create` | If anything else is listening on port 80 (any other web server), this method will fail. If it does succeed, you will be able to browse your app from the browser. 
| ## Build Akash binaries and initialize network diff --git a/_run/lite/.envrc b/_run/lite/.envrc index f4dd31459..4ad6fb510 100644 --- a/_run/lite/.envrc +++ b/_run/lite/.envrc @@ -1,3 +1,3 @@ source_up .envrc -export AKASH_HOME=$DEVCACHE_RUN/lite +export AKASH_HOME=$DEVCACHE_RUN/lite/.akash diff --git a/_run/metallb.yaml b/_run/metallb.yaml new file mode 100644 index 000000000..0c4de7438 --- /dev/null +++ b/_run/metallb.yaml @@ -0,0 +1,452 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: metallb-system + labels: + app: metallb + +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +spec: + allowPrivilegeEscalation: false + allowedCapabilities: [] + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - secret + - emptyDir +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_RAW + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + hostIPC: false + hostNetwork: true + hostPID: false + hostPorts: + - max: 7472 + min: 7472 + - max: 7946 + min: 7946 + privileged: true + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - secret + - emptyDir +--- +apiVersion: v1 +kind: ServiceAccount 
+metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:controller +rules: + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services/status + verbs: + - update + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - policy + resourceNames: + - controller + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:speaker +rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - nodes + verbs: + - get + - list + - watch + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - policy + resourceNames: + - speaker + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +rules: + - apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +rules: + - apiGroups: + - '' + resources: + - pods + verbs: + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +rules: + - apiGroups: + - '' + resources: + - secrets + verbs: + - create + - apiGroups: + - '' + resources: + - secrets + resourceNames: + - memberlist + verbs: + - 
list + - apiGroups: + - apps + resources: + - deployments + resourceNames: + - controller + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:controller +subjects: + - kind: ServiceAccount + name: controller + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:speaker +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:speaker +subjects: + - kind: ServiceAccount + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: config-watcher +subjects: + - kind: ServiceAccount + name: controller + - kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-lister +subjects: + - kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: controller +subjects: + - kind: ServiceAccount + name: controller +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: metallb + component: speaker + name: speaker + namespace: metallb-system +spec: + selector: + matchLabels: + app: metallb + component: speaker + template: + metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 'true' + labels: + app: metallb + 
component: speaker + spec: + containers: + - args: + - --port=7472 + - --config=config + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + # needed when another software is also using memberlist / port 7946 + # when changing this default you also need to update the container ports definition + # and the PodSecurityPolicy hostPorts definition + #- name: METALLB_ML_BIND_PORT + # value: "7946" + - name: METALLB_ML_LABELS + value: "app=metallb,component=speaker" + - name: METALLB_ML_SECRET_KEY + valueFrom: + secretKeyRef: + name: memberlist + key: secretkey + image: quay.io/metallb/speaker:v0.10.3 + name: speaker + ports: + - containerPort: 7472 + name: monitoring + - containerPort: 7946 + name: memberlist-tcp + - containerPort: 7946 + name: memberlist-udp + protocol: UDP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + readOnlyRootFilesystem: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: speaker + terminationGracePeriodSeconds: 2 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metallb + component: controller + name: controller + namespace: metallb-system +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app: metallb + component: controller + template: + metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: controller + spec: + containers: + - args: + - --port=7472 + - --config=config + env: + - name: METALLB_ML_SECRET_NAME + value: memberlist + - name: METALLB_DEPLOYMENT + value: controller + image: quay.io/metallb/controller:v0.10.3 + name: controller + ports: + - containerPort: 
7472 + name: monitoring + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + runAsUser: 65534 + fsGroup: 65534 + serviceAccountName: controller + terminationGracePeriodSeconds: 0 diff --git a/_run/minikube/.envrc b/_run/minikube/.envrc index 73aa0f886..1186e1db2 100644 --- a/_run/minikube/.envrc +++ b/_run/minikube/.envrc @@ -1,3 +1,3 @@ source_up .envrc -export AKASH_HOME=$DEVCACHE_RUN/minikube +export AKASH_HOME=$DEVCACHE_RUN/minikube/.akash diff --git a/_run/single/.envrc b/_run/single/.envrc index c3c5321fb..e3c6d30ef 100644 --- a/_run/single/.envrc +++ b/_run/single/.envrc @@ -1,3 +1,3 @@ source_up .envrc -export AKASH_HOME=$DEVCACHE_RUN/single +export AKASH_HOME=$DEVCACHE_RUN/single/.akash diff --git a/_run/single/.gitignore b/_run/single/.gitignore deleted file mode 100644 index ae5fcae98..000000000 --- a/_run/single/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -helm -cache diff --git a/_run/single/Makefile b/_run/single/Makefile index 80e5d037c..e4aa8a400 100644 --- a/_run/single/Makefile +++ b/_run/single/Makefile @@ -1,70 +1,29 @@ +KUSTOMIZE_INSTALLS ?= \ + akash-node \ + akash-provider \ + akash-operator-hostname \ + akash-operator-inventory + include ../common.mk include ../common-commands.mk include ../common-kind.mk -KUSTOMIZE_ROOT ?= $(AP_ROOT)/_docs/kustomize -KUSTOMIZE_DIR ?= $(AP_ROOT)/_run/single/kustomize -KUSTOMIZE_PROVIDER_DIR ?= $(KUSTOMIZE_DIR)/akash-provider -KUSTOMIZE_PROVIDER_CACHE ?= $(KUSTOMIZE_PROVIDER_DIR)/cache -KUSTOMIZE_AKASHD_DIR ?= $(KUSTOMIZE_DIR)/akash-node -KUSTOMIZE_AKASHD_CACHE ?= $(KUSTOMIZE_AKASHD_DIR)/cache -CLIENT_EXPORT_PASSWORD ?= 12345678 - PROVIDER_HOSTNAME = akash-provider.localhost -export AP_NODE = tcp://akash.localhost:$(KIND_PORT_BINDINGS) GATEWAY_ENDPOINT ?= https://akash-provider.localhost .PHONY: kind-namespace-setup kind-namespace-setup: kubectl apply -f 
"$(KUSTOMIZE_ROOT)/networking" -.PHONY: kustomize-init -kustomize-init: kustomize-init-akash-node kustomize-init-provider kind-namespace-setup - -.PHONY: kustomize-init-akash-node -kustomize-init-akash-node: - mkdir -p "$(KUSTOMIZE_AKASHD_CACHE)" - cp -r "$(AKASH_HOME)/"* "$(KUSTOMIZE_AKASHD_CACHE)/" - -.PHONY: kustomize-init-provider -kustomize-init-provider: - mkdir -p "$(KUSTOMIZE_PROVIDER_CACHE)" - cp -r "$(AKASH_HOME)/config" "$(KUSTOMIZE_PROVIDER_CACHE)/" - echo "$(CLIENT_EXPORT_PASSWORD)" > "$(KUSTOMIZE_PROVIDER_CACHE)/key-pass.txt" - cat "$(AKASH_HOME)/$(PROVIDER_ADDRESS).pem" > "$(KUSTOMIZE_PROVIDER_CACHE)/provider-cert.pem" - ( \ - cat "$(KUSTOMIZE_PROVIDER_CACHE)/key-pass.txt" ; \ - cat "$(KUSTOMIZE_PROVIDER_CACHE)/key-pass.txt" \ - ) | $(AKASH) keys export provider 1> "$(KUSTOMIZE_PROVIDER_CACHE)/key.txt" - -.PHONY: kustomize-install-node -kustomize-install-node: - kubectl kustomize kustomize/akash-node | kubectl apply -f- - -.PHONY: kustomize-install-provider -kustomize-install-provider: - kubectl kustomize kustomize/akash-provider | kubectl apply -f- - -.PHONY: kustmoize-install-hostname-operator -kustomize-install-hostname-operator: - kubectl kustomize ./kustomize/akash-hostname-operator | kubectl apply -f - - .PHONY: provider-lease-ping provider-lease-ping: curl -sIH "Host: hello.localhost" localhost:$(KIND_HTTP_PORT) -PHONY: provider-lease-status -provider-lease-status: - $(AKASH) provider lease-status \ - --dseq "$(DSEQ)" \ - --gseq "$(GSEQ)" \ - --oseq "$(OSEQ)" \ - --from "$(KEY_NAME)" \ - --provider "$(PROVIDER_ADDRESS)" +PHONY: clean-single +clean-single: + +.PHONY: kind-deployments-rollout +kind-deployments-rollout: kind-deployment-rollout-akash-node -PHONY: clean-$(AP_RUN_NAME) -clean-$(AP_RUN_NAME): - rm -rf $(CURDIR)/kustomize/akash-node/cache - rm -f $(CURDIR)/kustomize/akash-node/docker-image.yaml - rm -rf $(CURDIR)/kustomize/akash-provider/cache - rm -f $(CURDIR)/kustomize/akash-provider/docker-image.yaml +.PHONY: kind-setup-single 
+kind-setup-single: akash-node-ready provider-create diff --git a/_run/single/README.md b/_run/single/README.md index 961c65202..fe911c5c5 100644 --- a/_run/single/README.md +++ b/_run/single/README.md @@ -34,25 +34,25 @@ Each command is marked __t1__-__t3__ to indicate a suggested terminal number. Four keys and accounts are created. The key names are: -|Key Name|Use| -|---|---| -|`main`|Primary account (creating deployments, etc...)| -|`provider`|The provider account (bidding on orders, etc...)| -|`validator`|The sole validator for the created network| -|`other`|Misc. account to (receives tokens, etc...)| +| Key Name | Use | +|-------------|--------------------------------------------------| +| `main` | Primary account (creating deployments, etc...) | +| `provider` | The provider account (bidding on orders, etc...) | +| `validator` | The sole validator for the created network | +| `other` | Misc. account to (receives tokens, etc...) | Most `make` commands are configurable and have defaults to make it such that you don't need to override them for a simple pass-through of this example. -|Name|Default|Description| -|---|---|---| -|`KEY_NAME`|`main`|standard key name| -|`PROVIDER_KEY_NAME`|`provider`|name of key to use for provider| -|`DSEQ`|1|deployment sequence| -|`GSEQ`|1|group sequence| -|`OSEQ`|1|order sequence| -|`PRICE`|10uakt|price to bid| +| Name | Default | Description | +|---------------------|------------|---------------------------------| +| `KEY_NAME` | `main` | standard key name | +| `PROVIDER_KEY_NAME` | `provider` | name of key to use for provider | +| `DSEQ` | 1 | deployment sequence | +| `GSEQ` | 1 | group sequence | +| `OSEQ` | 1 | order sequence | +| `PRICE` | 10uakt | price to bid | To get DNS routing to work locally, there are two addresses which will probably need to set to configure requests to hit the kind docker container. 
To route requests back to the local interface, add the following two lines to your `/etc/hosts` for the Akash-Node and Akash-Provider examples to work correctly. @@ -89,10 +89,10 @@ Pick one of the following commands: __t1__ ```sh # Standard Networking -make kind-cluster-create +make kind-cluster-setup # Calico Network Manger -make kind-cluster-calico-create +KIND_CONFIG=calico make kind-cluster-setup ``` Check all pods in kube-system and ingress-nginx namespaces are in Running state. @@ -108,7 +108,7 @@ cluster created by the `kind` command. This uploads an image from your local doc __t1__ ```sh -DOCKER_IMAGE=ovrclk/akash:mycustomtag make kind-upload-image +DOCKER_IMAGE=ovrclk/akash:mycustomtag make kind-upload-images ``` ### Build Akash binaries and initialize network diff --git a/_run/single/kustomize/akash-hostname-operator/kustomization.yaml b/_run/single/kustomize/akash-hostname-operator/kustomization.yaml deleted file mode 100644 index bd9b5976e..000000000 --- a/_run/single/kustomize/akash-hostname-operator/kustomization.yaml +++ /dev/null @@ -1,12 +0,0 @@ -bases: - - ../../../../_docs/kustomize/akash-hostname-operator - -namespace: akash-services - -patchesJson6902: - - path: docker-image.yaml - target: - kind: Deployment - group: apps - name: akash-hostname-operator - version: v1 diff --git a/_run/single/kustomize/akash-node/.gitignore b/_run/single/kustomize/akash-node/.gitignore deleted file mode 100644 index 14d86ad62..000000000 --- a/_run/single/kustomize/akash-node/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/cache diff --git a/_run/single/kustomize/akash-provider/.gitignore b/_run/single/kustomize/akash-provider/.gitignore deleted file mode 100644 index 14d86ad62..000000000 --- a/_run/single/kustomize/akash-provider/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/cache diff --git a/cluster/operatorclients/hostname_operator_client.go b/cluster/operatorclients/hostname_operator_client.go index 3a0dc4ee3..a8aee647e 100644 --- 
a/cluster/operatorclients/hostname_operator_client.go +++ b/cluster/operatorclients/hostname_operator_client.go @@ -15,7 +15,7 @@ import ( ) const ( - hostnameOperatorHealthPath = "/health" + hostnameOperatorHealthPath = "health" ) type HostnameOperatorClient interface { diff --git a/cluster/operatorclients/ip_operator_client.go b/cluster/operatorclients/ip_operator_client.go index c69b2d20d..bec7fa02c 100644 --- a/cluster/operatorclients/ip_operator_client.go +++ b/cluster/operatorclients/ip_operator_client.go @@ -12,10 +12,11 @@ import ( "sync" mtypes "github.com/ovrclk/akash/x/market/types/v1beta2" - ipoptypes "github.com/ovrclk/provider-services/operator/ipoperator/types" "github.com/tendermint/tendermint/libs/log" "k8s.io/client-go/rest" + ipoptypes "github.com/ovrclk/provider-services/operator/ipoperator/types" + clusterutil "github.com/ovrclk/provider-services/cluster/util" ) @@ -126,7 +127,7 @@ func (ipoc *ipOperatorClient) newRequest(ctx context.Context, method string, pat } func (ipoc *ipOperatorClient) GetIPAddressStatus(ctx context.Context, orderID mtypes.OrderID) ([]ipoptypes.LeaseIPStatus, error) { - path := fmt.Sprintf("/ip-lease-status/%s/%d/%d/%d", orderID.GetOwner(), orderID.GetDSeq(), orderID.GetGSeq(), orderID.GetOSeq()) + path := fmt.Sprintf("ip-lease-status/%s/%d/%d/%d", orderID.GetOwner(), orderID.GetDSeq(), orderID.GetGSeq(), orderID.GetOSeq()) req, err := ipoc.newRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, err @@ -159,7 +160,7 @@ func (ipoc *ipOperatorClient) GetIPAddressStatus(ctx context.Context, orderID mt } func (ipoc *ipOperatorClient) GetIPAddressUsage(ctx context.Context) (ipoptypes.IPAddressUsage, error) { - req, err := ipoc.newRequest(ctx, http.MethodGet, "/usage", nil) + req, err := ipoc.newRequest(ctx, http.MethodGet, "usage", nil) if err != nil { return ipoptypes.IPAddressUsage{}, err } diff --git a/cluster/util/ip_sharing_key.go b/cluster/util/ip_sharing_key.go index 995d24492..679a3a2d5 100644 --- 
a/cluster/util/ip_sharing_key.go +++ b/cluster/util/ip_sharing_key.go @@ -11,7 +11,7 @@ import ( mtypes "github.com/ovrclk/akash/x/market/types/v1beta2" ) -var allowedIPEndpointNameRegex = regexp.MustCompile(`^[a-z0-9\-]+$`) +var allowedIPEndpointNameRegex = regexp.MustCompile(`^[a-z\d\-]+$`) func MakeIPSharingKey(lID mtypes.LeaseID, endpointName string) string { effectiveName := endpointName diff --git a/cmd/provider-services/cmd/root.go b/cmd/provider-services/cmd/root.go index 488e4c5ae..2e2c9dc12 100644 --- a/cmd/provider-services/cmd/root.go +++ b/cmd/provider-services/cmd/root.go @@ -9,6 +9,7 @@ import ( "github.com/ovrclk/provider-services/operator" "github.com/ovrclk/provider-services/operator/hostnameoperator" "github.com/ovrclk/provider-services/operator/ipoperator" + "github.com/ovrclk/provider-services/version" "github.com/cosmos/cosmos-sdk/client/flags" @@ -24,7 +25,7 @@ func NewRootCmd() *cobra.Command { Use: "provider-services", Short: "Provider services commands", SilenceUsage: true, - PersistentPreRunE: acmd.GetPersistentPreRunE(encodingConfig), + PersistentPreRunE: acmd.GetPersistentPreRunE(encodingConfig, []string{"AP"}), } cmd.PersistentFlags().String(flags.FlagNode, "http://localhost:26657", "The node address") @@ -49,6 +50,7 @@ func NewRootCmd() *cobra.Command { cmd.AddCommand(RunResourceServerCmd()) cmd.AddCommand(MigrateEndpointsCmd()) cmd.AddCommand(operator.Cmd()) + cmd.AddCommand(version.NewVersionCommand()) return cmd } diff --git a/go.mod b/go.mod index 8944a56a1..cd606328f 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 - github.com/ovrclk/akash v0.17.0-rc1 + github.com/ovrclk/akash v0.17.0-rc4 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rook/rook v1.9.7 @@ -217,7 +217,7 @@ require ( k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect nhooyr.io/websocket 
v1.8.6 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/kind v0.11.1 + sigs.k8s.io/kind v0.14.0 sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 79ecdb8fe..b854c019a 100644 --- a/go.sum +++ b/go.sum @@ -127,6 +127,7 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -462,7 +463,6 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= @@ -1327,8 +1327,8 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ github.com/otiai10/curr 
v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/ovrclk/akash v0.17.0-rc1 h1:AB1lmIej3FFdHK6qEkHtnIU74CoG3Z64+sZjtKoEt0o= -github.com/ovrclk/akash v0.17.0-rc1/go.mod h1:U2CiUzzVBMd/xrFIwb0OsBsTOqTpI+HXVcLcsT7P4Hs= +github.com/ovrclk/akash v0.17.0-rc4 h1:s3ONB1QQ9MscQPDjtczCzNZaxy+kn1AkZY8SctIkYSI= +github.com/ovrclk/akash v0.17.0-rc4/go.mod h1:U2CiUzzVBMd/xrFIwb0OsBsTOqTpI+HXVcLcsT7P4Hs= github.com/ovrclk/cosmos-sdk v0.45.4-akash.1 h1:ZuBoK0jELPPUtrFZhQWzBRi9LY8oBYTJRuTHt6irLRo= github.com/ovrclk/cosmos-sdk v0.45.4-akash.1/go.mod h1:WOqtDxN3eCCmnYLVla10xG7lEXkFjpTaqm2a2WasgCc= github.com/ovrclk/tendermint v0.34.19-patches h1:4BppAEGV1P+pU3R8pzbriswK03enldY9UqkeWj3NGBg= @@ -1346,7 +1346,6 @@ github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7ir github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -2408,7 +2407,6 @@ k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlm k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery 
v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= @@ -2490,8 +2488,8 @@ sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/kind v0.11.1 h1:pVzOkhUwMBrCB0Q/WllQDO3v14Y+o2V0tFgjTqIUjwA= -sigs.k8s.io/kind v0.11.1/go.mod h1:fRpgVhtqAWrtLB9ED7zQahUimpUXuG/iHT88xYqEGIA= +sigs.k8s.io/kind v0.14.0 h1:cNmI3jGBvp7UegEGbC5we8plDtCUmaNRL+bod7JoSCE= +sigs.k8s.io/kind v0.14.0/go.mod h1:UrFRPHG+2a5j0Q7qiR4gtJ4rEyn8TuMQwuOPf+m4oHg= sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= diff --git a/integration/e2e_test.go b/integration/e2e_test.go index d47802197..75b892b6a 100644 --- a/integration/e2e_test.go +++ b/integration/e2e_test.go @@ -1,3 +1,5 @@ +//go:build e2e + package integration import ( @@ -405,19 +407,17 @@ func (s *IntegrationTestSuite) SetupSuite() { func waitForTCPSocket(ctx context.Context, dialer net.Dialer, host string, t *testing.T) { // Wait no more than 30 seconds for the socket to be listening - subctx, cancel := context.WithTimeout(ctx, 30*time.Second) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() for { - if err := subctx.Err(); err != nil { + if err := ctx.Err(); err != nil { t.Fatalf("timed out trying 
to connect to host %q", host) } // Just test for TCP socket accepting connections, not for an actual functional server - conn, err := dialer.DialContext(subctx, "tcp", host) + conn, err := dialer.DialContext(ctx, "tcp", host) if err != nil { - t.Logf("connecting to %q returned %v", host, err) - if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { t.Fatalf("timed out trying to connect to host %q", host) } @@ -1082,7 +1082,7 @@ func (s *E2EDeploymentUpdate) TestE2ELeaseShell() { var out sdktest.BufferWriter - leaseShellCtx, cancel := context.WithTimeout(context.Background(), time.Minute) + leaseShellCtx, cancel := context.WithTimeout(s.ctx, time.Minute) defer cancel() logged := make(map[string]struct{}) @@ -1484,7 +1484,6 @@ func (s *E2EIPAddress) TestIPAddressLease() { // Wait for lease to show up maxWait := time.After(2 * time.Minute) for { - select { case <-s.ctx.Done(): s.T().Fatal("test context closed before lease is stood up by provider") @@ -1503,7 +1502,11 @@ func (s *E2EIPAddress) TestIPAddressLease() { break } - time.Sleep(time.Second) + select { + case <-s.ctx.Done(): + s.T().Fatal("test context closed before lease is stood up by provider") + case <-time.After(time.Second): + } } time.Sleep(30 * time.Second) // TODO - replace with polling @@ -1518,11 +1521,11 @@ func (s *E2EIPAddress) TestIPAddressLease() { fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()), fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir), ) + require.NoError(s.T(), err) leaseStatusData := gwrest.LeaseStatus{} err = json.Unmarshal(cmdResult.Bytes(), &leaseStatusData) require.NoError(s.T(), err) - s.Require().Len(leaseStatusData.IPs, 1) webService := leaseStatusData.IPs["web"] @@ -1534,7 +1537,6 @@ func (s *E2EIPAddress) TestIPAddressLease() { ipAddr := leasedIP.IP ip := net.ParseIP(ipAddr) s.Assert().NotNilf(ip, "after parsing %q got nil", ipAddr) - } func TestIntegrationTestSuite(t *testing.T) { @@ -1549,8 +1551,7 
@@ func TestIntegrationTestSuite(t *testing.T) { suite.Run(t, new(E2EPersistentStorageDeploymentUpdate)) suite.Run(t, new(E2EMigrateHostname)) suite.Run(t, new(E2EJWTServer)) - // fixme engineering#357 - // suite.Run(t, &E2EIPAddress{IntegrationTestSuite{ipMarketplace: true}}) + suite.Run(t, &E2EIPAddress{IntegrationTestSuite{ipMarketplace: true}}) } func (s *IntegrationTestSuite) waitForBlocksCommitted(height int) error { diff --git a/integration/escrow_monitor_test.go b/integration/escrow_monitor_test.go index 5db9de9ec..1d2fe1164 100644 --- a/integration/escrow_monitor_test.go +++ b/integration/escrow_monitor_test.go @@ -1,3 +1,5 @@ +//go:build e2e + package integration import ( diff --git a/integration/persistentstorage_test.go b/integration/persistentstorage_test.go index d60f44d67..45de5fdd0 100644 --- a/integration/persistentstorage_test.go +++ b/integration/persistentstorage_test.go @@ -1,3 +1,5 @@ +//go:build e2e + package integration import ( @@ -236,7 +238,7 @@ func (s *E2EPersistentStorageBeta2) TestDedicatedStorageClass() { s.Require().Equal(testData.String(), string(bodyData)) } -func (s *E2EPersistentStorageDeploymentUpdate) TestDeploymentUpdate() { +func (s *E2EPersistentStorageDeploymentUpdate) TestPersistentStorageDeploymentUpdate() { // create a deployment deploymentPath, err := filepath.Abs("../testdata/deployment/deployment-v2-storage-updateA.yaml") s.Require().NoError(err) diff --git a/integration/test_helpers.go b/integration/test_helpers.go index 2a3e36feb..ce3bb78c3 100644 --- a/integration/test_helpers.go +++ b/integration/test_helpers.go @@ -1,3 +1,5 @@ +//go:build e2e + package integration import ( diff --git a/make/init.mk b/make/init.mk index a3e66def4..dc4cba1d7 100644 --- a/make/init.mk +++ b/make/init.mk @@ -1,3 +1,10 @@ +# expecting GNU make >= 4.0. so comparing major version only +MAKE_VERSION := $(shell make --version | head -1 | cut -d" " -f3 | cut -d"." 
-f1) + +ifneq (true, $(shell [ $(MAKE_VERSION) -ge 4 ] && echo true)) +$(error "make version is outdated. min required 4.0") +endif + # AP_ROOT may not be set if environment does not support/use direnv # in this case define it manually as well as all required env variables ifndef AP_ROOT @@ -7,7 +14,8 @@ endif UNAME_OS := $(shell uname -s) UNAME_OS_LOWER := $(shell uname -s | tr '[:upper:]' '[:lower:]') -UNAME_ARCH := $(shell uname -m) +# uname reports x86_64. rename to amd64 to make it usable by goreleaser +UNAME_ARCH := $(shell uname -m | sed "s/x86_64/amd64/g") GO_MOD_NAME := $(shell go list -m) @@ -23,7 +31,6 @@ AKASH_SRC_IS_LOCAL := $(shell $(ROOT_DIR)/script/is_local_gomod.sh "gi AKASH_LOCAL_PATH := $(shell $(GO) list -mod=readonly -m -f '{{ .Replace }}' "github.com/ovrclk/akash") AKASH_VERSION := $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/ovrclk/akash | cut -c2-) GRPC_GATEWAY_VERSION := $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/grpc-ecosystem/grpc-gateway) -PROTOC_SWAGGER_GEN_VERSION := $(GRPC_GATEWAY_VERSION) GOLANGCI_LINT_VERSION ?= v1.45.2 GOLANG_VERSION ?= 1.16.1 STATIK_VERSION ?= v0.1.7 @@ -31,6 +38,7 @@ GIT_CHGLOG_VERSION ?= v0.15.1 MODVENDOR_VERSION ?= v0.3.0 MOCKERY_VERSION ?= 2.12.1 K8S_CODE_GEN_VERSION ?= v0.19.3 +KIND_VERSION ?= $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' sigs.k8s.io/kind) # _VERSION_FILE points to the marker file for the installed version. # If _VERSION_FILE is changed, the binary will be re-downloaded. 
@@ -41,6 +49,7 @@ MOCKERY_VERSION_FILE := $(AP_DEVCACHE_VERSIONS)/mockery/v$(MOCKERY_V K8S_CODE_GEN_VERSION_FILE := $(AP_DEVCACHE_VERSIONS)/k8s-codegen/$(K8S_CODE_GEN_VERSION) GOLANGCI_LINT_VERSION_FILE := $(AP_DEVCACHE_VERSIONS)/golangci-lint/$(GOLANGCI_LINT_VERSION) AKASH_VERSION_FILE := $(AP_DEVCACHE_VERSIONS)/akash/$(AKASH_VERSION) +KIND_VERSION_FILE := $(AP_DEVCACHE_VERSIONS)/kind/$(KIND_VERSION) MODVENDOR = $(AP_DEVCACHE_BIN)/modvendor SWAGGER_COMBINE = $(AP_DEVCACHE_NODE_BIN)/swagger-combine @@ -49,10 +58,13 @@ GIT_CHGLOG := $(AP_DEVCACHE_BIN)/git-chglog MOCKERY := $(AP_DEVCACHE_BIN)/mockery K8S_GENERATE_GROUPS := $(AP_ROOT)/vendor/k8s.io/code-generator/generate-groups.sh K8S_GO_TO_PROTOBUF := $(AP_DEVCACHE_BIN)/go-to-protobuf -KIND := kind NPM := npm GOLANGCI_LINT := $(AP_DEVCACHE_BIN)/golangci-lint +ifeq (0, $(shell which kind &>/dev/null; echo $?)) +KIND := $(shell which kind) +endif + AKASH_BIND_LOCAL ?= # if go.mod contains replace for akash on local filesystem @@ -62,3 +74,7 @@ AKASH_BIND_LOCAL := -v $(AKASH_LOCAL_PATH):$(AKASH_LOCAL_PATH) endif include $(AP_ROOT)/make/setup-cache.mk + +ifeq (, $(KIND)) +KIND := $(shell which kind) +endif diff --git a/make/releasing.mk b/make/releasing.mk index 5f104f8a9..a37070f90 100644 --- a/make/releasing.mk +++ b/make/releasing.mk @@ -30,7 +30,7 @@ install: $(GO) install $(BUILD_FLAGS) ./cmd/provider-services .PHONY: docker-image -docker-image: +docker-image: modvendor docker run \ --rm \ -e STABLE=$(IS_STABLE) \ @@ -42,7 +42,7 @@ docker-image: -v $(shell pwd):/go/src/$(GO_MOD_NAME) \ -w /go/src/$(GO_MOD_NAME) \ $(GORELEASER_IMAGE) \ - -f .goreleaser-docker.yaml \ + -f .goreleaser-docker-$(UNAME_ARCH).yaml \ --debug=$(GORELEASER_DEBUG) \ --rm-dist \ --skip-validate \ diff --git a/make/setup-cache.mk b/make/setup-cache.mk index 89ee20b2c..7f5e0a8bd 100644 --- a/make/setup-cache.mk +++ b/make/setup-cache.mk @@ -1,3 +1,7 @@ +ifeq (, $(AP_DEVCACHE)) +$(error AP_DEVCACHE is not set) +endif + $(AP_DEVCACHE): @echo 
"creating .cache dir structure..." mkdir -p $@ @@ -6,6 +10,8 @@ $(AP_DEVCACHE): mkdir -p $(AP_DEVCACHE_VERSIONS) mkdir -p $(AP_DEVCACHE_NODE_MODULES) mkdir -p $(AP_DEVCACHE)/run + +.INTERMEDIATE: cache cache: $(AP_DEVCACHE) .PHONY: akash @@ -83,10 +89,16 @@ $(K8S_CODE_GEN_VERSION_FILE): $(AP_DEVCACHE) modvendor $(K8S_GO_TO_PROTOBUF): $(K8S_CODE_GEN_VERSION_FILE) $(K8S_GENERATE_GROUPS): $(K8S_CODE_GEN_VERSION_FILE) -.PHONY: $(KIND) -$(KIND): - @echo "installing kind ..." - $(GO) install sigs.k8s.io/kind +$(KIND_VERSION_FILE): $(AP_DEVCACHE) +ifeq (, $(KIND)) + @echo "installing kind $(KIND_VERSION) ..." + rm -f $(MOCKERY) + GOBIN=$(AP_DEVCACHE_BIN) go install sigs.k8s.io/kind +endif + rm -rf "$(dir $@)" + mkdir -p "$(dir $@)" + touch $@ +$(KIND): $(KIND_VERSION_FILE) $(NPM): ifeq (, $(shell which $(NPM) 2>/dev/null)) @@ -102,11 +114,5 @@ else @echo "swagger-combine already installed; skipping..." endif -$(PROTOC_SWAGGER_GEN): $(AP_DEVCACHE) -ifeq (, $(shell which protoc-gen-swagger 2>/dev/null)) - @echo "installing protoc-gen-swagger $(PROTOC_SWAGGER_GEN_VERSION) ..." - GOBIN=$(AP_DEVCACHE_BIN) $(GO) install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@$(PROTOC_SWAGGER_GEN_VERSION) -endif - cache-clean: rm -rf $(AP_DEVCACHE) diff --git a/make/test-integration.mk b/make/test-integration.mk index cea0152d4..478cac6fe 100644 --- a/make/test-integration.mk +++ b/make/test-integration.mk @@ -11,8 +11,11 @@ INTEGRATION_VARS := TEST_INTEGRATION=true .PHONY: test-e2e-integration test-e2e-integration: - # Assumes cluster created: `make -s -C _run/kube kind-cluster-create` - $(KIND_VARS) $(INTEGRATION_VARS) go test -count=1 -mod=readonly -p 4 -tags "e2e $(BUILD_MAINNET)" -v ./integration/... 
-run TestIntegrationTestSuite -timeout 1500s + # Assumes cluster created and configured: + # ``` + # KUSTOMIZE_INSTALLS=akash-operator-inventory make kind-cluster-setup-e2e + # ``` + $(KIND_VARS) $(INTEGRATION_VARS) go test -count=1 -mod=readonly -p 4 -tags "e2e" -v ./integration/... -run TestIntegrationTestSuite -timeout 1500s .PHONY: test-e2e-integration-k8s test-e2e-integration-k8s: @@ -20,11 +23,14 @@ test-e2e-integration-k8s: .PHONY: test-query-app test-query-app: - $(INTEGRATION_VARS) $(KIND_VARS) go test -mod=readonly -p 4 -tags "e2e integration $(BUILD_MAINNET)" -v ./integration/... -run TestQueryApp + $(INTEGRATION_VARS) $(KIND_VARS) go test -mod=readonly -p 4 -tags "e2e integration" -v ./integration/... -run TestQueryApp .PHONY: test-k8s-integration test-k8s-integration: - # Assumes cluster created: `make -s -C _run/kube kind-cluster-create` + # Assumes cluster created and configured: + # ``` + # KUSTOMIZE_INSTALLS=akash-operator-inventory make kind-cluster-setup-e2e + # ``` go test -count=1 -v -tags k8s_integration ./pkg/apis/akash.network/v2beta1 go test -count=1 -v -tags k8s_integration ./cluster/kube diff --git a/manifest/manager.go b/manifest/manager.go index fa8e82b2a..5ac77e286 100644 --- a/manifest/manager.go +++ b/manifest/manager.go @@ -8,15 +8,15 @@ import ( "time" "github.com/boz/go-lifecycle" + "github.com/ovrclk/akash/sdl" + "github.com/ovrclk/akash/validation" "github.com/pkg/errors" "github.com/tendermint/tendermint/libs/log" maniv2beta1 "github.com/ovrclk/akash/manifest/v2beta1" "github.com/ovrclk/akash/pubsub" - "github.com/ovrclk/akash/sdl" sdlutil "github.com/ovrclk/akash/sdl/util" "github.com/ovrclk/akash/util/runner" - "github.com/ovrclk/akash/validation" dtypes "github.com/ovrclk/akash/x/deployment/types/v1beta2" mtypes "github.com/ovrclk/akash/x/market/types/v1beta2" @@ -38,6 +38,7 @@ var ( ErrNoManifestForDeployment = errors.New("manifest not yet received for that deployment") ErrNoLeaseForDeployment = errors.New("no lease for 
deployment") errNoGroupForLease = errors.New("group not found") + errManifestRejected = errors.New("manifest rejected") ) func newManager(h *service, daddr dtypes.DeploymentID) *manager { @@ -136,6 +137,7 @@ func (m *manager) clearFetched() { m.data = dtypes.QueryDeploymentResponse{} m.localLeases = nil } + func (m *manager) run(donech chan<- *manager) { defer m.lc.ShutdownCompleted() defer func() { donech <- m }() @@ -146,14 +148,12 @@ func (m *manager) run(donech chan<- *manager) { loop: for { - var stopch <-chan time.Time if m.stoptimer != nil { stopch = m.stoptimer.C } select { - case err := <-m.lc.ShutdownRequest(): m.lc.ShutdownInitiated(err) break loop @@ -166,24 +166,22 @@ loop: case ev := <-m.leasech: m.log.Info("new lease", "lease", ev.LeaseID) m.clearFetched() - m.emitReceivedEvents() m.maybeScheduleStop() runch = m.maybeFetchData(ctx, runch) - case id := <-m.rmleasech: m.log.Info("lease removed", "lease", id) m.clearFetched() m.maybeScheduleStop() - case req := <-m.manifestch: m.log.Info("manifest received") m.requests = append(m.requests, req) - m.validateRequests() - m.emitReceivedEvents() m.maybeScheduleStop() - runch = m.maybeFetchData(ctx, runch) + if runch = m.maybeFetchData(ctx, runch); runch == nil { + m.validateRequests() + m.emitReceivedEvents() + } case version := <-m.updatech: m.log.Info("received version", "version", hex.EncodeToString(version)) m.versions = append(m.versions, version) @@ -226,16 +224,16 @@ loop: if runch != nil { <-runch } - } +// maybeFetchData tries to fetch deployment and lease data. +// If there is a cached result, it returns a nil channel, which signals the caller to process events. func (m *manager) maybeFetchData(ctx context.Context, runch <-chan runner.Result) <-chan runner.Result { if runch != nil { return runch } - expired := time.Since(m.fetchedAt) > m.config.CachedResultMaxAge - if !m.fetched || expired { + if !m.fetched || time.Since(m.fetchedAt) > m.config.CachedResultMaxAge { m.clearFetched() return
m.fetchData(ctx) } @@ -318,7 +316,6 @@ func (m *manager) maybeScheduleStop() bool { // nolint:golint,unparam return false } if m.stoptimer != nil { - m.log.Info("starting stop timer", "duration", manifestLingerDuration) m.stoptimer = time.NewTimer(manifestLingerDuration) } @@ -335,6 +332,7 @@ func (m *manager) fillAllRequests(response error) { req.ch <- response } m.requests = nil + } func (m *manager) emitReceivedEvents() { @@ -406,43 +404,13 @@ func (m *manager) validateRequests() { } } -var errManifestRejected = errors.New("manifest rejected") - -func (m *manager) checkHostnamesForManifest(requestManifest maniv2beta1.Manifest, groupNames []string) error { - // Check if the hostnames are available. Do not block forever - ownerAddr, err := m.data.GetDeployment().DeploymentID.GetOwnerAddress() - if err != nil { - return err - } - - allHostnames := make([]string, 0) - - for _, mgroup := range requestManifest.GetGroups() { - for _, groupName := range groupNames { - // Only check leases with a matching deployment ID & group name - if groupName != mgroup.GetName() { - continue - } - - allHostnames = append(allHostnames, sdlutil.AllHostnamesOfManifestGroup(mgroup)...) 
- if !m.config.HTTPServicesRequireAtLeastOneHost { - continue - } - // For each service that exposes via an Ingress, then require a hsotname - for _, service := range mgroup.Services { - for _, expose := range service.Expose { - if sdlutil.ShouldBeIngress(expose) && len(expose.Hosts) == 0 { - return fmt.Errorf("%w: service %q exposed on %d:%s must have a hostname", errManifestRejected, service.Name, sdlutil.ExposeExternalPort(expose), expose.Proto) - } - } - } - } +func (m *manager) validateRequest(req manifestRequest) error { + select { + case <-req.ctx.Done(): + return req.ctx.Err() + default: } - return m.hostnameService.CanReserveHostnames(allHostnames, ownerAddr) -} - -func (m *manager) validateRequest(req manifestRequest) error { // ensure that an uploaded manifest matches the hash declared on // the Akash Deployment.Version version, err := sdl.ManifestVersion(req.value.Manifest) @@ -457,16 +425,17 @@ func (m *manager) validateRequest(req manifestRequest) error { } else { versionExpected = m.data.Deployment.Version } + if !bytes.Equal(version, versionExpected) { m.log.Info("deployment version mismatch", "expected", m.data.Deployment.Version, "got", version) return ErrManifestVersion } - if err := validation.ValidateManifest(req.value.Manifest); err != nil { + if err = validation.ValidateManifest(req.value.Manifest); err != nil { return err } - if err := validation.ValidateManifestWithDeployment(&req.value.Manifest, m.data.Groups); err != nil { + if err = validation.ValidateManifestWithDeployment(&req.value.Manifest, m.data.Groups); err != nil { return err } @@ -476,9 +445,43 @@ func (m *manager) validateRequest(req manifestRequest) error { groupNames = append(groupNames, lease.Group.GroupSpec.Name) } // Check that hostnames are not in use - if err := m.checkHostnamesForManifest(req.value.Manifest, groupNames); err != nil { + if err = m.checkHostnamesForManifest(req.value.Manifest, groupNames); err != nil { return err } return nil } + +func (m *manager) 
checkHostnamesForManifest(requestManifest maniv2beta1.Manifest, groupNames []string) error { + // Check if the hostnames are available. Do not block forever + ownerAddr, err := m.data.GetDeployment().DeploymentID.GetOwnerAddress() + if err != nil { + return err + } + + allHostnames := make([]string, 0) + + for _, mgroup := range requestManifest.GetGroups() { + for _, groupName := range groupNames { + // Only check leases with a matching deployment ID & group name + if groupName != mgroup.GetName() { + continue + } + + allHostnames = append(allHostnames, sdlutil.AllHostnamesOfManifestGroup(mgroup)...) + if !m.config.HTTPServicesRequireAtLeastOneHost { + continue + } + // For each service that exposes via an Ingress, require a hostname + for _, service := range mgroup.Services { + for _, expose := range service.Expose { + if sdlutil.ShouldBeIngress(expose) && len(expose.Hosts) == 0 { + return fmt.Errorf("%w: service %q exposed on %d:%s must have a hostname", errManifestRejected, service.Name, sdlutil.ExposeExternalPort(expose), expose.Proto) + } + } + } + } + } + + return m.hostnameService.CanReserveHostnames(allHostnames, ownerAddr) +} diff --git a/operator/waiter/waiter.go b/operator/waiter/waiter.go index 69c919e86..73e9417c8 100644 --- a/operator/waiter/waiter.go +++ b/operator/waiter/waiter.go @@ -2,8 +2,9 @@ package waiter import ( "context" - "github.com/tendermint/tendermint/libs/log" "time" + + "github.com/tendermint/tendermint/libs/log" ) type OperatorWaiter interface { diff --git a/script/setup-kind.sh b/script/setup-kind.sh index dfa10eb8b..1d47d19db 100755 --- a/script/setup-kind.sh +++ b/script/setup-kind.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Set up a kubernetes environment with kind. @@ -8,73 +8,113 @@ # * Install Network Policies # * Optionally install metrics-server -set -xe +set -e rootdir="$(dirname "$0")/.." 
install_ns() { - kubectl apply -f "$rootdir/_docs/kustomize/networking/" + set -x + kubectl apply -f "$rootdir/_docs/kustomize/networking/" } install_network_policies() { - kubectl kustomize "$rootdir/_docs/kustomize/akash-services/" | kubectl apply -f- + set -x + kubectl kustomize "$rootdir/_docs/kustomize/akash-services/" | kubectl apply -f- } install_crd() { - kubectl apply -f "$rootdir/pkg/apis/akash.network/crd.yaml" - kubectl apply -f "$rootdir/_docs/kustomize/storage/storageclass.yaml" - kubectl patch node "${KIND_NAME}-control-plane" -p '{"metadata":{"labels":{"akash.network/storageclasses":"beta2.default"}}}' - kubectl apply -f "https://raw.githubusercontent.com/ovrclk/k8s-inventory-operator/master/example/inventory-operator.yaml" + set -x + kubectl apply -f "$rootdir/pkg/apis/akash.network/crd.yaml" + kubectl apply -f "$rootdir/_docs/kustomize/storage/storageclass.yaml" + kubectl patch node "${KIND_NAME}-control-plane" -p '{"metadata":{"labels":{"akash.network/storageclasses":"beta2.default"}}}' } install_metrics() { - # https://github.com/kubernetes-sigs/kind/issues/398#issuecomment-621143252 - kubectl apply -f "$rootdir/_docs/kustomize/kind/kind-metrics-server.yaml" + set -x + # https://github.com/kubernetes-sigs/kind/issues/398#issuecomment-621143252 + kubectl apply -f "$rootdir/_docs/kustomize/kind/kind-metrics-server.yaml" -# kubectl wait pod --namespace kube-system \ -# --for=condition=ready \ -# --selector=k8s-app=metrics-server \ -# --timeout=90s + # kubectl wait pod --namespace kube-system \ + # --for=condition=ready \ + # --selector=k8s-app=metrics-server \ + # --timeout=90s - echo "metrics initialized" + echo "metrics initialized" } -usage() { - cat < %s@%s", d.Path, d.Version, d.Replace.Path, d.Replace.Version) + } + + return fmt.Sprintf("%s@%s", d.Path, d.Version) +} + +func (d buildDep) MarshalJSON() ([]byte, error) { return json.Marshal(d.String()) } +func (d buildDep) MarshalYAML() (interface{}, error) { return d.String(), nil }