diff --git a/_run/cert-manager.yaml b/_run/cert-manager.yaml new file mode 100644 index 000000000..179aa25be --- /dev/null +++ b/_run/cert-manager.yaml @@ -0,0 +1,1194 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/name: cert-manager + name: cert-manager +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +--- +# Source: cert-manager/templates/webhook-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + 
app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +data: +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + 
verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # 
https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update", "patch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# 
Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update", "patch"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [ "gateway.networking.k8s.io" ] + resources: [ "httproutes" ] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. 
+ # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways", "httproutes"] + verbs: ["get", 
"list", "watch"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways/finalizers", "httproutes/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates/status"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] +--- +# 
Source: cert-manager/templates/rbac.yaml +# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["signers"] + verbs: ["approve"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to: +# - Update and sign CertificateSigningRequests referencing cert-manager.io Issuers and ClusterIssuers +# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["certificates.k8s.io"] + resources: ["signers"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] + verbs: ["sign"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 
+kind: ClusterRole +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: +- apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + 
app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + 
app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + 
helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + # Used for leader election by the controller + # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller + # see cmd/cainjector/start.go#L113 + # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller + # see cmd/cainjector/start.go#L137 + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + 
verbs: ["create"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-controller"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: + - 'cert-manager-webhook-ca' + verbs: ["get", "list", "watch", "update"] +# It's not possible to grant CREATE permission on a single resourceName. 
+- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# grant cert-manager permission to manage the leader election lease in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +# Source: cert-manager/templates/rbac.yaml +# grant cert-manager permission to manage the leader election lease in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + 
app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + name: tcp-prometheus-servicemonitor + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: "https" + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: 
Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + spec: + serviceAccountName: cert-manager-cainjector + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-cainjector + image: "quay.io/jetstack/cert-manager-cainjector:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + annotations: + prometheus.io/path: "/metrics" + 
prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + spec: + serviceAccountName: cert-manager + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-controller + image: "quay.io/jetstack/cert-manager-controller:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=kube-system + - --acme-http01-solver-image=quay.io/jetstack/cert-manager-acmesolver:v1.11.0 + - --max-concurrent-challenges=60 + ports: + - containerPort: 9402 + name: http-metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + spec: + serviceAccountName: cert-manager-webhook + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-webhook + image: "quay.io/jetstack/cert-manager-webhook:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - 
--secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - --dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc + + ports: + - name: https + protocol: TCP + containerPort: 10250 + - name: healthcheck + protocol: TCP + containerPort: 6080 + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. 
+ # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). + matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + # Only include 'sideEffects' field in Kubernetes 1.12+ + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /mutate +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). 
+ matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /validate + diff --git a/_run/common-helm.mk b/_run/common-helm.mk index a5516a2f5..a89c45d05 100644 --- a/_run/common-helm.mk +++ b/_run/common-helm.mk @@ -21,7 +21,7 @@ kind-install-helm-chart-loki: helm upgrade --install promtail grafana/promtail \ --version $(PROMTAIL_VERSION) \ --namespace loki-stack \ - -f ../promtail-values.yaml + -f ../promtail-values.yaml helm upgrade --install grafana grafana/grafana \ --version $(GRAFANA_VERSION) \ --namespace loki-stack \ diff --git a/cluster/client.go b/cluster/client.go index 9fd075b09..3e0d95943 100644 --- a/cluster/client.go +++ b/cluster/client.go @@ -82,7 +82,7 @@ type Client interface { tsq remotecommand.TerminalSizeQueue) (ctypes.ExecResult, error) // ConnectHostnameToDeployment Connect a given hostname to a deployment - ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective) error + ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective, sslEnabled bool) error // RemoveHostnameFromDeployment Remove a given hostname from a deployment RemoveHostnameFromDeployment(ctx context.Context, hostname string, leaseID mtypes.LeaseID, allowMissing bool) error @@ -415,7 +415,7 @@ func (c *nullClient) GetHostnameDeploymentConnections(_ context.Context) ([]ctyp return nil, errNotImplemented } -func (c *nullClient) ConnectHostnameToDeployment(_ context.Context, _ ctypes.ConnectHostnameToDeploymentDirective) error { +func (c *nullClient) ConnectHostnameToDeployment(_ context.Context, _ ctypes.ConnectHostnameToDeploymentDirective, _ bool) error { return errNotImplemented } diff --git a/cluster/kube/client.go b/cluster/kube/client.go index 1369a6e08..95a3cfd0d 100644 --- a/cluster/kube/client.go +++ b/cluster/kube/client.go @@ -56,6 +56,7 @@ type client struct 
{ ns string log log.Logger kubeContentConfig *restclient.Config + cfg ClientConfig } func (c *client) String() string { @@ -64,7 +65,7 @@ // NewClient returns new Kubernetes Client instance with provided logger, host and ns. Returns error in-case of failure // configPath may be the empty string -func NewClient(ctx context.Context, log log.Logger, ns string, configPath string) (Client, error) { +func NewClient(ctx context.Context, log log.Logger, ns string, configPath string, ccfg ClientConfig) (Client, error) { config, err := clientcommon.OpenKubeConfig(configPath, log) if err != nil { return nil, errors.Wrap(err, "kube: error building config flags") @@ -98,6 +99,7 @@ ns: ns, log: log.With("client", "kube"), kubeContentConfig: config, + cfg: ccfg, }, nil } diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index e4adecac7..ab7c68af4 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -24,12 +24,13 @@ import ( const ( akashIngressClassName = "akash-ingress-class" + root = "nginx.ingress.kubernetes.io" + certManager = "cert-manager.io" ) -func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDirective) map[string]string { +func (c *client) kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDirective) map[string]string { // For kubernetes/ingress-nginx // https://github.com/kubernetes/ingress-nginx - const root = "nginx.ingress.kubernetes.io" readTimeout := math.Ceil(float64(directive.ReadTimeout) / 1000.0) sendTimeout := math.Ceil(float64(directive.SendTimeout) / 1000.0) @@ -66,11 +67,18 @@ } + switch c.cfg.Ssl.IssuerType { + case clusterIssuer: + result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.cfg.Ssl.IssuerName + case issuer: + 
result[fmt.Sprintf("%s/issuer", certManager)] = c.cfg.Ssl.IssuerName + } + + result[fmt.Sprintf("%s/proxy-next-upstream", root)] = strBuilder.String() return result } -func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective) error { +func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective, tlsEnabled bool) error { ingressName := directive.Hostname ns := builder.LidNS(directive.LeaseID) rules := ingressRules(directive.Hostname, directive.ServiceName, directive.ServicePort) @@ -82,16 +90,27 @@ labels[builder.AkashManagedLabelName] = "true" builder.AppendLeaseLabels(directive.LeaseID, labels) + var tls []netv1.IngressTLS + if tlsEnabled { + tls = []netv1.IngressTLS{ + { + Hosts: []string{directive.Hostname}, + SecretName: fmt.Sprintf("%s-tls", ingressName), + }, + } + } + ingressClassName := akashIngressClassName obj := &netv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: ingressName, Labels: labels, - Annotations: kubeNginxIngressAnnotations(directive), + Annotations: c.kubeNginxIngressAnnotations(directive), }, Spec: netv1.IngressSpec{ IngressClassName: &ingressClassName, Rules: rules, + TLS: tls, }, } diff --git a/cluster/kube/config.go b/cluster/kube/config.go new file mode 100644 index 000000000..d254daafa --- /dev/null +++ b/cluster/kube/config.go @@ -0,0 +1,15 @@ +package kube + +const ( + issuer = "issuer" + clusterIssuer = "cluster-issuer" +) + +type ClientConfig struct { + Ssl Ssl +} + +type Ssl struct { + IssuerType string + IssuerName string +} diff --git a/cluster/kube/deploy_test.go b/cluster/kube/deploy_test.go index 4ebdae7bd..b4c4a7fd7 100644 --- a/cluster/kube/deploy_test.go +++ b/cluster/kube/deploy_test.go @@ -44,7 +44,7 @@ func TestDeploy(t *testing.T) { require.NoError(t, err) log := testutil.Logger(t) - client, err := NewClient(ctx, 
log, "lease", "") + client, err := NewClient(ctx, log, "lease", "", ClientConfig{}) require.NoError(t, err) ctx = context.WithValue(ctx, builder.SettingsKey, builder.NewDefaultSettings()) diff --git a/cluster/kube/k8s_integration_test.go b/cluster/kube/k8s_integration_test.go index 85d31eb92..617114f1a 100644 --- a/cluster/kube/k8s_integration_test.go +++ b/cluster/kube/k8s_integration_test.go @@ -38,7 +38,7 @@ func TestNewClientNSNotFound(t *testing.T) { ctx := context.WithValue(context.Background(), builder.SettingsKey, settings) - ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath) + ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath, ClientConfig{}) require.True(t, kubeErrors.IsNotFound(err)) require.Nil(t, ac) } @@ -76,7 +76,7 @@ func TestNewClient(t *testing.T) { }, metav1.CreateOptions{}) require.NoError(t, err) - ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath) + ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath, ClientConfig{}) require.NoError(t, err) diff --git a/cluster/mocks/client.go b/cluster/mocks/client.go index 784da12cf..3b82bc7a4 100644 --- a/cluster/mocks/client.go +++ b/cluster/mocks/client.go @@ -89,13 +89,13 @@ func (_c *Client_AllHostnames_Call) RunAndReturn(run func(context.Context) ([]v1 return _c } -// ConnectHostnameToDeployment provides a mock function with given fields: ctx, directive -func (_m *Client) ConnectHostnameToDeployment(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective) error { - ret := _m.Called(ctx, directive) +// ConnectHostnameToDeployment provides a mock function with given fields: ctx, directive, tlsEnabled +func (_m *Client) ConnectHostnameToDeployment(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective, tlsEnabled bool) error { + ret := _m.Called(ctx, directive, tlsEnabled) var r0 error - if rf, ok := 
ret.Get(0).(func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective) error); ok { - r0 = rf(ctx, directive) + if rf, ok := ret.Get(0).(func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective, bool) error); ok { + r0 = rf(ctx, directive, tlsEnabled) } else { r0 = ret.Error(0) } @@ -111,13 +111,14 @@ type Client_ConnectHostnameToDeployment_Call struct { // ConnectHostnameToDeployment is a helper method to define mock.On call // - ctx context.Context // - directive v1beta3.ConnectHostnameToDeploymentDirective -func (_e *Client_Expecter) ConnectHostnameToDeployment(ctx interface{}, directive interface{}) *Client_ConnectHostnameToDeployment_Call { - return &Client_ConnectHostnameToDeployment_Call{Call: _e.mock.On("ConnectHostnameToDeployment", ctx, directive)} +// - tlsEnabled bool +func (_e *Client_Expecter) ConnectHostnameToDeployment(ctx interface{}, directive interface{}, tlsEnabled interface{}) *Client_ConnectHostnameToDeployment_Call { + return &Client_ConnectHostnameToDeployment_Call{Call: _e.mock.On("ConnectHostnameToDeployment", ctx, directive, tlsEnabled)} } -func (_c *Client_ConnectHostnameToDeployment_Call) Run(run func(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective)) *Client_ConnectHostnameToDeployment_Call { +func (_c *Client_ConnectHostnameToDeployment_Call) Run(run func(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective, tlsEnabled bool)) *Client_ConnectHostnameToDeployment_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(v1beta3.ConnectHostnameToDeploymentDirective)) + run(args[0].(context.Context), args[1].(v1beta3.ConnectHostnameToDeploymentDirective), args[2].(bool)) }) return _c } @@ -127,7 +128,7 @@ func (_c *Client_ConnectHostnameToDeployment_Call) Return(_a0 error) *Client_Con return _c } -func (_c *Client_ConnectHostnameToDeployment_Call) RunAndReturn(run func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective) error) 
*Client_ConnectHostnameToDeployment_Call { +func (_c *Client_ConnectHostnameToDeployment_Call) RunAndReturn(run func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective, bool) error) *Client_ConnectHostnameToDeployment_Call { _c.Call.Return(run) return _c } diff --git a/cmd/provider-services/cmd/flags/flags.go b/cmd/provider-services/cmd/flags/flags.go index c46204648..d8a35b24d 100644 --- a/cmd/provider-services/cmd/flags/flags.go +++ b/cmd/provider-services/cmd/flags/flags.go @@ -9,5 +9,8 @@ const ( FlagWebRefreshInterval = "web-refresh-interval" FlagRetryDelay = "retry-delay" - FlagKubeConfig = "kubeconfig" + FlagKubeConfig = "kubeconfig" + FlagSslEnabled = "ssl" + FlagSslIssuerType = "ssl-issuer-type" + FlagSslIssuerName = "ssl-issuer-name" ) diff --git a/cmd/provider-services/cmd/run.go b/cmd/provider-services/cmd/run.go index 935aae0ef..74d6655fe 100644 --- a/cmd/provider-services/cmd/run.go +++ b/cmd/provider-services/cmd/run.go @@ -350,6 +350,11 @@ func RunCmd() *cobra.Command { return nil } + cmd.Flags().Bool(providerflags.FlagSslEnabled, false, "enable issuing of SSL certificates on the provider's ingress controller. 
defaults to false") + if err := viper.BindPFlag(providerflags.FlagSslEnabled, cmd.Flags().Lookup(providerflags.FlagSslEnabled)); err != nil { + return nil + } + return cmd } @@ -760,7 +765,16 @@ func createClusterClient(ctx context.Context, log log.Logger, _ *cobra.Command, if ns == "" { return nil, fmt.Errorf("%w: --%s required", errInvalidConfig, providerflags.FlagK8sManifestNS) } - return kube.NewClient(ctx, log, ns, configPath) + + var sslCfg kube.Ssl + if viper.GetBool(providerflags.FlagSslEnabled) { + sslCfg = kube.Ssl{ + IssuerName: viper.GetString(providerflags.FlagSslIssuerName), + IssuerType: viper.GetString(providerflags.FlagSslIssuerType), + } + } + ccfg := kube.ClientConfig{Ssl: sslCfg} + return kube.NewClient(ctx, log, ns, configPath, ccfg) } func showErrorToUser(err error) error { diff --git a/operator/hostnameoperator/hostname_operator.go b/operator/hostnameoperator/hostname_operator.go index f0b6eee39..6b8a18288 100644 --- a/operator/hostnameoperator/hostname_operator.go +++ b/operator/hostnameoperator/hostname_operator.go @@ -389,7 +389,7 @@ func (op *hostnameOperator) applyAddOrUpdateEvent(ctx context.Context, ev ctypes if shouldConnect { op.log.Debug("Updating ingress") // Update or create the existing ingress - err = op.client.ConnectHostnameToDeployment(ctx, directive) + err = op.client.ConnectHostnameToDeployment(ctx, directive, op.isSslEnabled()) } } else { op.log.Debug("Swapping ingress to new deployment") @@ -398,7 +398,7 @@ func (op *hostnameOperator) applyAddOrUpdateEvent(ctx context.Context, ev ctypes if err == nil { // Remove the current entry, if the next action succeeds then it gets inserted below delete(op.hostnames, ev.GetHostname()) - err = op.client.ConnectHostnameToDeployment(ctx, directive) + err = op.client.ConnectHostnameToDeployment(ctx, directive, op.isSslEnabled()) } } @@ -445,7 +445,7 @@ func doHostnameOperator(cmd *cobra.Command) error { logger := operatorcommon.OpenLogger().With("op", "hostname") logger.Info("HTTP 
listening", "address", listenAddr) - client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath) + client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath, config.ClientConfig) if err != nil { return err } @@ -502,3 +502,7 @@ func Cmd() *cobra.Command { return cmd } + +func (op *hostnameOperator) isSslEnabled() bool { + return op.cfg.ClientConfig.Ssl != clusterClient.Ssl{} +} diff --git a/operator/hostnameoperator/hostname_operator_test.go b/operator/hostnameoperator/hostname_operator_test.go index 6e857401b..e86047d04 100644 --- a/operator/hostnameoperator/hostname_operator_test.go +++ b/operator/hostnameoperator/hostname_operator_test.go @@ -424,7 +424,7 @@ func TestHostnameOperatorApplyAdd(t *testing.T) { } s.client.On("GetManifestGroup", mock.Anything, leaseID).Return(true, mg, nil) directive := buildDirective(ev, serviceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, directive).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, directive, mock.Anything).Return(nil) managed := grabManagedHostnames(t, s.op.server.GetRouter().ServeHTTP) require.Empty(t, managed) @@ -511,7 +511,7 @@ func TestHostnameOperatorApplyAddMultipleServices(t *testing.T) { } s.client.On("GetManifestGroup", mock.Anything, leaseID).Return(true, mg, nil) directive := buildDirective(ev, serviceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, directive).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, directive, mock.Anything).Return(nil) err := s.op.applyEvent(s.ctx, ev) require.NoError(t, err) @@ -596,9 +596,9 @@ func TestHostnameOperatorApplyUpdate(t *testing.T) { s.client.On("GetManifestGroup", mock.Anything, secondLeaseID).Return(true, mg2, nil) directive := buildDirective(ev, serviceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, 
directive).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, directive, mock.Anything).Return(nil) secondDirective := buildDirective(secondEv, secondServiceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, secondDirective).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, secondDirective, mock.Anything).Return(nil) s.client.On("RemoveHostnameFromDeployment", mock.Anything, hostname, leaseID, false).Return(nil) diff --git a/operator/ipoperator/ip_operator.go b/operator/ipoperator/ip_operator.go index eee9674ec..315d5a96f 100644 --- a/operator/ipoperator/ip_operator.go +++ b/operator/ipoperator/ip_operator.go @@ -579,13 +579,13 @@ func doIPOperator(cmd *cobra.Command) error { poolName := viper.GetString(flagMetalLbPoolName) logger := operatorcommon.OpenLogger().With("operator", "ip") - opcfg := operatorcommon.GetOperatorConfigFromViper() - _, err := sdk.AccAddressFromBech32(opcfg.ProviderAddress) + config := operatorcommon.GetOperatorConfigFromViper() + _, err := sdk.AccAddressFromBech32(config.ProviderAddress) if err != nil { return fmt.Errorf("%w: provider address must valid bech32", err) } - client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath) + client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath, config.ClientConfig) if err != nil { return err } @@ -603,7 +603,7 @@ func doIPOperator(cmd *cobra.Command) error { logger.Info("clients", "kube", client, "metallb", mllbc) logger.Info("HTTP listening", "address", listenAddr) - op, err := newIPOperator(logger, client, opcfg, operatorcommon.IgnoreListConfigFromViper(), mllbc) + op, err := newIPOperator(logger, client, config, operatorcommon.IgnoreListConfigFromViper(), mllbc) if err != nil { return err } diff --git a/operator/operatorcommon/operator_config.go b/operator/operatorcommon/operator_config.go index 2d3a78db9..b66c664e0 100644 --- 
a/operator/operatorcommon/operator_config.go +++ b/operator/operatorcommon/operator_config.go @@ -1,6 +1,7 @@ package operatorcommon import ( "time" + "github.com/akash-network/provider/cluster/kube" "github.com/spf13/viper" @@ -13,13 +14,24 @@ type OperatorConfig struct { WebRefreshInterval time.Duration RetryDelay time.Duration ProviderAddress string + ClientConfig kube.ClientConfig } func GetOperatorConfigFromViper() OperatorConfig { + var sslCfg kube.Ssl + if viper.GetBool(providerflags.FlagSslEnabled) { + sslCfg = kube.Ssl{ + IssuerName: viper.GetString(providerflags.FlagSslIssuerName), + IssuerType: viper.GetString(providerflags.FlagSslIssuerType), + } + } + ccfg := kube.ClientConfig{Ssl: sslCfg} + return OperatorConfig{ PruneInterval: viper.GetDuration(providerflags.FlagPruneInterval), WebRefreshInterval: viper.GetDuration(providerflags.FlagWebRefreshInterval), RetryDelay: viper.GetDuration(providerflags.FlagRetryDelay), ProviderAddress: viper.GetString(flagProviderAddress), + ClientConfig: ccfg, } }