Skip to content

Enabled audit logs in kind cluster #196

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 2 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,7 @@ set -eou pipefail
source scripts/dev/set_env_context.sh

# EXIT-trap handler: collect diagnostics from every non-default namespace.
# The diagnostic script is executed as a subprocess (not sourced) so its own
# shell options (set -x, set +e) cannot leak into this script's shell.
# NOTE(review): the diff showed both the old sourced form and the new exec
# form; only the new exec form is kept here.
dump_logs() {
  scripts/evergreen/e2e/dump_diagnostic_information.sh dump_all_non_default_namespaces "$@"
}
trap dump_logs EXIT

Expand Down
5 changes: 5 additions & 0 deletions scripts/dev/contexts/root-context
Original file line number Diff line number Diff line change
Expand Up @@ -116,3 +116,8 @@ export MDB_SEARCH_COMMUNITY_VERSION

export MDB_SEARCH_COMMUNITY_NAME="mongodb-search-community"
export MDB_SEARCH_COMMUNITY_REPO_URL="quay.io/mongodb"

# use RequestResponse for full audit log
# use None to disable audit logging
# https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/#audit-policy
export K8S_AUDIT_LOG_LEVEL="RequestResponse"
14 changes: 10 additions & 4 deletions scripts/dev/evg_host.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@ fi
get_host_url() {
host=$(evergreen host list --json | jq -r ".[] | select (.name==\"${EVG_HOST_NAME}\") | .host_name ")
if [[ "${host}" == "" ]]; then
>&2 echo "Cannot find running EVG host with name ${EVG_HOST_NAME}.
Run evergreen host list --json or visit https://spruce.mongodb.com/spawn/host."
echo "Cannot find running EVG host with name ${EVG_HOST_NAME}.
Run evergreen host list --json or visit https://spruce.mongodb.com/spawn/host." >&2
exit 1
fi
echo "ubuntu@${host}"
Expand All @@ -33,6 +33,10 @@ cmd=${1-""}

if [[ "${cmd}" != "" && "${cmd}" != "help" ]]; then
host_url=$(get_host_url)
# don't echo if script is piped
if [ -t 1 ]; then
echo "Current host: ${host_url}" >&2
fi
fi

kubeconfig_path="${HOME}/.operator-dev/evg-host.kubeconfig"
Expand Down Expand Up @@ -103,7 +107,7 @@ recreate-kind-clusters() {
configure "${1-"amd64"}" 2>&1| prepend "evg_host.sh configure"
echo "Recreating kind clusters on ${EVG_HOST_NAME} (${host_url})..."
# shellcheck disable=SC2088
ssh -T "${host_url}" "cd ~/mongodb-kubernetes; DELETE_KIND_NETWORK=${DELETE_KIND_NETWORK} scripts/dev/recreate_kind_clusters.sh"
ssh -T "${host_url}" "cd ~/mongodb-kubernetes; MDB_BASH_DEBUG=${MDB_BASH_DEBUG} DELETE_KIND_NETWORK=${DELETE_KIND_NETWORK} scripts/dev/recreate_kind_clusters.sh"
echo "Copying kubeconfig to ${kubeconfig_path}"
get-kubeconfig 2>&1| prepend "evg_host.sh configure"
}
Expand All @@ -114,7 +118,7 @@ recreate-kind-cluster() {
configure "${1-"amd64"}" 2>&1| prepend "evg_host.sh configure"
echo "Recreating kind cluster ${cluster_name} on ${EVG_HOST_NAME} (${host_url})..."
# shellcheck disable=SC2088
ssh -T "${host_url}" "cd ~/mongodb-kubernetes; scripts/dev/recreate_kind_cluster.sh ${cluster_name}"
ssh -T "${host_url}" "cd ~/mongodb-kubernetes; MDB_BASH_DEBUG=${MDB_BASH_DEBUG} scripts/dev/recreate_kind_cluster.sh ${cluster_name}"
echo "Copying kubeconfig to ${kubeconfig_path}"
get-kubeconfig
}
Expand Down Expand Up @@ -196,6 +200,7 @@ COMMANDS:
ssh [args] creates ssh session passing optional arguments to ssh
cmd [command with args] execute command as if being on evg host
upload-my-ssh-private-key uploads your ssh keys (~/.ssh/id_rsa) to evergreen host
get-host-url prints evergreen host url
help this message
"
}
Expand All @@ -211,6 +216,7 @@ tunnel) retry_with_sleep tunnel "$@" ;;
sync) sync ;;
cmd) cmd "$@" ;;
upload-my-ssh-private-key) upload-my-ssh-private-key ;;
get-host-url) get_host_url ;;
help) usage ;;
*) usage ;;
esac
2 changes: 2 additions & 0 deletions scripts/dev/recreate_kind_cluster.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@ fi
docker_create_kind_network
docker_run_local_registry "kind-registry" "5000"

create_audit_policy_yaml "${K8S_AUDIT_LOG_LEVEL}"

scripts/dev/setup_kind_cluster.sh -r -e -n "${cluster_name}" -l "172.18.255.200-172.18.255.250" -c "${CLUSTER_DOMAIN}"

source scripts/dev/install_csi_driver.sh
Expand Down
2 changes: 2 additions & 0 deletions scripts/dev/recreate_kind_clusters.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ docker_cleanup 2>&1| prepend "docker_cleanup"
docker_create_kind_network
docker_run_local_registry "kind-registry" "5000"

create_audit_policy_yaml "${K8S_AUDIT_LOG_LEVEL}"

# To future maintainers: whenever modifying this bit, make sure you also update coredns.yaml
(scripts/dev/setup_kind_cluster.sh -n "e2e-operator" -p "10.244.0.0/16" -s "10.96.0.0/16" -l "172.18.255.200-172.18.255.210" -c "${CLUSTER_DOMAIN}" 2>&1 | prepend "e2e-operator") &
(scripts/dev/setup_kind_cluster.sh -n "e2e-cluster-1" -p "10.245.0.0/16" -s "10.97.0.0/16" -l "172.18.255.210-172.18.255.220" -c "${CLUSTER_DOMAIN}" 2>&1 | prepend "e2e-cluster-1") &
Expand Down
55 changes: 41 additions & 14 deletions scripts/dev/setup_kind_cluster.sh
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,15 @@ service_network="10.96.0.0/16"
metallb_ip_range="172.18.255.200-172.18.255.250"
install_registry=0
configure_docker_network=0
while getopts ':n:p:s:c:l:egrhk' opt; do
while getopts ':n:p:s:c:l:aegrhk' opt; do
case ${opt} in
# options with args
n) cluster_name=${OPTARG} ;;
p) pod_network=${OPTARG} ;;
s) service_network=${OPTARG} ;;
c) cluster_domain=${OPTARG} ;;
l) metallb_ip_range=${OPTARG} ;;
# options without
e) export_kubeconfig=1 ;;
g) install_registry=1 ;;
r) recreate=1 ;;
Expand Down Expand Up @@ -127,29 +129,54 @@ EOF
}

kind_create_cluster() {
cat <<EOF | kind create cluster --name "${cluster_name}" --kubeconfig "${kubeconfig_path}" --wait 700s -v 5 --config=-
cat <<EOF > kind-cluster.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: ${kind_image}
extraMounts:
- containerPath: /var/lib/kubelet/config.json
hostPath: ${HOME}/.docker/config.json
networking:
podSubnet: "${pod_network}"
serviceSubnet: "${service_network}"
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
dnsDomain: "${cluster_domain}"
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
endpoint = ["http://${reg_name}:${reg_port}"]
nodes:
- role: control-plane
image: ${kind_image}
extraMounts:
- containerPath: /var/lib/kubelet/config.json
hostPath: ${HOME}/.docker/config.json
- containerPath: /etc/kubernetes/audit-policy
hostPath: ${HOME}/.kind/audit-policy/ # audit-policy.yaml must be copied into this directory
readOnly: true
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
dnsDomain: "${cluster_domain}"
apiServer:
extraArgs:
# Tell the API server where the audit policy is
audit-policy-file: "/etc/kubernetes/audit-policy/audit-policy.yaml"
# Tell the API server where to write audit logs
audit-log-path: "/var/log/kubernetes/audit.log"
extraVolumes:
# Mount the audit policy directory into the apiserver pod
- name: audit-policy
hostPath: /etc/kubernetes/audit-policy
mountPath: /etc/kubernetes/audit-policy
readOnly: true
pathType: Directory
# Mount the host log directory into the apiserver pod to write logs
- name: audit-logs
hostPath: /var/log/kubernetes
mountPath: /var/log/kubernetes
readOnly: false
pathType: DirectoryOrCreate # Creates the directory if it doesn't exist
EOF
cat kind-cluster.yaml
kind create cluster --name "${cluster_name}" --kubeconfig "${kubeconfig_path}" --wait 900s -v=7 --config=kind-cluster.yaml
rm kind-cluster.yaml
echo "finished installing kind"
}

Expand Down
107 changes: 96 additions & 11 deletions scripts/evergreen/e2e/dump_diagnostic_information.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,56 @@

set -Eeou pipefail

test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x

## We need to make sure this script does not fail if one of
## the kubectl commands fails.
set +e

source scripts/funcs/printing

# Compute the per-cluster filename prefix used for dumped log files.
# In a multi-cluster environment (KUBE_ENVIRONMENT_NAME == "multi") files are
# prefixed with the kube context name so dumps from different clusters do not
# collide; otherwise the prefix is empty.
# Arguments: $1 - kube context name
# Outputs:   the prefix (possibly empty) on stdout, without a trailing newline
get_context_prefix() {
  local context_name=$1
  local result="${context_name}_"

  # KUBE_ENVIRONMENT_NAME is exported by the surrounding environment.
  # shellcheck disable=SC2154
  [[ "${KUBE_ENVIRONMENT_NAME:-}" == "multi" ]] || result=""

  printf '%s' "${result}"
}

# Copy the kube-apiserver audit log out of a kind control-plane container
# into logs/<context>_audit.log.
# Arguments: $1 - kube context name (kind contexts look like "kind-<cluster>")
# Globals:   EVG_HOST_NAME (optional) - when non-empty, docker commands are
#            pointed at the remote Evergreen host's daemon via DOCKER_HOST.
# Returns:   1 when the control-plane container cannot be found.
dump_audit_log() {
  local ctx=$1
  echo "Dumping audit log for cluster context: ${ctx}"

  # Use ${VAR:-} so the check does not abort under `set -u` when
  # EVG_HOST_NAME is unset (e.g. running locally rather than on an EVG host);
  # `set +e` above does not disable nounset.
  if [[ "${EVG_HOST_NAME:-}" != "" ]]; then
    local evg_host_url
    evg_host_url=$(scripts/dev/evg_host.sh get-host-url)
    export DOCKER_HOST="ssh://${evg_host_url}"
    echo "Setting DOCKER_HOST=${DOCKER_HOST} to run docker command on a remote docker daemon"
  fi

  # kind names the control-plane container "<cluster>-control-plane";
  # strip the "kind-" context prefix to obtain the cluster name.
  local control_plane_container_name="${ctx#kind-}-control-plane"
  echo "Finding control plane container: ${control_plane_container_name}"

  local control_plane_container
  control_plane_container=$(docker ps -q -f "name=${control_plane_container_name}")
  if [[ "${control_plane_container}" == "" ]]; then
    echo "Cannot find control plane container of ${ctx} using docker ps: "
    docker ps
    return 1
  fi

  echo "Found control plane container: ${control_plane_container}. Dumping audit log to logs/${ctx}_audit.log"
  docker cp "${control_plane_container}:/var/log/kubernetes/audit.log" "logs/${ctx}_audit.log"
}

dump_all_non_default_namespaces() {
echo "Gathering logs from all non-default namespaces"

local original_context
original_context="$(kubectl config current-context)"
kubectl config use-context "${1:-${original_context}}" &> /dev/null
prefix="${1:-${original_context}}_"
# shellcheck disable=SC2154
if [[ "${KUBE_ENVIRONMENT_NAME:-}" != "multi" ]]; then
prefix=""
fi
prefix="$(get_context_prefix "${1:-${original_context}}")"

mkdir -p logs
namespaces=$(kubectl get namespace --output=jsonpath="{.items[*].metadata.name}" | tr ' ' '\n' | \
Expand All @@ -37,9 +70,12 @@ dump_all_non_default_namespaces() {
dump_namespace "${ns}" "${prefix}"
fi
done

dump_audit_log "${1:-${original_context}}"
}

dump_all() {
ctx=$1
[[ "${MODE-}" = "dev" ]] && return

mkdir -p logs
Expand All @@ -48,12 +84,8 @@ dump_all() {
# with a different context.
local original_context
original_context="$(kubectl config current-context)"
kubectl config use-context "${1:-${original_context}}" &> /dev/null
prefix="${1:-${original_context}}_"
# shellcheck disable=SC2154
if [[ "${KUBE_ENVIRONMENT_NAME:-}" != "multi" ]]; then
prefix=""
fi
kubectl config use-context "${ctx:-${original_context}}" &> /dev/null
prefix="$(get_context_prefix "${1:-${original_context}}")"

# The dump process usually happens for a single namespace (the one the test and the operator are installed to)
# but in some exceptional cases (e.g. clusterwide operator) there can be more than 1 namespace to print diagnostics
Expand All @@ -76,6 +108,8 @@ dump_all() {
kubectl -n "kube-system" get configmap coredns -o yaml > "logs/${prefix}coredns.yaml"

kubectl events --all-namespaces > "logs/${prefix}kube_events.json"

dump_audit_log "${ctx}"
}

dump_objects() {
Expand Down Expand Up @@ -355,3 +389,54 @@ dump_namespace() {

kubectl describe nodes > "logs/${prefix}z_nodes_detailed.log" || true
}

# NOTE(review): this main() appears to be accidentally copy-pasted from
# scripts/dev/setup_kind_cluster.sh — the getopts optstring ':n:p:s:c:l:aegrhk'
# and the option variables (cluster_name, pod_network, service_network,
# metallb_ip_range, ...) match that script, not this diagnostics script.
# main is never invoked here (dispatch happens at top level below), and the
# variables context/cmd/audit_logs are assigned but never read. Presumably
# this block should be removed — TODO confirm with the author.
main() {
# NOTE(review): these assignments shadow nothing and are unused in this file.
context=""
cmd=""
metallb_ip_range="172.18.255.200-172.18.255.250"
install_registry=0
configure_docker_network=0
audit_logs=0
# NOTE(review): `usage` is not defined in this script — the h)/*) arms would
# fail at runtime if main were ever called. Verify before keeping this block.
while getopts ':n:p:s:c:l:aegrhk' opt; do
case ${opt} in
# options with args
n) cluster_name=${OPTARG} ;;
p) pod_network=${OPTARG} ;;
s) service_network=${OPTARG} ;;
c) cluster_domain=${OPTARG} ;;
l) metallb_ip_range=${OPTARG} ;;
# options without
e) export_kubeconfig=1 ;;
g) install_registry=1 ;;
r) recreate=1 ;;
k) configure_docker_network=1 ;;
h) usage ;;
*) usage ;;
esac
done
# Drop the parsed options so "$@" would hold only positional arguments.
shift "$((OPTIND - 1))"
}

# ---- command dispatch (script entry point) ----
# This script is executed (not sourced) by its callers, so argument errors
# must `exit`, not `return` — `return` outside a function is a shell error
# in an executed script.
if [[ $# -ne 2 ]]; then
  echo "Missing required parameters" >&2
  echo "Usage: $0 dump_all|dump_all_non_default_namespaces <context>" >&2
  exit 1
fi

func=$1
# $# is exactly 2 here (checked above), so the remaining argument is the
# kube context to pass through to the dump function.
shift

case "${func}" in
dump_all)
  dump_all "$@"
  ;;
dump_all_non_default_namespaces)
  dump_all_non_default_namespaces "$@"
  ;;
*)
  echo "Usage: $0 dump_all|dump_all_non_default_namespaces <context>" >&2
  exit 1
  ;;
esac

This file was deleted.

7 changes: 3 additions & 4 deletions scripts/evergreen/e2e/e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ start_time=$(date +%s)
source scripts/funcs/checks
source scripts/funcs/kubernetes
source scripts/funcs/printing
source scripts/evergreen/e2e/dump_diagnostic_information.sh
source scripts/evergreen/e2e/lib.sh
source scripts/dev/set_env_context.sh

Expand Down Expand Up @@ -107,15 +106,15 @@ fi
# shellcheck disable=SC2154
if [[ "${KUBE_ENVIRONMENT_NAME:-}" = "multi" ]]; then
echo "Dumping diagnostics for context ${CENTRAL_CLUSTER}"
dump_all "${CENTRAL_CLUSTER}" || true
scripts/evergreen/e2e/dump_diagnostic_information.sh dump_all "${CENTRAL_CLUSTER}" || true

for member_cluster in ${MEMBER_CLUSTERS}; do
echo "Dumping diagnostics for context ${member_cluster}"
dump_all "${member_cluster}" || true
scripts/evergreen/e2e/dump_diagnostic_information.sh dump_all "${member_cluster}" || true
done
else
# Dump all the information we can from this namespace
dump_all || true
scripts/evergreen/e2e/dump_diagnostic_information.sh dump_all "$(kubectl config current-context)" || true
fi

# we only have static cluster in openshift, otherwise there is no need to mark and clean them up here
Expand Down
12 changes: 12 additions & 0 deletions scripts/funcs/kubernetes
Original file line number Diff line number Diff line change
Expand Up @@ -263,3 +263,15 @@ EOF

return ${result}
}

# Write a minimal Kubernetes audit Policy manifest that logs every request at
# the given level. setup_kind_cluster.sh mounts the containing directory into
# the kind control-plane node at /etc/kubernetes/audit-policy.
# Arguments: $1 - audit level, e.g. "RequestResponse" (full) or "None" (off)
# See https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/#audit-policy
create_audit_policy_yaml() {
  local level=$1
  local policy_dir="${HOME}/.kind/audit-policy"
  # bugfix: the message previously printed ${HOME}/audit-policy/... while the
  # file was actually written under ${HOME}/.kind/audit-policy/.
  echo "creating audit policy yaml in: ${policy_dir}/audit-policy.yaml"
  mkdir -p "${policy_dir}"
  cat <<EOF > "${policy_dir}/audit-policy.yaml"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: ${level}
EOF
}