Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 30 additions & 0 deletions .github/actions/start-promtail/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
---
name: Start Promtail
# NOTE(review): the action only starts the container; nothing stops it. The
# runner removes it during job teardown — description updated to match.
description: Start promtail in a docker container to ship test results to Grafana Loki
inputs:
  loki_url:
    description: URL endpoint of the Grafana Loki instance
    required: true
runs:
  using: 'composite'
  steps:
    - name: Start promtail container
      shell: bash
      # Mounts the repo's promtail config and the test log directory, and
      # forwards workflow metadata as env vars that promtail-config.yaml
      # expands via -config.expand-env. Values are quoted so URLs, branch
      # names etc. with shell-special characters cannot break the command.
      run: |
        docker run -d \
          --name=promtail \
          -v "${{ github.workspace }}/test/dashboard/promtail/promtail-config.yaml:/etc/promtail/promtail-config.yaml" \
          -v "${{ github.workspace }}/test/dashboard/logs/:/var/log" \
          -e TEST_OUTDIR=test/dashboard/logs \
          -e LOKI_URL="${{ inputs.loki_url }}" \
          -e GITHUB_RUN_ID="${{ github.run_id }}" \
          -e GITHUB_WORKFLOW="${{ github.workflow }}" \
          -e GITHUB_EVENT_NAME="${{ github.event_name }}" \
          -e GITHUB_REPOSITORY="${{ github.repository }}" \
          -e GITHUB_SERVER_URL="${{ github.server_url }}" \
          -e GITHUB_JOB="${{ github.job }}" \
          -e GITHUB_HEAD_REF="${{ github.head_ref }}" \
          -e GITHUB_SHA="${{ github.sha }}" \
          -e GITHUB_ACTOR="${{ github.actor }}" \
          grafana/promtail:3.4.4 \
          -config.file=/etc/promtail/promtail-config.yaml \
          -config.expand-env=true
48 changes: 45 additions & 3 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -136,11 +136,25 @@ jobs:
with:
name: nginx-agent-unsigned-snapshots
path: build

- name: Create Results Directory
run: mkdir -p ${{ github.workspace }}/test/dashboard/logs/${{ github.job }}/${{matrix.container.image}}-${{matrix.container.version}}

- name: Start Promtail
uses: ./.github/actions/start-promtail
with:
loki_url: ${{ secrets.LOKI_DASHBOARD_URL }}

- name: Run Integration Tests
run: |
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@${{ env.NFPM_VERSION }}
OS_RELEASE="${{ matrix.container.image }}" OS_VERSION="${{ matrix.container.version }}" \
make integration-test
make integration-test | tee ${{github.workspace}}/test/dashboard/logs/${{github.job}}/${{matrix.container.image}}-${{matrix.container.version}}/raw_logs.log
exit "${PIPESTATUS[0]}"

- name: Format Results
if: always()
run: bash ./scripts/dashboard/format_results.sh ${{job.status}} ${{github.job}}/${{matrix.container.image}}-${{matrix.container.version}} ${{github.workspace}}

official-oss-image-integration-tests:
name: Integration Tests - Official OSS Images
Expand Down Expand Up @@ -173,13 +187,27 @@ jobs:
with:
name: nginx-agent-unsigned-snapshots
path: build

- name: Create Results Directory
run: mkdir -p ${{ github.workspace }}/test/dashboard/logs/${{ github.job }}/${{matrix.container.image}}-${{matrix.container.version}}

- name: Start Promtail
uses: ./.github/actions/start-promtail
with:
loki_url: ${{ secrets.LOKI_DASHBOARD_URL }}

- name: Run Integration Tests
run: |
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@${{ env.NFPM_VERSION }}
CONTAINER_NGINX_IMAGE_REGISTRY="docker-registry.nginx.com" \
TAG="${{ matrix.container.version }}-${{ matrix.container.image }}" \
OS_RELEASE="${{ matrix.container.release }}" OS_VERSION="${{ matrix.container.version }}" \
make official-image-integration-test
make official-image-integration-test | tee ${{github.workspace}}/test/dashboard/logs/${{github.job}}/${{matrix.container.image}}-${{matrix.container.version}}/raw_logs.log
exit "${PIPESTATUS[0]}"

- name: Format Results
if: always()
run: bash ./scripts/dashboard/format_results.sh ${{job.status}} ${{github.job}}/${{matrix.container.image}}-${{matrix.container.version}} ${{github.workspace}}

official-plus-image-integration-tests:
name: Integration Tests - Official Plus Images
Expand Down Expand Up @@ -226,13 +254,27 @@ jobs:
registry: ${{ secrets.REGISTRY_URL }}
username: ${{ secrets.REGISTRY_USERNAME }}
password: ${{ secrets.REGISTRY_PASSWORD }}

- name: Create Results Directory
run: mkdir -p ${{ github.workspace }}/test/dashboard/logs/${{ github.job }}/${{matrix.container.image}}-${{matrix.container.version}}

- name: Start Promtail
uses: ./.github/actions/start-promtail
with:
loki_url: ${{ secrets.LOKI_DASHBOARD_URL }}

- name: Run Integration Tests
run: |
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@${{ env.NFPM_VERSION }}
CONTAINER_NGINX_IMAGE_REGISTRY="${{ secrets.REGISTRY_URL }}" \
TAG="${{ matrix.container.plus }}-${{ matrix.container.image }}-${{ matrix.container.version }}" \
OS_RELEASE="${{ matrix.container.release }}" OS_VERSION="${{ matrix.container.version }}" IMAGE_PATH="${{ matrix.container.path }}" \
make official-image-integration-test
make official-image-integration-test | tee ${{github.workspace}}/test/dashboard/logs/${{github.job}}/${{matrix.container.image}}-${{matrix.container.version}}/raw_logs.log
exit "${PIPESTATUS[0]}"

- name: Format Results
if: always()
run: bash ./scripts/dashboard/format_results.sh ${{job.status}} ${{github.job}}/${{matrix.container.image}}-${{matrix.container.version}} ${{github.workspace}}

performance-tests:
name: Performance Tests
Expand Down
2 changes: 2 additions & 0 deletions Makefile.tools
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ NFPM = github.com/goreleaser/nfpm/v2/cmd/[email protected]
GOTESTCOVERAGE = github.com/vladopajic/go-test-coverage/[email protected]
BENCHSTAT = golang.org/x/perf/cmd/[email protected]
BUF = github.com/bufbuild/buf/cmd/[email protected]
PROMTAIL = github.com/prometheus/promtail/cmd/[email protected]

install-tools: ## Install tool dependencies
@echo "Installing Tools"
Expand All @@ -22,4 +23,5 @@ install-tools: ## Install tool dependencies
@$(GOINST) $(GOTESTCOVERAGE)
@$(GOINST) $(BENCHSTAT)
@$(GOINST) $(BUF)
@$(GOINST) $(PROMTAIL)
@$(GORUN) $(LEFTHOOK) install
224 changes: 224 additions & 0 deletions scripts/dashboard/format_results.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,224 @@
#!/bin/bash
# Process integration-test logs and generate formatted result files for the
# test dashboard.
#
# Usage: ./format_results.sh <job_result> <test_type> <workspace>

set -euo pipefail

# Require exactly three positional arguments.
if (( $# != 3 )); then
    echo "Usage: $0 <job_result> <test_type> <workspace>"
    exit 1
fi

# Positional parameters.
RESULT="$1"      # CI job status; NOTE(review): appears unused below — confirm
TEST_TYPE="$2"   # e.g. "<job>/<image>-<version>"
WORKSPACE="$3"   # repository checkout root

# Derived paths: per-test output directory and the raw log it contains.
OUTPUT_DIR="$WORKSPACE/test/dashboard/logs/$TEST_TYPE"
INPUT_FILE="$OUTPUT_DIR/raw_logs.log"

# Bail out early when the raw log is missing.
if [[ ! -f "$INPUT_FILE" ]]; then
    echo "Error: Input file not found: $INPUT_FILE"
    exit 1
fi

# Convert one "key=value key2=\"quoted value\"" log line into a single-line
# JSON object. Values are emitted as JSON strings; surrounding double quotes
# are stripped from quoted values and any inner quotes are escaped.
format_log() {
    local raw="$1"
    local fields=""
    local k v

    # Repeatedly peel the leftmost key=value pair off the line; a value is
    # either a double-quoted string (with backslash escapes) or a bare token.
    while [[ "$raw" =~ ([a-zA-Z0-9_]+)=((\"([^\"\\]|\\.)*\")|[^[:space:]]+) ]]; do
        k="${BASH_REMATCH[1]}"
        v="${BASH_REMATCH[2]}"
        raw="${raw#*"${k}=${v}"}"

        # Quoted value: drop the surrounding quotes, escape any remaining ones.
        if [[ "$v" == \"*\" ]]; then
            v="${v:1:${#v}-2}"
            v="${v//\"/\\\"}"
        fi
        fields+="\"$k\":\"$v\","
    done

    # Trim the trailing comma and wrap in braces ("{}" for no matches).
    printf '%s\n' "{${fields%,}}"
}

# Write a result.json summary for one test into output_dir.
#
# $1 start_at   - "YYYY/MM/DD HH:MM:SS", or the literal placeholder "start_at"
# $2 end_at     - same format, or the literal placeholder "end_at" (= now)
# $3 result     - "pass" or "fail"
# $4 msg        - error trace, or the literal placeholder "msg" (= empty)
# $5 output_dir - directory for result.json (created if absent)
write_result() {
    start_at="$1"
    end_at="$2"
    result="$3"
    msg="$4"
    output_dir="$5"
    duration_seconds=0
    # Fix: initialize up front. Previously these were only assigned inside
    # the timestamp branch, so under `set -u` the echo below aborted the
    # script with "unbound variable" whenever a timestamp was missing.
    start_iso=""
    end_iso=""

    if [[ $end_at == "end_at" ]]; then
        end_at=$(date +"%Y/%m/%d %H:%M:%S")
    fi

    # Compute duration and ISO-8601 timestamps only when both inputs match
    # the expected format. Uses GNU date (-d, %N) — Linux runners only.
    if [[ "$start_at" =~ ^[0-9]{4}/[0-9]{2}/[0-9]{2}\ [0-9]{2}:[0-9]{2}:[0-9]{2}$ && \
          "$end_at" =~ ^[0-9]{4}/[0-9]{2}/[0-9]{2}\ [0-9]{2}:[0-9]{2}:[0-9]{2}$ ]]; then
        duration_seconds=$(( $(date -d "$end_at" +%s) - $(date -d "$start_at" +%s) ))
        start_iso=$(date -d "$start_at" +"%Y-%m-%dT%H:%M:%S.%NZ")
        end_iso=$(date -d "$end_at" +"%Y-%m-%dT%H:%M:%S.%NZ")
    fi

    if [[ ${msg} == "msg" ]]; then
        msg=""
    fi

    # Fix: escape msg so the emitted JSON stays valid even when the error
    # trace contains backslashes, double quotes, or newlines.
    msg="${msg//\\/\\\\}"
    msg="${msg//\"/\\\"}"
    msg="${msg//$'\n'/\\n}"

    mkdir -p "$output_dir"
    result_file="$output_dir/result.json"

    echo "{\"start_at\":\"$start_iso\", \"end_at\":\"$end_iso\", \"duration_seconds\":$duration_seconds, \"result\":\"$result\", \"msg\":\"$msg\"}" > "$result_file"
}

# Parse the raw `go test` output in $INPUT_FILE line-by-line and, for each
# test, write a result.json (via write_result) and a structured test.log
# under $OUTPUT_DIR/<test name>/.
#
# Bash has no records, so 5-element arrays stand in, with fields
# (0=name, 1=start_at, 2=end_at, 3=result, 4=msg); each field is initialized
# to its own field name as an "unset" placeholder:
#   test_group   - the top-level (parent) test currently being tracked
#   current_test - the test named by the most recent "=== RUN" line
#   test_queue   - flat FIFO of pending subtests, 5 slots per entry
# Reads globals INPUT_FILE and OUTPUT_DIR set at the top of the script.
format_results() {
    test_group=("name" "start_at" "end_at" "result" "msg")
    current_test=("name" "start_at" "end_at" "result" "msg")
    test_queue=()
    is_running=false   # true while a top-level test group is open
    has_failed=false   # true while accumulating an error trace
    error_trace=""     # collected failure output for the current test

    while IFS= read -r line; do
        # Detect if the line is a test start ("=== RUN <name>")
        if [[ "$line" =~ ^===\ RUN[[:space:]]+(.+) ]]; then
            test_name="${BASH_REMATCH[1]}"
            has_failed=false

            # First RUN opens the group; later RUNs while a group is open
            # push the previously-running subtest onto the queue.
            if [[ "${test_group[0]}" == "name" && "$is_running" == false ]]; then
                is_running=true
                test_group[0]="$test_name"
            elif [[ "${test_group[0]}" != "name" && "$is_running" == true ]]; then
                is_running=true
                if [[ "${current_test[0]}" != "${test_group[0]}" ]]; then
                    test_queue+=("${current_test[@]}")
                fi
            fi

            current_test=("$test_name" "start_at" "end_at" "result" "msg")
            continue
        fi

        # Get start time from "<ts> ... INFO starting ... tes[t...]" lines.
        # NOTE(review): trailing `test*` matches "tes" plus zero-or-more "t";
        # presumably `test.*` was intended — behavior left unchanged here.
        if [[ "$line" =~ ^([0-9]{4}/[0-9]{2}/[0-9]{2}[[:space:]][0-9]{2}:[0-9]{2}:[0-9]{2}).*INFO[[:space:]]+starting.*test* ]]; then
            test_start="${BASH_REMATCH[1]}"
            current_test[1]="$test_start"
            if [[ "${current_test[0]}" == "${test_group[0]}" ]]; then
                test_group[1]="$test_start"
            fi
            continue
        fi

        # Get end time; the group keeps the latest finish time observed.
        if [[ "$line" =~ ^([0-9]{4}/[0-9]{2}/[0-9]{2}[[:space:]][0-9]{2}:[0-9]{2}:[0-9]{2}).*INFO[[:space:]]+finished.*test* ]]; then
            test_end="${BASH_REMATCH[1]}"
            if [[ "${current_test[2]}" == "end_at" ]]; then
                current_test[2]="$test_end"
                if [[ "${current_test[0]}" == "${test_group[0]}" ]]; then
                    test_group[2]="$test_end"
                fi
            elif [[ "${current_test[2]}" != "end_at" ]]; then
                test_group[2]="$test_end"
            fi
            continue
        fi

        # Capture error messages: these markers open a failure trace.
        if [[ "$line" == *"Error Trace"* || "$line" == *"runtime error"* ]]; then
            has_failed=true
            error_trace+="${line}"$'\n'
            continue
        fi

        # Detect result lines ("--- PASS: <name>" / "--- FAIL: <name>").
        if [[ "$line" == *"--- PASS"* || "$line" == *"--- FAIL"* ]]; then
            [[ "$line" == *"--- PASS"* ]] && result_val="pass"
            [[ "$line" == *"--- FAIL"* ]] && result_val="fail"

            has_failed=false

            # Clear current_test field: either it IS the group (reported
            # below) or it is a finished subtest that joins the queue.
            if [[ "${current_test[0]}" != "name" ]]; then
                if [[ "${current_test[0]}" == "${test_group[0]}" ]]; then
                    current_test=("name" "start_at" "end_at" "result" "msg")
                else
                    test_queue+=("${current_test[@]}")
                    current_test=("name" "start_at" "end_at" "result" "msg")
                fi
            fi

            # Write results for the test group. NOTE(review): the two ORed
            # branches collapse to `test_group[0] != "name"` for any queue
            # length — presumably intentional; left as written.
            if [[ "${test_group[0]}" != "name" && "${#test_queue[@]}" -eq 0 ]] ||
                [[ "${test_group[0]}" != "name" && "${#test_queue[@]}" -gt 0 ]]; then
                # Sanity check: the PASS/FAIL line must name the open group.
                if [[ "$line" != *"${test_group[0]}"* ]]; then
                    echo "Error: Test name did not match. Expected '${test_group[0]}', in line: '$line'."
                    exit 1
                fi
                test_group[3]="$result_val"
                if [[ "$result_val" == "fail" ]]; then
                    # Replace the "msg" placeholder before appending the trace.
                    if [[ ${test_group[4]} == "msg" ]]; then
                        test_group[4]=""
                    fi
                    test_group[4]+="$error_trace"
                fi
                write_result "${test_group[1]}" "${test_group[2]}" "${test_group[3]}" "${test_group[4]}" "$OUTPUT_DIR/${test_group[0]}"
                test_group=("name" "start_at" "end_at" "result" "msg")
                is_running=false
                continue
            fi

            # Write results for individual tests in the queue (FIFO: this
            # result line is attributed to the oldest queued subtest).
            if [[ "${test_group[0]}" == "name" && "${#test_queue[@]}" -gt 0 ]]; then
                test_match=("${test_queue[0]}" "${test_queue[1]}" "${test_queue[2]}" "${test_queue[3]}" "${test_queue[4]}")
                test_match[3]="$result_val"
                if [[ "$result_val" == "fail" ]]; then
                    if [[ ${test_match[4]} == "msg" ]]; then
                        test_match[4]=""
                    fi
                    test_match[4]+="$error_trace"
                fi
                write_result "${test_match[1]}" "${test_match[2]}" "${test_match[3]}" "${test_match[4]}" "$OUTPUT_DIR/${test_match[0]}"

                # Drop the reported entry's five slots and reindex the queue.
                for i in {0..4}; do
                    unset 'test_queue[$i]'
                done
                test_queue=("${test_queue[@]:5}")
            fi

            # No tests to analyze: clear the trace between groups.
            if [[ "${test_group[0]}" == "name" && "${#test_queue[@]}" -eq 0 ]]; then
                error_trace=""
                continue
            fi
        fi

        # Capture error messages while inside an open failure trace.
        if [[ $has_failed == true ]]; then
            error_trace+="${line}"$'\n'
        fi

        # Capture structured agent logs ("time=<ISO ts> level=...") into the
        # current group's test.log, one JSON object per line via format_log.
        if [[ "$line" =~ time=([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{3}Z)[[:space:]]+level= ]]; then
            LOG_LINE=$(format_log "$line")
            LOG_FILE_OUT_DIR="$OUTPUT_DIR/${test_group[0]}"
            LOG_FILE=${LOG_FILE_OUT_DIR}/test.log
            if [[ ! -d "$LOG_FILE_OUT_DIR" ]]; then
                mkdir -p "$LOG_FILE_OUT_DIR"
            fi
            echo "$LOG_LINE" >> "$LOG_FILE"
            continue
        fi
    done < "$INPUT_FILE"
}

# Script entry point: parse the raw log and emit per-test result files.
format_results
Loading