Commit

Testing full PR workflow.
alliepiper committed Apr 30, 2024
1 parent cbab711 commit 1af0389
Showing 7 changed files with 193 additions and 162 deletions.
31 changes: 21 additions & 10 deletions .github/actions/workflow-build/action.yml
@@ -38,9 +38,6 @@ runs:
      id: get-pr-info
      uses: nv-gha-runners/get-pr-info@main

    - run: mkdir workflow
      shell: bash --noprofile --norc -euo pipefail {0}

    - name: Inspect changes
      if: ${{ inputs.inspect-changes_script != '' }}
      id: inspect-changes
@@ -51,13 +48,14 @@ runs:
        echo "Running inspect-changes script..."
        ${{ inputs.inspect-changes_script }} ${base_sha} ${GITHUB_SHA}
        echo "Exporting summary..."
        mkdir workflow
        cp ${GITHUB_STEP_SUMMARY} workflow/changes.md
    - name: Parse matrix file into a workflow
      id: build-workflow
      shell: bash --noprofile --norc -euo pipefail {0}
      env:
        skip_tests: ${{ inputs.skip_tests && '--skip-tests' || ''}}
        skip_tests: ${{ inputs.skip_tests == 'true' && '--skip-tests' || ''}}
        dirty_projects_flag: ${{ inputs.inspect-changes_script != '' && '--dirty-projects' || ''}}
        dirty_projects: ${{ steps.inspect-changes.outputs.dirty_projects }}
        matrix_parser: ${{ inputs.matrix_parser && inputs.matrix_parser || '${GITHUB_ACTION_PATH}/build-workflow.py' }}
@@ -69,13 +67,26 @@
          ${{ env.skip_tests }} \
          ${{ env.dirty_projects_flag }} ${{ env.dirty_projects }}
        echo "Exporting workflow artifacts..."
        grep -E '^WORKFLOW=' ${GITHUB_OUTPUT} | sed -e 's/^WORKFLOW=//' | jq . > workflow/workflow.json
        grep -E '^WORKFLOW_KEYS=' ${GITHUB_OUTPUT} | sed -e 's/^WORKFLOW_KEYS=//' | jq . > workflow/keys.json
        grep -E '^WORKFLOW_JOB_IDS=' ${GITHUB_OUTPUT} | sed -e 's/^WORKFLOW_JOB_IDS=//' | jq . > workflow/job_ids.json
        echo "::group::Workflow"
        cat workflow/workflow.json
        echo "::endgroup::"
        echo "Exporting summary..."
        cp ${GITHUB_STEP_SUMMARY} workflow/runners.md
        echo "::group::Runners"
        cat workflow/runner_summary.json | jq -r '"# \(.heading)\n\n\(.body)"' | tee -a "${GITHUB_STEP_SUMMARY}"
        echo "::endgroup::"
        echo "::group::Job List"
        cat workflow/job_list.txt
        echo "::endgroup::"
        echo "Setting outputs..."
        echo "::group::GHA Output: WORKFLOW"
        printf "WORKFLOW=%s\n" "$(cat workflow/workflow.json | jq -c '.')" | tee -a "${GITHUB_OUTPUT}"
        echo "::endgroup::"
        echo "::group::GHA Output: WORKFLOW_KEYS"
        printf "WORKFLOW_KEYS=%s\n" "$(cat workflow/workflow_keys.json | jq -c '.')" | tee -a "${GITHUB_OUTPUT}"
        echo "::endgroup::"
    - name: Upload artifacts
      uses: actions/upload-artifact@v3
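The export step above relies on the GitHub Actions output protocol: each KEY=value line appended to the file named by $GITHUB_OUTPUT becomes a step output. A minimal Python sketch of the same pattern; the payload and the temp-file fallback are stand-ins, not the action's real data:

    import json
    import os
    import tempfile

    # Stand-in for the file GitHub Actions points GITHUB_OUTPUT at:
    os.environ.setdefault("GITHUB_OUTPUT", tempfile.mkstemp()[1])

    workflow = {"pull_request": {"standalone": []}}  # hypothetical payload

    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        # Compact one-line JSON keeps the whole value on a single KEY=value
        # line, matching the `jq -c '.'` usage in the step above:
        print(f"WORKFLOW={json.dumps(workflow, separators=(',', ':'))}", file=f)
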
153 changes: 54 additions & 99 deletions .github/actions/workflow-build/build-workflow.py
@@ -71,30 +71,13 @@
matrix_yaml = None


def write_output(key, value, outfile=sys.stderr):
    # Escape any newlines in the value:
    value = value.replace('\n', '\\n')

    print(f"::group::GHA Output: {key}", file=outfile)
    print(f"{key}={value}", file=outfile)
    print("::endgroup::", file=outfile)

    output_file = os.environ.get('GITHUB_OUTPUT')
    if output_file:
        with open(output_file, 'a') as f:
            print(f"{key}={value}", file=f)


def write_step_summary(name, text, outfile=sys.stderr):
    print(f"::group::{name}", file=outfile)
    print(text, file=outfile)
    print(f"::endgroup::", file=outfile)

    output_file = os.environ.get('GITHUB_STEP_SUMMARY')
    if output_file:
        with open(output_file, 'a') as f:
            print(text, file=f)
def write_json_file(filename, json_object):
    with open(filename, 'w') as f:
        json.dump(json_object, f, indent=2)

def write_text_file(filename, text):
    with open(filename, 'w') as f:
        print(text, file=f)

def error_message_with_matrix_job(matrix_job, message):
    return f"{matrix_job['origin']['workflow_location']}: {message}\n Input: {matrix_job['origin']['original_matrix_job']}"
@@ -494,65 +477,6 @@ def natural_sort_key(key):
    return workflow_dispatch_groups


def get_id_to_full_job_name_map(final_workflow):
    id_to_full_job_name = {}
    for group_name, group_json in final_workflow.items():
        if 'standalone' in group_json:
            for job_json in group_json['standalone']:
                id_to_full_job_name[job_json['id']] = f"{group_name} {job_json['name']}"
        if 'two_stage' in group_json:
            for two_stage_json in group_json['two_stage']:
                for job_json in two_stage_json['producers'] + two_stage_json['consumers']:
                    id_to_full_job_name[job_json['id']] = f"{group_name} {job_json['name']}"
    return id_to_full_job_name


def pretty_print_workflow(final_workflow, outfile):
    print(f"::group::Job list", file=outfile)

    total_jobs = 0
    runner_counts = {}

    def print_job_array(key, group_json):
        nonlocal total_jobs
        nonlocal runner_counts

        job_array = group_json[key] if key in group_json else []
        for job_json in job_array:
            total_jobs += 1
            print(f"{total_jobs:4} {key:13} {job_json['name']}", file=outfile)
            runner = job_json['runner']
            runner_counts[runner] = runner_counts.get(runner, 0) + 1

    for group_name, group_json in final_workflow.items():
        print(f"{'':4} {group_name}:", file=outfile)
        print_job_array('standalone', group_json)
        if 'two_stage' in group_json:
            for two_stage_json in group_json['two_stage']:
                print_job_array('producers', two_stage_json)
                print_job_array('consumers', two_stage_json)
    print(f"::endgroup::", file=outfile)

    # Sort by descending counts:
    runner_counts = {k: v for k, v in sorted(runner_counts.items(), key=lambda item: item[1], reverse=True)}

    runner_counts_text = f"<details><summary><h2>🏃‍♂️ Runner counts (total jobs: {total_jobs})</h2></summary>\n\n"

    runner_counts_text += f"| {'num':^4} | Runner\n"
    runner_counts_text += "|------|------\n"
    for runner, count in runner_counts.items():
        runner_counts_text += f"| {count:4} | `{runner}`\n"

    runner_counts_text += "</details>"

    write_output("RUNNER_COUNTS", runner_counts_text, outfile=outfile)
    write_step_summary("Runner Counts", runner_counts_text, outfile=outfile)

    print("::group::Final Workflow JSON", file=outfile)
    print(json.dumps(final_workflow, indent=2), file=outfile)
    print("::endgroup::", file=outfile)


def find_workflow_line_number(workflow_name):
    regex = re.compile(f"^( )*{workflow_name}:", re.IGNORECASE)
    line_number = 0
@@ -727,29 +651,60 @@ def parse_workflow_dispatch_groups(args, workflow_name):
    return finalize_workflow_dispatch_groups(workflow_dispatch_groups)


def write_outputs(final_workflow):
    job_list = []
    runner_counts = {}
    id_to_full_job_name = {}

    total_jobs = 0
    def process_job_array(group_name, array_name, parent_json):
        nonlocal job_list
        nonlocal runner_counts
        nonlocal total_jobs

        job_array = parent_json[array_name] if array_name in parent_json else []
        for job_json in job_array:
            total_jobs += 1
            job_list.append(f"{total_jobs:4} id: {job_json['id']:<4} {array_name:13} {job_json['name']}")
            id_to_full_job_name[job_json['id']] = f"{group_name} {job_json['name']}"
            runner = job_json['runner']
            runner_counts[runner] = runner_counts.get(runner, 0) + 1

    for group_name, group_json in final_workflow.items():
        job_list.append(f"{'':4} {group_name}:")
        process_job_array(group_name, 'standalone', group_json)
        if 'two_stage' in group_json:
            for two_stage_json in group_json['two_stage']:
                process_job_array(group_name, 'producers', two_stage_json)
                process_job_array(group_name, 'consumers', two_stage_json)

    # Sort by descending counts:
    runner_counts = {k: v for k, v in sorted(runner_counts.items(), key=lambda item: item[1], reverse=True)}

    runner_heading = f"🏃‍♂️ Runner counts (total jobs: {total_jobs})"

    runner_counts_table = f"| {'#':^4} | Runner\n"
    runner_counts_table += "|------|------\n"
    for runner, count in runner_counts.items():
        runner_counts_table += f"| {count:4} | `{runner}`\n"

    runner_json = { "heading": runner_heading, "body": runner_counts_table }

    os.makedirs("workflow", exist_ok=True)
    write_json_file("workflow/workflow.json", final_workflow)
    write_json_file("workflow/workflow_keys.json", list(final_workflow.keys()))
    write_json_file("workflow/job_ids.json", id_to_full_job_name)
    write_text_file("workflow/job_list.txt", "\n".join(job_list))
    write_json_file("workflow/runner_summary.json", runner_json)


def print_gha_workflow(args):
    final_workflow = {}
    for workflow_name in args.workflows:
        workflow_dispatch_groups = parse_workflow_dispatch_groups(args, workflow_name)
        merge_dispatch_groups(final_workflow, workflow_dispatch_groups)

    id_to_full_job_name = get_id_to_full_job_name_map(final_workflow)

    pretty_print_workflow(final_workflow, sys.stderr)

    print(f"::group::Job ID -> Name Map", file=sys.stderr)
    print(json.dumps(id_to_full_job_name, indent=2), file=sys.stderr)
    print(f"::endgroup::", file=sys.stderr)

    write_output("WORKFLOW",
                 json.dumps(final_workflow, indent=None, separators=(',', ':')),
                 outfile=sys.stdout)
    write_output("WORKFLOW_KEYS",
                 json.dumps(list(final_workflow.keys()), indent=None, separators=(',', ':')),
                 outfile=sys.stdout)
    write_output("WORKFLOW_JOB_IDS",
                 json.dumps(id_to_full_job_name, indent=None, separators=(',', ':')),
                 outfile=sys.stdout)
    write_outputs(final_workflow)


def print_devcontainer_info(args):
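The runner summary written by the new write_outputs() boils down to a counts dict sorted by descending count and rendered as a Markdown table. A self-contained sketch using the same idioms; the runner names and counts here are invented:

    # Hypothetical counts keyed by runner label:
    runner_counts = {"linux-cpu16": 12, "linux-gpu-v100": 3, "windows-cpu16": 7}

    # Sort by descending count, as write_outputs() does:
    runner_counts = dict(sorted(runner_counts.items(), key=lambda item: item[1], reverse=True))

    # Render the same two-column Markdown table:
    table = f"| {'#':^4} | Runner\n|------|------\n"
    for runner, count in runner_counts.items():
        table += f"| {count:4} | `{runner}`\n"

    print(table)
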
32 changes: 20 additions & 12 deletions .github/actions/workflow-results/action.yml
@@ -35,31 +35,35 @@ runs:
        name: dispatch-job-success
        path: dispatch-job-success/

    - name: Print job summaries
    - name: Prepare execution summary
      id: job-summary
      continue-on-error: true
      shell: bash --noprofile --norc -euo pipefail {0}
      run: |
        echo "Generating job summary..."
        python3 "${GITHUB_ACTION_PATH}/print-execution-summary.py" workflow/workflow.json > workflow/execution.md
        python3 "${GITHUB_ACTION_PATH}/prepare-execution-summary.py" workflow/workflow.json
    - name: Prepare final summary
      id: final-summary
      continue-on-error: true
      shell: bash --noprofile --norc -euo pipefail {0}
      run: |
        printf "%s\n" \
          "<details><summary>🔽CI Summary⬇️</summary>" \
          "$(cat workflow/execution.md)" \
          "$(cat workflow/runners.md)" \
          "$(cat workflow/changes.md)" \
          "</details>" > workflow/final_summary.md
        echo "::group::Final Summary"
        python3 "${GITHUB_ACTION_PATH}/final-summary.py" | tee final_summary.md
        echo "::endgroup::"
        cp workflow/final_summary.md ${GITHUB_STEP_SUMMARY}
        printf "SUMMARY=%q\n" "$(cat workflow/final_summary.md)" | tee -a "${GITHUB_OUTPUT}"
        # This allows multiline strings and special characters to be passed through the GHA outputs:
        url_encode_string() {
          python3 -c "import sys; from urllib.parse import quote; print(quote(sys.stdin.read()))"
        }
        echo "::group::GHA Output: SUMMARY"
        printf "SUMMARY=%s\n" "$(cat final_summary.md | url_encode_string)" | tee -a "${GITHUB_OUTPUT}"
        echo "::endgroup::"
        cp final_summary.md ${GITHUB_STEP_SUMMARY}
    - name: Comment on PR
      if: ${{ ! cancelled() }}
      continue-on-error: true
      env:
        PR_NUMBER: ${{ fromJSON(steps.get-pr-info.outputs.pr-info).number }}
@@ -71,7 +75,11 @@ runs:
        const pr_number = process.env.PR_NUMBER;
        const owner = 'NVIDIA';
        const repo = 'cccl';
        const commentBody = process.env.COMMENT_BODY;
        // Decode URL-encoded string for proper display in comments
        const commentBody = decodeURIComponent(process.env.COMMENT_BODY);
        console.log('::group::Commenting on PR #' + pr_number + ' with the following message:');
        console.log(commentBody);
        console.log('::endgroup::');
        github.issues.createComment({
          owner: owner,
          repo: repo,
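The SUMMARY hand-off above survives newlines and Markdown punctuation because both sides agree on URL encoding: the bash step encodes with Python's urllib.parse.quote, and the github-script step decodes with JavaScript's decodeURIComponent. A quick sanity check of the round trip, done entirely in Python on the assumption that unquote recovers the same text decodeURIComponent would:

    from urllib.parse import quote, unquote

    summary = "<details><summary>CI Summary</summary>\n\n| 12 | `runner` |\n</details>"  # invented sample

    encoded = quote(summary)
    assert "\n" not in encoded          # fits on a single KEY=value line in ${GITHUB_OUTPUT}
    assert unquote(encoded) == summary  # what the JS side recovers before commenting
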
50 changes: 50 additions & 0 deletions .github/actions/workflow-results/final-summary.py
@@ -0,0 +1,50 @@
#!/usr/bin/env python3

import json
import os
import re
import sys


def read_file(filepath):
    with open(filepath, 'r') as f:
        return f.read().rstrip("\n ")

def print_file_if_present(filepath):
    if os.path.exists(filepath):
        print(read_file(filepath) + "\n\n")


def print_summary_file(filepath, heading_level):
    summary_json = json.load(open(filepath))
    print(f"<details><summary><h{heading_level}>{summary_json['heading']}</h{heading_level}></summary>\n")
    print(summary_json["body"] + "\n")
    print("</details>\n")


def main():
    # List of all projects detected in 'execution/projects/{project}_summary.json':
    projects = []
    project_file_regex = "(.*)_summary.json"
    for filename in os.listdir("execution/projects"):
        match = re.match(project_file_regex, filename)
        if match:
            projects.append(match.group(1))

    print(f"<details><summary>{read_file('execution/heading.txt')}</summary>\n")

    print("<ul>")
    for project in projects:
        print("<li>")
        print_summary_file(f"execution/projects/{project}_summary.json", 3)
    print("</ul>\n")

    print_summary_file("workflow/runner_summary.json", 2)
    print_file_if_present('workflow/changes.md')

    print("</details>")


if __name__ == '__main__':
    main()
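
One way to exercise final-summary.py locally is to fake the inputs it reads. The paths below come from the script itself; every heading and body string is invented:

    import json
    import os

    os.makedirs("execution/projects", exist_ok=True)
    os.makedirs("workflow", exist_ok=True)

    with open("execution/heading.txt", "w") as f:
        f.write("CI finished: all jobs passed")  # invented heading
    with open("execution/projects/thrust_summary.json", "w") as f:
        json.dump({"heading": "thrust", "body": "..."}, f)  # invented project summary
    with open("workflow/runner_summary.json", "w") as f:
        json.dump({"heading": "Runner counts", "body": "..."}, f)  # invented runner table

    # workflow/changes.md is optional; print_file_if_present() skips it when absent.
    # Then: python3 final-summary.py > summary.md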
