Update Turbopack test manifest from GitHub Actions artifact (#58394)

### What?
We can use the GitHub Actions artifact (which is already produced) instead of a
separate git branch to get the latest test results.

This also means the daily tests no longer have a dependency back to the turbo repo.
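In practice, the updated script asks the GitHub API (through the `gh` CLI) for the newest non-expired `test-results` artifact built from `canary`, downloads the zip, and reads `nextjs-test-results.json` out of it. A minimal sketch of that lookup, mirroring the loop in the updated script in this diff (`latestCanaryTestResults` is just an illustrative name, not part of the change):

```js
// Sketch only: list "test-results" artifacts via the gh CLI and take the
// first one from a canary run that has not expired, matching the loop in
// the updated script below.
const execa = require('execa')

async function latestCanaryTestResults() {
  const { stdout } = await execa('gh', [
    'api',
    '/repos/vercel/next.js/actions/artifacts?name=test-results',
  ])
  const { artifacts } = JSON.parse(stdout)
  return artifacts.find(
    (a) => !a.expired && a.workflow_run.head_branch === 'canary'
  )
}
```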


Closes PACK-1951
Leah 2023-11-24 16:48:12 +01:00 committed by GitHub
parent b8a18f6e13
commit 2a8f7ae1b1
9 changed files with 26163 additions and 241 deletions


@@ -3,20 +3,18 @@ author: Turbopack team
description: 'Display next.js integration test failure status'
inputs:
  # Github token to use to create test report comment. If not specified, the default token will be used with username 'github-actions'
  token:
    default: ${{ github.token }}
    description: 'GitHub token used to create the test report comment. If not specified, the default GitHub actions token will be used'
  # The base of the test results to compare against. If not specified, will try to compare with latest main branch's test results.
  diff_base:
    default: 'main'
    description: "The base of the test results to compare against. If not specified, will try to compare with latest main branch's test results."
  # Include full test failure message in the report.
  # This is currently disabled as we have too many failed test cases, causes
  # too many report comment generated.
  expand_full_result_message:
    default: 'false'
    description: 'Whether to include the full test failure message in the report. This is currently disabled as we have too many failed test cases, which would lead to massive comments.'
runs:
  using: node16
  using: node20
  main: index.js

File diff suppressed because one or more lines are too long


@@ -8,7 +8,6 @@
    "lint:prettier": "prettier -c . --cache --ignore-path=../../../.prettierignore"
  },
  "devDependencies": {
    "@turbo/eslint-config": "workspace:*",
    "@types/node": "^18.11.18",
    "@vercel/ncc": "0.34.0",
    "typescript": "^4.4.4"


@@ -6,41 +6,7 @@ const fs = require('fs')
const path = require('path')
const semver = require('semver')
/**
 * Models parsed test results output from next.js integration test.
 * This is a subset of the full test result output from jest, partially compatible.
 */
interface TestResult {
  numFailedTestSuites: number
  numFailedTests: number
  numPassedTestSuites: number
  numPassedTests: number
  numPendingTestSuites: number
  numPendingTests: number
  numRuntimeErrorTestSuites: number
  numTodoTests: number
  numTotalTestSuites: number
  numTotalTests: number
  startTime: number
  success: boolean
  testResults?: Array<{
    assertionResults?: Array<{
      ancestorTitles?: Array<string> | null
      failureMessages?: Array<string> | null
      fullName: string
      location?: null
      status: string
      title: string
    }> | null
    endTime: number
    message: string
    name: string
    startTime: number
    status: string
    summary: string
  }> | null
  wasInterrupted: boolean
}
/// <reference path="./manifest" />
type Octokit = ReturnType<typeof getOctokit>
@@ -53,18 +19,6 @@ type ExistingComment =
      ReturnType<Octokit['rest']['issues']['listComments']>
    >['data'][number]
  | undefined
interface JobResult {
  job: string
  data: TestResult
}
interface TestResultManifest {
  nextjsVersion: string
  ref: string
  buildTime?: string
  buildSize?: string
  result: Array<JobResult>
  flakyMonitorJobResults: Array<JobResult>
}
// A comment marker to identify the comment created by this action.
const BOT_COMMENT_MARKER = `<!-- __marker__ next.js integration stats __marker__ -->`


@@ -0,0 +1,49 @@
interface JobResult {
  job: string
  data: TestResult
}

interface TestResultManifest {
  nextjsVersion: string
  ref: string
  buildTime?: string
  buildSize?: string
  result: Array<JobResult>
  flakyMonitorJobResults: Array<JobResult>
}

/**
 * Models parsed test results output from next.js integration test.
 * This is a subset of the full test result output from jest, partially compatible.
 */
interface TestResult {
  numFailedTestSuites: number
  numFailedTests: number
  numPassedTestSuites: number
  numPassedTests: number
  numPendingTestSuites: number
  numPendingTests: number
  numRuntimeErrorTestSuites: number
  numTodoTests: number
  numTotalTestSuites: number
  numTotalTests: number
  startTime: number
  success: boolean
  testResults?: Array<{
    assertionResults?: Array<{
      ancestorTitles?: Array<string> | null
      failureMessages?: Array<string> | null
      fullName: string
      location?: null
      status: string
      title: string
    }> | null
    endTime: number
    message: string
    name: string
    startTime: number
    status: string
    summary: string
  }> | null
  wasInterrupted: boolean
}


@@ -162,6 +162,7 @@
    "jest-extended": "4.0.2",
    "jest-junit": "16.0.0",
    "json5": "2.2.3",
    "kleur": "^4.1.0",
    "ky": "0.19.1",
    "ky-universal": "0.6.0",
    "lerna": "4.0.0",


@@ -332,6 +332,9 @@ importers:
      json5:
        specifier: 2.2.3
        version: 2.2.3
      kleur:
        specifier: ^4.1.0
        version: 4.1.3
      ky:
        specifier: 0.19.1
        version: 0.19.1


@@ -0,0 +1,28 @@
/// <reference path="../.github/actions/next-integration-stat/src/manifest.d.ts" />

type ListArtifactsResponse = {
  total_count: number
  artifacts: Artifact[]
}

type Artifact = {
  id: number
  node_id: string
  name: string
  size_in_bytes: number
  url: string
  archive_download_url: string
  expired: false
  created_at: string
  expires_at: string
  updated_at: string
  workflow_run: WorkflowRun
}

type WorkflowRun = {
  id: number
  repository_id: number
  head_repository_id: number
  head_branch: string
  head_sha: string
}


@@ -1,6 +1,10 @@
const fetch = require('node-fetch')
const fs = require('fs')
const os = require('os')
const path = require('path')
const prettier = require('prettier')
const execa = require('execa')
const { bold } = require('kleur')
async function format(text) {
  const options = await prettier.resolveConfig(__filename)
@@ -9,10 +13,8 @@ async function format(text) {
const override = process.argv.includes('--override')
const RESULT_URL =
  'https://raw.githubusercontent.com/vercel/turbo/nextjs-integration-test-data/test-results/main/nextjs-test-results.json'
const PASSING_JSON_PATH = `${__dirname}/turbopack-tests-manifest.json`
const WORKING_PATH = '/home/runner/work/turbo/turbo/'
const WORKING_PATH = '/root/actions-runner/_work/next.js/next.js/'
const INITIALIZING_TEST_CASES = [
  'compile successfully',
@@ -106,11 +108,93 @@ const SKIPPED_TEST_SUITES = {
  ],
}
async function updatePassingTests() {
  const passing = { __proto__: null }
  const res = await fetch(RESULT_URL)
  const results = await res.json()
/**
 * @param title {string}
 * @param file {string}
 * @param args {readonly string[]}
 * @returns {execa.ExecaChildProcess}
 */
function exec(title, file, args) {
  logCommand(title, `${file} ${args.join(' ')}`)
  return execa(file, args, {
    stderr: 'inherit',
  })
}
/**
 * @param {string} title
 * @param {string} [command]
 */
function logCommand(title, command) {
  let message = `\n${bold().underline(title)}\n`
  if (command) {
    message += `> ${bold(command)}\n`
  }
  console.log(message)
}
/**
 * @returns {Promise<Artifact>}
 */
async function fetchLatestTestArtifact() {
  const { stdout } = await exec(
    'Getting latest test artifacts from GitHub actions',
    'gh',
    ['api', '/repos/vercel/next.js/actions/artifacts?name=test-results']
  )
  /** @type {ListArtifactsResponse} */
  const res = JSON.parse(stdout)
  for (const artifact of res.artifacts) {
    if (artifact.expired || artifact.workflow_run.head_branch !== 'canary') {
      continue
    }
    return artifact
  }
  throw new Error('no valid test-results artifact was found for branch canary')
}
/**
 * @returns {Promise<TestResultManifest>}
 */
async function fetchTestResults() {
  const artifact = await fetchLatestTestArtifact()
  const subprocess = exec('Downloading artifact archive', 'gh', [
    'api',
    `/repos/vercel/next.js/actions/artifacts/${artifact.id}/zip`,
  ])
  const filePath = path.join(
    os.tmpdir(),
    `next-test-results.${Math.floor(Math.random() * 1000).toString(16)}.zip`
  )
  subprocess.stdout.pipe(fs.createWriteStream(filePath))
  await subprocess
  const { stdout } = await exec('Extracting test results manifest', 'unzip', [
    '-pj',
    filePath,
    'nextjs-test-results.json',
  ])
  return JSON.parse(stdout)
}
async function updatePassingTests() {
  const results = await fetchTestResults()
  logCommand('Processing results...')
  const passing = { __proto__: null }
  for (const result of results.result) {
    const runtimeError = result.data.numRuntimeErrorTestSuites > 0
    for (const testResult of result.data.testResults) {
@@ -153,13 +237,11 @@ async function updatePassingTests() {
      if (skippedPassingNames.length > 0) {
        console.log(
          `${filepath} has ${
          `${bold().red(filepath)} has ${
            skippedPassingNames.length
          } passing tests that are marked as skipped: ${JSON.stringify(
            skippedPassingNames,
            0,
            2
          )}`
          } passing tests that are marked as skipped:\n${skippedPassingNames
            .map((name) => ` - ${name}`)
            .join('\n')}\n`
        )
      }
    }
@@ -190,9 +272,12 @@ async function updatePassingTests() {
      oldData.passed.filter((name) => newData.failed.includes(name))
    )
    if (shouldPass.size > 0) {
      const list = JSON.stringify([...shouldPass], 0, 2)
      console.log(
        `${file} has ${shouldPass.size} test(s) that should pass but failed: ${list}`
        `${bold().red(file)} has ${
          shouldPass.size
        } test(s) that should pass but failed:\n${Array.from(shouldPass)
          .map((name) => ` - ${name}`)
          .join('\n')}\n`
      )
    }
    // Merge the old passing tests with the new ones
@@ -203,7 +288,9 @@ async function updatePassingTests() {
      .sort()
    if (!oldData.runtimeError && newData.runtimeError) {
      console.log(`${file} has a runtime error that it shouldn't have`)
      console.log(
        `${bold().red(file)} has a runtime error that it shouldn't have\n`
      )
      newData.runtimeError = false
    }
  }
@@ -246,4 +333,7 @@ function stripWorkingPath(path) {
  return path.slice(WORKING_PATH.length)
}
updatePassingTests()
updatePassingTests().catch((e) => {
  console.error(e)
  process.exit(1)
})