Implement Turbopack build manifest update (#63461)
Adds a production (build) counterpart to the Turbopack Next.js integration test tooling: a new reusable `turbopack-nextjs-build-integration-tests.yml` workflow, a `test/build-turbopack-build-tests-manifest.js` script that regenerates the build test manifest from CI results, and updates to the manifest-update and "Are We Turbo Yet" workflows so both the development and production manifests are maintained. Closes NEXT-2857
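For reviewers, a rough sketch of the per-file entry shape that `test/build-turbopack-build-tests-manifest.js` (added below) writes to `turbopack-build-tests-manifest.json`. The field names come from the script in this PR; the file path and test names in the example are invented for illustration.

// Illustrative only: one entry of the generated manifest, with the fields
// produced by updatePassingTests() in the script added by this PR.
const exampleManifestEntry = {
  // hypothetical test file path, relative to the repo root
  'test/production/example/index.test.ts': {
    passed: ['should build successfully'],
    failed: ['should serve the index page'],
    pending: [],
    flakey: [],
    runtimeError: false,
  },
}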
Parent: d91e78adb7
Commit: b9547e74fc
8 changed files with 631 additions and 42 deletions
127  .github/actions/upload-turboyet-data/dist/index.js  vendored
File diff suppressed because one or more lines are too long
@@ -13,13 +13,23 @@ on:
        default: 'canary'

jobs:
  # Trigger actual next.js integration tests.
  next_js_integration:
  next_js_dev_integration:
    name: Execute Next.js integration workflow
    if: github.repository_owner == 'vercel'
    permissions:
      pull-requests: write
    uses: ./.github/workflows/turbopack-nextjs-integration-tests.yml
    uses: ./.github/workflows/turbopack-nextjs-dev-integration-tests.yml
    secrets: inherit
    with:
      diff_base: 'none'
      version: ${{ inputs.version || 'canary' }}

  next_js_build_integration:
    name: Execute Next.js integration workflow
    if: github.repository_owner == 'vercel'
    permissions:
      pull-requests: write
    uses: ./.github/workflows/turbopack-nextjs-build-integration-tests.yml
    secrets: inherit
    with:
      diff_base: 'none'
199  .github/workflows/turbopack-nextjs-build-integration-tests.yml  vendored  Normal file
@@ -0,0 +1,199 @@
# Reusable workflow to execute certain version of Next.js integration tests
# with turbopack.
#
# Refer test.yml for how this workflow is being initialized
# - Workflow can specify `inputs.version` to specify which version of next.js to use, otherwise will use latest release version.
name: Turbopack Next.js production integration tests

on:
  workflow_call:
    inputs:
      # Allow to specify Next.js version to run integration test against.
      # If not specified, will use latest release version including canary.
      version:
        type: string
      # The base of the test results to compare against. If not specified, will try to compare with latest main branch's test results.
      diff_base:
        type: string
        default: 'main'

# Workflow-common env variables
env:
  # Enabling backtrace will make snapshot tests fail
  RUST_BACKTRACE: 0
  NEXT_TELEMETRY_DISABLED: 1
  TEST_CONCURRENCY: 6
  NEXT_JUNIT_TEST_REPORT: 'true'
  # Turbopack specific customization for the test runner
  TURBOPACK: 1
  __INTERNAL_CUSTOM_TURBOPACK_BINDINGS: ${{ github.workspace }}/packages/next-swc/native/next-swc.linux-x64-gnu.node
  NEXT_TEST_SKIP_RETRY_MANIFEST: ${{ github.workspace }}/integration-test-data/test-results/main/failed-test-path-list.json
  NEXT_TEST_CONTINUE_ON_ERROR: TRUE
  NEXT_E2E_TEST_TIMEOUT: 240000
  NEXT_TEST_JOB: 1

jobs:
  # First, build Next.js to execute across tests.
  setup_nextjs:
    name: Setup Next.js build
    uses: ./.github/workflows/setup-nextjs-build.yml
    with:
      version: ${{ inputs.version }}

  # Actual test scheduling. These jobs mimic the same jobs in Next.js repo,
  # which we do allow some of duplications to make it easier to update if upstream changes.
  # Refer build_and_test.yml in the Next.js repo for more details.
  test-build:
    # This job name is being used in github action to collect test results. Do not change it, or should update
    # ./.github/actions/next-integration-stat to match the new name.
    name: Next.js integration test (Production)
    # Currently it is possible test grouping puts large number of failing tests suites in a single group,
    # which ends up job timeouts. Temporarily relieve the timeout until we make progress on the failing suites.
    # ref: https://github.com/vercel/turbo/pull/5668
    # timeout-minutes: 180
    runs-on:
      - 'self-hosted'
      - 'linux'
      - 'x64'
      - 'metal'

    needs: [setup_nextjs]
    strategy:
      fail-fast: false
      matrix:
        group: [1, 2, 3, 4, 5, 6]

    steps:
      - uses: actions/cache/restore@v3
        id: restore-build
        with:
          path: ./*
          key: ${{ inputs.version }}-${{ github.sha }}-${{ github.run_id }}-${{ github.run_attempt}}-${{ github.run_number }}
          fail-on-cache-miss: true

      - name: Enable corepack and install yarn
        run: |
          corepack enable
          corepack prepare --activate yarn@1.22.19

      - name: Setup playwright
        run: |
          pnpm playwright install

      - name: Run test/production
        run: |
          NEXT_TEST_MODE=start TURBOPACK=1 TURBOPACK_BUILD=1 node run-tests.js -g ${{ matrix.group }}/6 -c ${TEST_CONCURRENCY} --type start
        # It is currently expected to fail some of next.js integration test, do not fail CI check.
        continue-on-error: true

      - name: Upload test report artifacts
        uses: actions/upload-artifact@v4
        with:
          name: test-reports-start-${{ matrix.group }}
          if-no-files-found: 'error'
          path: |
            test/turbopack-test-junit-report

  test-integration:
    name: Next.js production integration test (Integration)
    needs: [setup_nextjs]
    runs-on:
      - 'self-hosted'
      - 'linux'
      - 'x64'
      - 'metal'

    timeout-minutes: 180
    strategy:
      fail-fast: false
      matrix:
        group: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    steps:
      - uses: actions/cache/restore@v3
        id: restore-build
        with:
          path: ./*
          key: ${{ inputs.version }}-${{ github.sha }}
          fail-on-cache-miss: true

      - name: Enable corepack and install yarn
        run: |
          corepack enable
          corepack prepare --activate yarn@1.22.19

      - name: Setup playwright
        run: |
          pnpm playwright install

      - name: Run test/integration
        run: |
          TURBOPACK=1 TURBOPACK_BUILD=1 node run-tests.js -g ${{ matrix.group }}/12 -c ${TEST_CONCURRENCY} --type integration
        continue-on-error: true

      - name: Upload test report artifacts
        uses: actions/upload-artifact@v4
        with:
          name: test-reports-integration-${{ matrix.group }}
          if-no-files-found: 'error'
          path: |
            test/turbopack-test-junit-report

  # Collect integration test results from execute_tests,
  # Store it as github artifact for next step to consume.
  collect_nextjs_integration_stat:
    needs: [test-build, test-integration]
    name: Next.js integration test status report
    runs-on:
      - 'self-hosted'
      - 'linux'
      - 'x64'
      - 'metal'

    if: always()
    permissions:
      pull-requests: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Collect integration test stat
        uses: ./.github/actions/next-integration-stat
        with:
          diff_base: ${{ inputs.diff_base }}

      - name: Store artifacts
        uses: actions/upload-artifact@v4
        with:
          name: test-results-build
          path: |
            nextjs-test-results.json
            failed-test-path-list.json
            passed-test-path-list.json

  upload_test_report:
    needs: [test-build, test-integration]
    name: Upload test report to datadog
    runs-on:
      - 'self-hosted'
      - 'linux'
      - 'x64'
      - 'metal'

    if: ${{ !cancelled() }}
    steps:
      - name: Download test report artifacts
        id: download-test-reports
        uses: actions/download-artifact@v4
        with:
          pattern: test-reports-*
          path: test/reports
          merge-multiple: true

      - name: Upload to datadog
        env:
          DATADOG_API_KEY: ${{ secrets.DATA_DOG_API_KEY }}
          DD_ENV: 'ci'
        run: |
          # We'll tag this to the "Turbopack" datadog service, not "nextjs"
          npx @datadog/datadog-ci@2.23.1 junit upload --tags test.type:turbopack-build.daily --service Turbopack ./test/reports
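Both test jobs in the new workflow shard work across the matrix with run-tests.js's `-g <group>/<total>` flag. The real sharding lives in Next.js's run-tests.js; purely as a conceptual sketch (not that script's actual logic), deterministic group selection could look like:

// Conceptual sketch only, not the actual run-tests.js implementation:
// deterministically assign test files to one of `total` matrix groups.
function filesForGroup(testFiles, group, total) {
  const sorted = [...testFiles].sort() // same order in every matrix job
  return sorted.filter((_, index) => index % total === group - 1)
}

// Group 2 of 3 over six files picks indices 1 and 4.
console.log(filesForGroup(['a', 'b', 'c', 'd', 'e', 'f'], 2, 3)) // ['b', 'e']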
@@ -3,7 +3,7 @@
#
# Refer test.yml for how this workflow is being initialized
# - Workflow can specify `inputs.version` to specify which version of next.js to use, otherwise will use latest release version.
name: Turbopack Next.js integration tests
name: Turbopack Next.js development integration tests

on:
  workflow_call:
@@ -8,8 +8,8 @@ on:
  workflow_dispatch:

jobs:
  update_manifest:
    name: Update and upload Turbopack test manifest
  update_dev_manifest:
    name: Update and upload Turbopack development test manifest
    if: github.repository_owner == 'vercel'
    runs-on: ubuntu-latest
    steps:
@@ -39,5 +39,38 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GH_TOKEN_PULL_REQUESTS }}
          BRANCH_NAME: turbopack-manifest
          SCRIPT: test/build-turbopack-dev-tests-manifest.js
          PR_TITLE: Update Turbopack test manifest
          PR_BODY: This auto-generated PR updates the integration test manifest used when testing Turbopack.
          PR_TITLE: Update Turbopack development test manifest
          PR_BODY: This auto-generated PR updates the development integration test manifest used when testing Turbopack.

  update_build_manifest:
    name: Update and upload Turbopack production test manifest
    if: github.repository_owner == 'vercel'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Commits made with the default `GITHUB_TOKEN` won't trigger workflows.
          # See: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow
          token: ${{ secrets.RELEASE_BOT_GITHUB_TOKEN }}

      - name: Setup node
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_LTS_VERSION }}
          check-latest: true

      - run: corepack enable

      - name: Install dependencies
        shell: bash
        run: pnpm i

      - name: Create Pull Request
        shell: bash
        run: node scripts/automated-update-workflow.js
        env:
          GITHUB_TOKEN: ${{ secrets.GH_TOKEN_PULL_REQUESTS }}
          BRANCH_NAME: turbopack-manifest
          SCRIPT: test/build-turbopack-build-tests-manifest.js
          PR_TITLE: Update Turbopack production test manifest
          PR_BODY: This auto-generated PR updates the production integration test manifest used when testing Turbopack.
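The new `update_build_manifest` job reuses `scripts/automated-update-workflow.js`, which is not part of this diff. Based only on the env block above, it presumably consumes something like the following (an assumption for illustration, not that script's actual code):

// Assumed shape only: how scripts/automated-update-workflow.js likely reads
// its configuration from the env block in the job above.
const {
  GITHUB_TOKEN, // token used to push the branch and open the PR
  BRANCH_NAME, // e.g. 'turbopack-manifest'
  SCRIPT, // manifest generator to run, e.g. 'test/build-turbopack-build-tests-manifest.js'
  PR_TITLE,
  PR_BODY,
} = process.env
// ...it would then run `node ${SCRIPT}`, commit the regenerated manifest to
// BRANCH_NAME, and open a pull request titled PR_TITLE with body PR_BODY.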
@@ -32,6 +32,10 @@ jobs:
          node ./test/build-turbopack-dev-tests-manifest.js
        env:
          GITHUB_TOKEN: ${{ secrets.GH_TOKEN_PULL_REQUESTS }}
      - run: |
          node ./test/build-turbopack-build-tests-manifest.js
        env:
          GITHUB_TOKEN: ${{ secrets.GH_TOKEN_PULL_REQUESTS }}

      - name: 'Upload results to "Are We Turbo Yet" KV'
        env:
282  test/build-turbopack-build-tests-manifest.js  Normal file
@@ -0,0 +1,282 @@
const fs = require('fs')
const os = require('os')
const path = require('path')

const prettier = require('prettier')
const execa = require('execa')
const { bold } = require('kleur')

async function format(text) {
  const options = await prettier.resolveConfig(__filename)
  return prettier.format(text, { ...options, parser: 'json' })
}

const override = process.argv.includes('--override')

const PASSING_JSON_PATH = `${__dirname}/turbopack-build-tests-manifest.json`
const WORKING_PATH = '/root/actions-runner/_work/next.js/next.js/'

const INITIALIZING_TEST_CASES = [
  'compile successfully',
  'should build successfully',
]

// please make sure this is sorted alphabetically when making changes.
const SKIPPED_TEST_SUITES = {}

function checkSorted(arr, name) {
  const sorted = [...arr].sort()
  if (JSON.stringify(arr) !== JSON.stringify(sorted)) {
    console.log(`Expected order of ${name}:`)
    for (let i = 0; i < arr.length; i++) {
      if (arr[i] === sorted[i]) {
        console.log(`  ${arr[i]}`)
      } else {
        console.log(bold().red(`- ${arr[i]}`))
        console.log(bold().green(`+ ${sorted[i]}`))
      }
    }
    throw new Error(`${name} is not sorted`)
  }
}

checkSorted(Object.keys(SKIPPED_TEST_SUITES), 'SKIPPED_TEST_SUITES')

for (const [key, value] of Object.entries(SKIPPED_TEST_SUITES)) {
  checkSorted(value, `SKIPPED_TEST_SUITES['${key}']`)
}

/**
 * @param title {string}
 * @param file {string}
 * @param args {readonly string[]}
 * @returns {execa.ExecaChildProcess}
 */
function exec(title, file, args) {
  logCommand(title, `${file} ${args.join(' ')}`)

  return execa(file, args, {
    stderr: 'inherit',
  })
}

/**
 * @param {string} title
 * @param {string} [command]
 */
function logCommand(title, command) {
  let message = `\n${bold().underline(title)}\n`

  if (command) {
    message += `> ${bold(command)}\n`
  }

  console.log(message)
}

/**
 * @returns {Promise<Artifact>}
 */
async function fetchLatestTestArtifact() {
  const { stdout } = await exec(
    'Getting latest test artifacts from GitHub actions',
    'gh',
    ['api', '/repos/vercel/next.js/actions/artifacts?name=test-results-build']
  )

  /** @type {ListArtifactsResponse} */
  const res = JSON.parse(stdout)

  for (const artifact of res.artifacts) {
    if (artifact.expired || artifact.workflow_run.head_branch !== 'canary') {
      continue
    }

    return artifact
  }

  throw new Error('no valid test-results artifact was found for branch canary')
}

/**
 * @returns {Promise<TestResultManifest>}
 */
async function fetchTestResults() {
  const artifact = await fetchLatestTestArtifact()

  const subprocess = exec('Downloading artifact archive', 'gh', [
    'api',
    `/repos/vercel/next.js/actions/artifacts/${artifact.id}/zip`,
  ])

  const filePath = path.join(
    os.tmpdir(),
    `next-test-results.${Math.floor(Math.random() * 1000).toString(16)}.zip`
  )

  subprocess.stdout.pipe(fs.createWriteStream(filePath))

  await subprocess

  const { stdout } = await exec('Extracting test results manifest', 'unzip', [
    '-pj',
    filePath,
    'nextjs-test-results.json',
  ])

  return JSON.parse(stdout)
}

async function updatePassingTests() {
  const results = await fetchTestResults()

  logCommand('Processing results...')

  const passing = { __proto__: null }
  for (const result of results.result) {
    const runtimeError = result.data.numRuntimeErrorTestSuites > 0
    for (const testResult of result.data.testResults) {
      const filepath = stripWorkingPath(testResult.name)

      const fileResults = (passing[filepath] ??= {
        passed: [],
        failed: [],
        pending: [],
        flakey: [],
        runtimeError,
      })
      const skips = SKIPPED_TEST_SUITES[filepath] ?? []

      const skippedPassingNames = []

      let initializationFailed = false
      for (const testCase of testResult.assertionResults) {
        let { fullName, status } = testCase

        if (
          status === 'failed' &&
          INITIALIZING_TEST_CASES.some((name) => fullName.includes(name))
        ) {
          initializationFailed = true
        } else if (initializationFailed) {
          status = 'failed'
        }
        if (shouldSkip(fullName, skips)) {
          if (status === 'passed') skippedPassingNames.push(fullName)
          status = 'flakey'
        }

        // treat test-level todo as same as pending
        if (status === 'todo') {
          status = 'pending'
        }

        const statusArray = fileResults[status]
        if (!statusArray) {
          throw new Error(`unexpected status "${status}"`)
        }
        statusArray.push(fullName)
      }

      if (skippedPassingNames.length > 0) {
        console.log(
          `${bold().yellow(filepath)} has ${
            skippedPassingNames.length
          } passing tests that are marked as skipped:\n${skippedPassingNames
            .map((name) => ` - ${name}`)
            .join('\n')}\n`
        )
      }
    }
  }

  for (const info of Object.values(passing)) {
    info.failed = [...new Set(info.failed)].sort()
    info.pending = [...new Set(info.pending)].sort()
    info.flakey = [...new Set(info.flakey)].sort()
    info.passed = [
      ...new Set(info.passed.filter((name) => !info.failed.includes(name))),
    ].sort()
  }

  if (!override) {
    const oldPassingData = JSON.parse(
      fs.readFileSync(PASSING_JSON_PATH, 'utf8')
    )

    for (const file of Object.keys(oldPassingData)) {
      const newData = passing[file]
      const oldData = oldPassingData[file]
      if (!newData) continue

      // We want to find old passing tests that are now failing, and report them.
      // Tests are allowed to transition to skipped or flakey.
      const shouldPass = new Set(
        oldData.passed.filter((name) => newData.failed.includes(name))
      )
      if (shouldPass.size > 0) {
        console.log(
          `${bold().red(file)} has ${
            shouldPass.size
          } test(s) that should pass but failed:\n${Array.from(shouldPass)
            .map((name) => ` - ${name}`)
            .join('\n')}\n`
        )
      }
      // Merge the old passing tests with the new ones
      newData.passed = [...new Set([...shouldPass, ...newData.passed])].sort()
      // but remove them also from the failed list
      newData.failed = newData.failed
        .filter((name) => !shouldPass.has(name))
        .sort()

      if (!oldData.runtimeError && newData.runtimeError) {
        console.log(
          `${bold().red(file)} has a runtime error that it shouldn't have\n`
        )
        newData.runtimeError = false
      }
    }
  }

  // JS keys are ordered, this ensures the tests are written in a consistent order
  // https://stackoverflow.com/questions/5467129/sort-javascript-object-by-key
  const ordered = Object.keys(passing)
    .sort()
    .reduce((obj, key) => {
      obj[key] = passing[key]
      return obj
    }, {})

  fs.writeFileSync(
    PASSING_JSON_PATH,
    await format(JSON.stringify(ordered, null, 2))
  )
}

function shouldSkip(name, skips) {
  for (const skip of skips) {
    if (typeof skip === 'string') {
      // exact match
      if (name === skip) return true
    } else {
      // regex
      if (skip.test(name)) return true
    }
  }
  return false
}

function stripWorkingPath(path) {
  if (!path.startsWith(WORKING_PATH)) {
    throw new Error(
      `found unexpected working path in "${path}", expected it to begin with ${WORKING_PATH}`
    )
  }
  return path.slice(WORKING_PATH.length)
}

updatePassingTests().catch((e) => {
  console.error(e)
  process.exit(1)
})
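`SKIPPED_TEST_SUITES` starts out empty in this production manifest script. Going by `shouldSkip` above, each entry maps a test file path to an array of exact test names or regular expressions; a hypothetical example (path and names invented for illustration):

// Hypothetical entry shape, matching what shouldSkip() accepts:
// exact-match strings or RegExp patterns, keyed by test file path.
const SKIPPED_TEST_SUITES_EXAMPLE = {
  'test/production/example/index.test.ts': [
    'Example suite > should do something flaky',
    /should retry .* on timeout/,
  ],
}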