rsnext/packages/eslint-plugin-next/lib/utils/url.js
JacobLey 527cb97b56
Support multiple pages directories for linting (#25565)
Monorepos may contain multiple Next.js apps, but linting occurs at the top level, so all pages directories must be declared.

Declaring multiple directories via an Array allows loading all to generate a full list of potential URLs.

Updated schema and tests. Also cached some of the `fs.*Sync` requests, which can otherwise add up to many blocking lookups.

## Feature

- [ ] Implements an existing feature request or RFC. Make sure the feature request has been accepted for implementation before opening a PR.
- [ ] Related issues linked using `fixes #number`
- [x] Integration tests added
- [x] Documentation added
- [ ] Telemetry added. In case of a feature if it's used or not.

Closes: https://github.com/vercel/next.js/pull/27223
2021-07-20 21:29:54 +00:00

117 lines
2.9 KiB
JavaScript

const fs = require('fs')
const path = require('path')
// Cache for fs.lstatSync lookups.
// Avoids repeating blocking IO requests whose results are already known.
const fsLstatSyncCache = {}
const fsLstatSync = (source) => {
  if (!fsLstatSyncCache[source]) {
    fsLstatSyncCache[source] = fs.lstatSync(source)
  }
  return fsLstatSyncCache[source]
}
/**
 * Determines whether the given path refers to a directory.
 * @param {string} source
 * @returns {boolean}
 */
function isDirectory(source) {
  const stats = fsLstatSync(source)
  return stats.isDirectory()
}
/**
 * Checks if the source is a symbolic link.
 * @param {string} source
 */
function isSymlink(source) {
  return fsLstatSync(source).isSymbolicLink()
}
/**
 * Builds anchored RegExps for every page URL found under the given
 * pages directories.
 * @param {string} urlPrefix
 * @param {string[]} directories
 * @returns {RegExp[]}
 */
function getUrlFromPagesDirectories(urlPrefix, directories) {
  // De-duplicate similar pages across multiple directories.
  const patterns = new Set()
  for (const directory of directories) {
    for (const url of parseUrlForPages(urlPrefix, directory)) {
      // Since the URLs are normalized we add `^` and `$` so the
      // RegExp matches them exactly.
      patterns.add(`^${normalizeURL(url)}$`)
    }
  }
  return [...patterns].map((pattern) => new RegExp(pattern))
}
// Cache for fs.readdirSync lookups.
// Avoids repeating blocking IO requests whose results are already known.
const fsReadDirSyncCache = {}
/**
 * Recursively parse a pages directory into a list of page URLs.
 * @param {string} urlprefix
 * @param {string} directory
 * @returns {string[]}
 */
function parseUrlForPages(urlprefix, directory) {
  if (!fsReadDirSyncCache[directory]) {
    fsReadDirSyncCache[directory] = fs.readdirSync(directory)
  }
  const urls = []
  for (let entry of fsReadDirSyncCache[directory]) {
    if (/(\.(j|t)sx?)$/.test(entry)) {
      // Dynamic route segments ([param]) match any URL segment.
      entry = entry.replace(/\[.*\]/g, '.*')
      if (/^index(\.(j|t)sx?)$/.test(entry)) {
        // index files also answer to the bare directory URL.
        urls.push(`${urlprefix}${entry.replace(/^index(\.(j|t)sx?)$/, '')}`)
      }
      urls.push(`${urlprefix}${entry.replace(/(\.(j|t)sx?)$/, '')}`)
    } else {
      const dirPath = path.join(directory, entry)
      // Recurse into real sub-directories only; skipping symlinks avoids cycles.
      if (isDirectory(dirPath) && !isSymlink(dirPath)) {
        urls.push(...parseUrlForPages(urlprefix + entry + '/', dirPath))
      }
    }
  }
  return urls
}
/**
 * Normalizes a URL for exact-match comparison:
 * - Strips the query string and fragment
 * - Replaces a trailing `/index.html` with `/`
 * - Ensures every non-empty URL has a trailing `/`
 * @param {string} url
 * @returns {string|undefined} the normalized URL, or `undefined` for falsy input
 */
function normalizeURL(url) {
  if (!url) {
    return
  }
  url = url.split('?')[0]
  url = url.split('#')[0]
  // Fixed: the original had a redundant double assignment (`url = url = ...`).
  url = url.replace(/(\/index\.html)$/, '/')
  // Empty URLs should not be trailed with `/`, e.g. `#heading`
  if (url === '') {
    return url
  }
  url = url.endsWith('/') ? url : url + '/'
  return url
}
/**
 * Wraps `fn` so that only the first invocation actually runs it;
 * every later call returns the cached result of that first call.
 * @param {Function} fn
 * @returns {Function}
 */
function execOnce(fn) {
  let hasRun = false
  let cached
  return (...args) => {
    if (hasRun) {
      return cached
    }
    hasRun = true
    cached = fn(...args)
    return cached
  }
}
// Public API of this utility module (consumed by the no-html-link-for-pages rule).
module.exports = {
  getUrlFromPagesDirectories,
  normalizeURL,
  execOnce,
}