I figured it out! After enough staring at and experimenting with the dataviewjs DataArray methods, I was able to get rid of most of the ugly parsing code that was bothering me.
Here’s my final dataviewjs block. It shows a table with all fields and their counts, followed by a table of rare fields with their counts and the notes that use them.
/**
 * Convert an arbitrary field name to a key-safe slug: lowercase,
 * spaces become hyphens, other non-word characters are dropped,
 * and any run of hyphens is collapsed to a single hyphen.
 *
 * Fix: strip disallowed characters BEFORE collapsing hyphen runs.
 * The original collapsed first, so removals could reintroduce
 * doubles (e.g. "a & b" -> "a-&-b" -> strip -> "a--b").
 *
 * @param {string} str - raw field name
 * @returns {string} slugged field name
 */
function convertToSlug(str) {
  return str.toLowerCase()
    .replace(/ /g, '-')        // spaces -> hyphens
    .replace(/[^\w-]+/g, '')   // drop everything but word chars and hyphens
    .replace(/-+/g, '-');      // then collapse hyphen runs
}
// Dataview-implicit metadata keys; everything else on a page object is a
// user-defined frontmatter/inline field.
const excludes = ["file", "aliases", "tags", "position", "links", "inlinks", "outlinks", "etags", "frontmatter", "lists", "tasks"];
// Tally of user fields across the vault: slugged field name -> note count.
let myFields = new Map();
dv.pages('"/"').forEach((page) =>
  Object.keys(page)
    .map((k) => convertToSlug(k))
    // Use the standard Array.prototype.includes; the original `.contains`
    // only works because Obsidian patches it onto Array.prototype.
    .filter((k) => !excludes.includes(k))
    .forEach((k) => myFields.set(k, (myFields.get(k) ?? 0) + 1))
);
// Wrap the tally in a Dataview DataArray so we can use its
// sort(keyFn, direction) API, ordering fields by how many notes use them.
const myFieldsDV = dv.array([...myFields])
  .sort((entry) => entry[1], "desc");
const myDVHeaders = ["Slugged Field", "Count"];
// First table: every field with its note count, most common first.
dv.table(myDVHeaders, myFieldsDV);
dv.header(3, "Rare Fields:");
// Second table: fields used by fewer than 3 notes, alphabetized, with a
// link to each note that declares the field.
const rareRows = myFieldsDV
  .filter(([, howManyNotes]) => howManyNotes < 3)
  .map(([sluggedField, howManyNotes]) => {
    // Dataview flattens .file.link over the filtered page set.
    const notesWithField = dv.pages('"/"')
      .filter((page) => Object.hasOwn(page, sluggedField))
      .file.link;
    return [sluggedField, howManyNotes, notesWithField];
  })
  .sort(([sluggedField]) => sluggedField);
dv.table(myDVHeaders.concat(["Notes"]), rareRows);
The vault I used to test this is small (approximately 100 notes), and dataview takes a few seconds to process the query. Note that the second table iterates through all your notes once per rare field, so you may be able to speed it up by lowering the howManyNotes filter threshold from 3 to 1 or 2, or by limiting the bottom dv.pages('"/"') call to a specific folder as you did in your OP.
I’m curious to hear how the performance is on your vault! Does it avoid the “stack size exceeded” error?