feat: cms integration

Cory Dransfeldt 2024-06-01 07:16:49 -07:00
parent ff77bdaf36
commit d23243b177
1050 changed files with 1032 additions and 27229 deletions

View file

@ -7,7 +7,7 @@ import markdownItFootnote from 'markdown-it-footnote'
import htmlmin from 'html-minifier-terser'
import filters from './config/filters/index.js'
import { minifyJsComponents } from './config/events/index.js'
import { allContent, searchIndex, tagList, tagsSortedByCount, links, tagMap, booksToRead } from './config/collections/index.js'
import { allContent, searchIndex } from './config/collections/index.js'
import { DateTime } from 'luxon'
// load .env
@ -47,9 +47,6 @@ export default async function (eleventyConfig) {
eleventyConfig.addPassthroughCopy({
'node_modules/@cdransf/select-pagination/select-pagination.js': 'assets/scripts/components/select-pagination.js',
})
eleventyConfig.addPassthroughCopy({
'node_modules/@zachleat/webcare-webshare/webcare-webshare.js': 'assets/scripts/components/webcare-webshare.js'
})
eleventyConfig.addPassthroughCopy({
'node_modules/youtube-video-element/youtube-video-element.js': 'assets/scripts/components/youtube-video-element.js'
})
@ -57,24 +54,9 @@ export default async function (eleventyConfig) {
'node_modules/@daviddarnes/mastodon-post/mastodon-post.js': 'assets/scripts/components/mastodon-post.js'
})
// enable merging of tags
eleventyConfig.setDataDeepMerge(true)
// create excerpts
eleventyConfig.setFrontMatterParsingOptions({
excerpt: true,
excerpt_alias: 'post_excerpt',
excerpt_separator: '<!-- excerpt -->',
})
// collections
eleventyConfig.addCollection('allContent', allContent)
eleventyConfig.addCollection('searchIndex', searchIndex)
eleventyConfig.addCollection('tagList', tagList)
eleventyConfig.addCollection('tagsSortedByCount', tagsSortedByCount)
eleventyConfig.addCollection('links', links)
eleventyConfig.addCollection('tagMap', tagMap)
eleventyConfig.addCollection('booksToRead', booksToRead)
const md = markdownIt({ html: true, linkify: true })
md.use(markdownItAnchor, {

View file

@ -1,70 +0,0 @@
name: Reading
run-name: 📚 ${{ inputs['book-status'] }} book ${{ inputs.isbn }}
# Grant the action permission to write to the repository
permissions:
contents: write
# Trigger the action
on:
workflow_dispatch:
inputs:
isbn:
description: The book's ISBN. Required.
required: true
type: string
book-status:
description: What is the status of the book? Required.
required: true
type: choice
default: "want to read"
options:
- "want to read"
- "started"
- "finished"
- "abandoned"
date:
description: Date to record the status of the book (YYYY-MM-DD). Leave blank for today. Optional.
type: string
notes:
description: Notes about the book. Optional.
type: string
rating:
description: Rate the book. Optional.
type: choice
default: "unrated"
options:
- "unrated"
- ⭐️
- ⭐️⭐️
- ⭐️⭐️⭐️
- ⭐️⭐️⭐️⭐️
- ⭐️⭐️⭐️⭐️⭐️
# Tags are optional.
tags:
description: Add tags to categorize the book. Separate each tag with a comma. Optional.
type: string
# Set up the steps to run the action
jobs:
update-library:
runs-on: macOS-latest
name: Read
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Read
uses: katydecorah/read-action@v8.1.0
with:
filename: src/_data/json/read.json
time-zone: America/Los_Angeles
thumbnail-width: 512
- name: Commit updated read file
run: |
git pull
git config --local user.email "hi@coryd.dev"
git config --local user.name "Cory Dransfeldt"
git add -A && git commit -m "📚 “${{ env.BookTitle }}” (${{ env.BookStatus }})"
git push
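
Note: with this workflow removed, a hypothetical sketch of the replacement flow would write straight to the Supabase books table that src/_data/books.js (later in this diff) reads from; the insert call and values below are assumptions, not part of this commit, and the column names mirror that file.

// hypothetical sketch: log a book in Supabase instead of via the read-action workflow
import { createClient } from '@supabase/supabase-js'

const supabase = createClient(process.env.SUPABASE_URL, process.env.SUPABASE_KEY)
const { error } = await supabase
  .from('books')
  .insert({ isbn: '9780385547772', title: 'Stay True', status: 'started', date_started: '2024-06-01' })
if (error) console.error('Error adding book:', error)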

View file

@ -29,9 +29,6 @@
/sitemap.txt /sitemap.xml 301!
/blog / 301!
/posts/2024 / 301!
/tags/.env /tags/env 301!
/tags/chatgpt/ /tags/ai/ 301!
/tags/socialmedia /tags/social%20media/ 301!
/blog/digital-privacy-tools /posts/2021/digital-privacy-tools/ 301!
/assets/img/social-preview/your-site-your-home-your-web- /assets/img/social-preview/your-site-your-home-your-web-preview.jpeg 301!
/assets/img/social-preview/adding-a-light/dark-theme-toggle-preview.jpeg /assets/img/social-preview/adding-a-light-dark-theme-toggle-preview.jpeg 301!
@ -74,6 +71,7 @@
# general
/articles/ / 301!
/tags /search 301!
/tags/* /search 301!
/referrals /save 301!
/music/genre/* /music/genres/:splat 301!
/recent/movies /watching/recent/movies 301!

View file

@ -109,6 +109,21 @@ const emojiMap = (genre, artist) => {
return DEFAULT
}
const fetchGenreById = async (genreId) => {
const { data, error } = await supabase
.from('genres')
.select('name')
.eq('id', genreId)
.single()
if (error) {
console.error('Error fetching genre:', error)
return null
}
return data.name
}
export default async () => {
const { data, error } = await supabase
.from('listens')
@ -116,7 +131,7 @@ export default async () => {
track_name,
artist_name,
listened_at,
artists (mbid, genre, country)
artists (mbid, genres, country)
`)
.order('listened_at', { ascending: false })
.range(0, 1)
@ -136,10 +151,11 @@ export default async () => {
}
const scrobbleData = data[0]
scrobbleData.genre = await fetchGenreById(data[0].artists.genres)
return new Response(JSON.stringify({
content: `${emojiMap(
scrobbleData.artists.genre,
scrobbleData.genre,
scrobbleData.artist_name
)} ${scrobbleData.track_name} by <a href="https://coryd.dev/music/artists/${sanitizeMediaString(scrobbleData.artist_name)}-${sanitizeMediaString(parseCountryField(scrobbleData.artists.country))}">${
scrobbleData.artist_name

View file

@ -1,5 +1,3 @@
import authors from '../data/author-map.js'
import tagAliases from '../data/tag-aliases.js'
import { DateTime } from 'luxon'
export const searchIndex = (collection) => {
@ -7,8 +5,9 @@ export const searchIndex = (collection) => {
let id = 0
const collectionData = collection.getAll()[0]
const { data } = collectionData
const { collections: { posts, links }, movies } = data
const movieData = movies.movies.filter(movie => (movie.review?.length && movie.review?.length > 0 && movie.rating))
const { posts, links, movies, books } = data
const movieData = movies['movies'].filter(movie => (movie['review']?.length && movie['review']?.length > 0 && movie['rating']))
const bookData = books.filter(book => (book['review']?.length && book['review']?.length > 0 && book['rating']))
const addItemToIndex = (items, icon, getUrl, getTitle, getTags) => {
if (items) {
items.forEach((item) => {
@ -23,9 +22,10 @@ export const searchIndex = (collection) => {
}
}
addItemToIndex(posts, '📝', item => item.url.includes('http') ? item.url : `https://coryd.dev${item.url}`, item => item.data.title, item => item.data.tags.filter(tag => tag !== 'posts'))
addItemToIndex(links, '🔗', item => item.data.link, item => item.data.title, item => item.data.tags)
if (movieData) addItemToIndex(movieData, '🎥', item => item.url, item => `${item.title} (${item.rating})`)
addItemToIndex(posts, '📝', item => item['url'], item => item['title'], item => item['tags'])
addItemToIndex(links, '🔗', item => item['link'], item => item['title'], item => item['tags'])
if (movieData) addItemToIndex(movieData, '🎥', item => item['url'], item => `${item['title']} (${item['rating']})`, item => item['tags'])
if (bookData) addItemToIndex(bookData, '📖', item => item['url'], item => `${item['title']} (${item['rating']})`, item => item['tags'])
return searchIndex
}
@ -35,7 +35,8 @@ export const allContent = (collection) => {
const collectionData = collection.getAll()[0]
const { data } = collectionData
const {
collections: { posts, links },
posts,
links,
books,
movies: { movies }
} = data
@ -47,92 +48,30 @@ export const allContent = (collection) => {
if (!parsedDate.isValid) parsedDate = DateTime.fromFormat(date, 'dd-MM-yyyy')
return parsedDate.isValid ? parsedDate.toISO() : null
}
const authorLookup = (url) => {
if (!url) return null
const urlObject = new URL(url)
const baseUrl = urlObject.origin
return authors?.[baseUrl] || null
}
const addContent = (items, icon, getTitle, getDate) => {
if (items) {
items.forEach(item => {
const author = authorLookup(item.data?.link)
const content = {
url: item.url?.includes('http') ? item.url : `https://coryd.dev${item.url}`,
title: `${icon}: ${getTitle(item)}${author ? ' via ' + author : ''}`
url: `https://coryd.dev${item['url']}`,
title: `${icon}: ${getTitle(item)}${item?.['authors']?.['name'] ? ' via ' + item['authors']['name'] : ''}`
}
if (item.data?.link) content.url = item.data?.link
if (item.data?.description) content.description = `${item.data.description}<br/><br/>`
if (item?.description) content.description = `${item.description}<br/><br/>`
if (item?.['link']) content['url'] = item?.['link']
if (item?.['description']) content['description'] = `${item['description']}<br/><br/>`
const date = getDate ? parseDate(getDate(item)) : null
if (date) content.date = date
if (date) content['date'] = date
aggregateContent.push(content)
})
}
}
addContent(posts, '📝', item => item.data.title, item => item.data.date)
addContent(links, '🔗', item => item.data.title, item => item.data.date)
addContent(books.filter(book => book.status === 'finished'), '📖', item => `${item.title}${item.rating ? ' (' + item.rating + ')' : ''}`, item => item.date)
addContent(movies, '🎥', item => `${item.title}${item.rating ? ' (' + item.rating + ')' : ''}`, item => item.lastWatched)
addContent(posts, '📝', item => item['title'], item => item['date'])
addContent(links, '🔗', item => item['title'], item => item['date'])
addContent(books.filter(book => book['status'] === 'finished'), '📖', item => `${item['title']}${item['rating'] ? ' (' + item['rating'] + ')' : ''}`, item => item['date'])
addContent(movies, '🎥', item => `${item['title']}${item['rating'] ? ' (' + item['rating'] + ')' : ''}`, item => item['lastWatched'])
return aggregateContent.sort((a, b) => {
const dateA = a.date ? DateTime.fromISO(a.date) : DateTime.fromMillis(0)
const dateB = b.date ? DateTime.fromISO(b.date) : DateTime.fromMillis(0)
const dateA = a['date'] ? DateTime.fromISO(a['date']) : DateTime.fromMillis(0)
const dateB = b['date'] ? DateTime.fromISO(b['date']) : DateTime.fromMillis(0)
return dateB - dateA
})
}
export const tagList = (collection) => {
const tagsSet = new Set()
collection.getAll().forEach((item) => {
if (!item.data.tags) return
item.data.tags
.filter((tag) => !['posts', 'all'].includes(tag))
.forEach((tag) => tagsSet.add(tag))
})
return Array.from(tagsSet).sort()
}
export const tagMap = (collection) => {
const tags = {}
const collectionData = collection.getAll()[0]
const { data } = collectionData
const { collections: { posts, links }, books } = data
const processItems = (items, getUrl, getTags) => {
if (items) {
items.forEach((item) => {
const url = getUrl(item)
const tagString = [...new Set(getTags(item).map(tag => tagAliases[tag.toLowerCase()]))]
.join(' ')
.trim()
.replace(/\s+/g, ' ')
if (tagString) tags[url] = tagString
})
}
}
processItems(posts, item => item.url.includes('http') ? item.url : `https://coryd.dev${item.url}`, item => item.data.tags || [])
processItems(links, item => item.data.link, item => item.data.tags || [])
processItems(books, item => item.tags || [], item => item.tags || [])
return tags
}
export const tagsSortedByCount = (collection) => {
const tagStats = {}
collection.getFilteredByGlob('src/posts/**/*.*').forEach((item) => {
if (!item.data.tags) return
item.data.tags
.filter((tag) => !['posts', 'all', 'politics', 'net neutrality'].includes(tag))
.forEach((tag) => {
if (!tagStats[tag]) tagStats[tag] = 1
if (tagStats[tag]) tagStats[tag] = tagStats[tag] + 1
})
})
return Object.entries(tagStats).sort((a, b) => b[1] - a[1]).map(([key, value]) => `${key}`)
}
export const links = (collection) => collection.getFilteredByGlob('src/links/**/*.*').reverse()
export const booksToRead = (collection) => collection.getAll()[0].data.books.filter(book => book.status === 'want to read').sort((a, b) => a['title'].toLowerCase().localeCompare(b['title'].toLowerCase()))
}
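
Note: a minimal sketch (an assumption about Eleventy's data cascade, not code from this commit) of why the helpers above now read flat fields such as post.title: the Supabase-backed files in src/_data/ (posts.js, links.js, books.js) surface as global data on every collection item.

// sketch: global data files are available via the data cascade of any collection item
export const exampleCollection = (collection) => {
  const { data } = collection.getAll()[0]   // first item carries the cascade
  const { posts, links, books } = data      // from src/_data/*.js, not collections.*
  return posts.map(post => ({ title: post.title, url: post.url }))
}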

View file

@ -1,242 +0,0 @@
export default {
"https://www.todayintabs.com": "Today in Tabs",
"https://keithjgrant.com": "Keith J. Grant",
"https://mxb.dev": "Max Böck",
"https://niclake.me": "Nic Lake",
"https://www.anildash.com": "Anil Dash",
"https://knowler.dev": "Nathan Knowler",
"https://www.vox.com": "Vox",
"https://rachsmith.com": "Rach Smith",
"https://zicklepop.com": "zicklepop",
"https://simonwillison.net": "Simon Willison",
"https://futurism.com": "Futurism",
"https://adamjones.me": "Adam Jones",
"https://janmaarten.com": "Jan Maarten",
"https://sheep.horse": "Andrew Stephens",
"https://nautil.us": "Nautilus",
"https://www.takahe.org.nz": "takahē magazine",
"https://shkspr.mobi": "Terence Eden",
"https://www.statsignificant.com": "Daniel Parris",
"https://statsignificant.com": "Daniel Parris",
"https://fy.blackhats.net.au": "Firstyear",
"https://512pixels.net" : "Stephen Hackett",
"https://bradfrost.com": "Brad Frost",
"https://theverge.com": "The Verge",
"https://www.theverge.com": "The Verge",
"https://www.theguardian.com": "The Guardian",
"https://www.newyorker.com": "The New Yorker",
"https://ploum.net": "Ploum.net",
"https://www.businessinsider.com": "Business Insier",
"https://ergaster.org": "Thibault Martin",
"https://www.oddbird.net": "OddBird",
"https://chrishannah.me": "Chris Hannah",
"https://robinrendle.com": "Robin Rendle",
"https://www.nngroup.com": "Nielsen Norman Group",
"https://www.vulture.com": "Vulture",
"https://blog.danslimmon.com": "Dan Slimmon",
"https://freddiedeboer.substack.com": "Freddie deBoer",
"https://educatedguesswork.org": "Eric Rescorla",
"https://www.theprivacywhisperer.com": "Luiza Jarovsky",
"https://www.matuzo.at": "Manuel Matuzović",
"https://theoutline.com": "The Outline",
"https://esif.dev": "Educational Sensational Inspirational Foundational",
"https://chriscoyier.net": "Chris Coyier",
"https://everythingchanges.us": "Mandy Brown",
"https://addyosmani.com": "Addy Osmani",
"https://thehistoryoftheweb.com": "The History of the Web",
"https://css-tricks.com": "CSS Tricks",
"https://www.latimes.com": "The LA Times",
"https://seldo.com": "There's no such thing as the fundamentals of web development | Seldo.com",
"https://defector.com": "Defector",
"https://heather-buchel.com": "Heather Buchel",
"https://blog.thecodewhisperer.com": "J. B. Rainsberger",
"https://thehardtimes.net": "Dan Rice",
"https://www.invisibleoranges.com": "Invisible Oranges",
"https://nazhamid.com": "Naz Hamid",
"https://ethanmarcotte.com": "Ethan Marcotte",
"https://blog.jim-nielsen.com": "Jim Nielsen",
"https://lucybellwood.com": "Lucy Bellwood",
"https://dev.to": "Dev.to Community",
"https://karawynn.substack.com": "Karawynn Long",
"https://leahreich.substack.com": "Leah Reich",
"https://erinkissane.com": "Erin Kissane",
"https://www.smashingmagazine.com": "Smashing Magazine",
"https://rknight.me": "Robb Knight",
"https://stephanango.com": "Steph Ango",
"https://pxlnv.com": "Nick Heer",
"https://jacobin.com": "Jacobin",
"https://www.techdirt.com": "TechDirt",
"https://www.zachleat.com": "Zach Leatherman",
"https://derekkedziora.com": "Derek Kedziora",
"https://www.afterbabel.com": "Freya India",
"https://jenniferplusplus.com": "Jennifer Moore",
"https://www.wired.com": "Wired",
"https://blog.j11y.io": "James Padolsey",
"https://www.hunker.com": "Hunker",
"https://www.htmhell.dev": "HTMHell",
"https://blakewatson.com": "blakewatson.com omg.lol: an oasis on the internetImported Layers",
"https://www.ursulakleguin.com": "Ursula K. Le Guin",
"https://pepelsbey.dev": "Vadim Makeev",
"https://slate.com": "Slate",
"https://adrianroselli.com": "Adrian Roselli",
"https://manuelmoreale.com": "Manu Moreale",
"https://frontside.com": "Frontside",
"https://xeiaso.net": "Xe Iaso",
"https://locusmag.com": "Locus",
"https://wheresyoured.at": "Ed Zitron",
"https://jacobian.org": "Jacob Kaplan-Moss",
"https://danmcquillan.org": "Dan Mcquillan",
"https://gkeenan.co": "Keenan",
"https://fsfe.org": "FSFE",
"https://pdx.su": "Jeff Sandberg",
"https://chrismcleod.dev": "Chris McLeod",
"https://flamedfury.com": "fLaMEd",
"https://www.rollingstone.com": "Rolling Stone",
"https://multiline.co": "Multiline Comment",
"https://thebaffler.com": "The Baffler",
"https://www.mayank.co": "Mayank",
"https://benmyers.dev": "Ben Myers",
"https://infrequently.org": "Alex Russell",
"https://12daysofweb.dev": "12 Days of Web",
"https://www.scientificamerican.com": "Scientific American",
"https://cdevroe.com": "Colin Devroe",
"https://tomcritchlow.com": "Tom Critchlow",
"https://deathtobullshit.com": "Death to Bullshit",
"https://eftegarie.com": "Amin Eftegarie",
"https://library.xandra.cc": "the library of alexandra",
"https://notebook.wesleyac.com": "Wesley Aptekar-Cassels",
"https://24ways.org": "24 ways",
"https://www.daniel.pizza": "Daniël van der Winden",
"https://www.thejaymo.net": "Jay Springett",
"https://dariusforoux.com": "Darius Foroux",
"https://briankoberlein.com": "Brian Koberlein",
"https://benhoyt.com": "Ben Hoyt",
"https://atthis.link": "Marc",
"https://ar.al": "Aral Balkan",
"https://maerk.xyz": "Mark",
"https://newrepublic.com": "Gil Duran",
"https://prospect.org": "David Dayen",
"https://gilest.org": "Giles Turnbull",
"https://www.jackcheng.com": "Jack Cheng",
"https://fromjason.xyz": "Jason Velazquez",
"https://www.fromjason.xyz": "JASON VELAZQUEZ",
"https://meyerweb.com": "Eric A. Meyer",
"https://citationneeded.news": "Molly White",
"https://hypercritical.co": "John Siracusa",
"https://thathtml.blog": "Jared White",
"https://proton.me": "Proton",
"https://blog.cassidoo.co": "Cassidy Williams",
"https://www.cassey.dev": "Cassey Lottman",
"https://doctorow.medium.com": "Cory Doctorow",
"https://jaredwhite.com": "Jared White",
"https://remotesynthesis.com": "Remote Synthesis",
"https://inessential.com": "Brent Simmons",
"https://read.engineerscodex.com": "Leonardo Creed",
"https://waxy.org": "Andy Baio",
"https://www.anildash.com": "Anil Dash",
"https://macwright.com": "Tom MacWright",
"http://maerk.xyz": "Mark",
"https://www.spacebar.news": "the spacebar",
"https://johan.hal.se": "Johan Halse",
"https://joanwestenberg.com": "Joan Westenberg",
"https://zachholman.com": "Zach Holman",
"https://jakelazaroff.com": "Jake Lazaroff",
"https://cloudfour.com": "Cloud Four",
"https://joe-steel.com": "Joe Rosensteel",
"https://aworkinglibrary.com": "Mandy Brown",
"https://max.engineer": "Max Chernyak",
"https://hamatti.org": "Juha-Matti Santala",
"https://blog.carlana.net": "Carlana Johnson",
"https://darkvisitors.com": "Dark Visitors",
"https://gomakethings.com": "Chris Ferdinandi",
"https://www.wheresyoured.at": "Ed Zitron",
"https://httpster.io": "Sami",
"https://www.tylerjfisher.com": "Tyler Fisher",
"https://baldurbjarnason.com": "Baldur Bjarnason",
"https://www.baldurbjarnason.com": "Baldur Bjarnason",
"https://lmnt.me": "Louie Mantia",
"https://www.lrb.co.uk": "London Review of Books",
"https://piccalil.li": "Andy Bell",
"https://robhorning.substack.com": "Rob Horning",
"https://explodingcomma.com": "Exploding Comma",
"https://mikegrindle.com": "Mike Grindle",
"https://garden.mattstein.com": "Matt Stein",
"https://notes.neatnik.net": "Adam Newbold",
"https://frills.dev": "Frills",
"https://iamfran.com": "Fran",
"https://www.citationneeded.news": "Molly White",
"https://www.musicbusinessworldwide.com": "Music Business Worldwide",
"https://joshcollinsworth.com": "Josh Collinsworth",
"https://collider.com": "Chase Hutchinson",
"https://bastianallgeier.com": "Bastian Allgeier",
"https://storyfair.net": "Sean Hinn",
"https://nuejs.org": "Nue",
"https://whitep4nth3r.com": "Salma Alam-Naylor",
"https://www.garbageday.email": "Ryan Broderick",
"https://lynnandtonic.com": "Lynn Fisher",
"https://blog.lukaszwojcik.net": "Łukasz Wójcik",
"https://ohhelloana.blog": "Ana Rodrigues",
"https://tonsky.me": "Nikita Prokopov",
"https://blog.chriszacharias.com": "Chris Zacharias",
"https://www.stereogum.com": "Stereogum",
"https://paulrobertlloyd.com": "Paul Robert Lloy",
"https://www.ramijames.com": "Rami James",
"https://connortumbleson.com": "Connor Tumbleson",
"http://www.aaronsw.com": "Aaron Swartz",
"https://tante.cc": "Jürgen Geuter",
"https://www.404media.co": "T404 Media",
"https://kevquirk.com": "Kev Quirk",
"https://zacharylipez.ghost.io": "Zachary Lipez",
"https://www.tbray.org": "Tim Bray",
"https://alistapart.com": "A List Apart",
"https://abookapart.com": "A Book ApartThe Wax and the Wane of the Web ",
"https://www.esquire.com": "Esquire",
"https://anhvn.com": "Anh",
"https://www.avclub.com": "The AV Club",
"https://yatil.net": "Eric Eggert",
"https://kyleshevlin.com": "Kyle Shevlin",
"https://www.technologyreview.com": "MIT Technology Review",
"https://www.alexhyett.com": "Alex Hyett",
"https://nicolasgallagher.com": "Nicolas Gallagher",
"https://mxstbr.com": "Max Stoiber",
"https://disconnect.blog": "Paris Marx",
"https://matthiasott.com": "Matthias Ott",
"https://daught.me": "Nathaniel Daught",
"https://www.vice.com": "Vice",
"https://boehs.org": "Evan Boehs",
"https://theinternet.review": "Jared White",
"https://zeldman.com": "Jeffrey Zeldman",
"https://gittings.studio": "Gittings Studio",
"https://www.schneier.com": "Bruce Schneier",
"https://adactio.com": "Jeremy Keith",
"https://hidde.blog": "Hidde de Vries",
"https://darthmall.net": "Evan Sheehan",
"https://darnell.day": "Darnell Clayton",
"https://chrisburnell.com": "Chris Burnell",
"https://css-irl.info": "CSS { In Real Life }",
"https://ishadeed.com": "Ahmad Shadeed",
"https://theconversation.com": "Danielle Williams",
"https://shellsharks.com": "Michael Sass",
"https://www.independent.com": "Santa Barbara Independent",
"https://lasso-security.webflow.io": "Lasso Security",
"https://www.ellyloel.com": "Elly Loel",
"https://www.thewrap.com": "Natalie Korach",
"https://alan.norbauer.com": "Alan Norbauer",
"https://tylersticka.com": "Tyler Sticka",
"https://seirdy.one": "Rohan Kumar",
"https://bored.horse": "Thord D. Hedengren",
"https://underlap.org": "Glyn Normington",
"https://theintercept.com": "The Intercept",
"https://softwarecrisis.dev": "Baldur Bjarnason",
"https://daverupert.com": "Dave Rupert",
"https://www.aalto.fi": "Aalto University",
"https://www.hollywoodreporter.com": "The Hollywood Reporter",
"https://creativegood.com": "Creative Good",
"https://scottwillsey.com": "Scott Willsey",
"https://www.jeremiak.com": "Jeremia Kimelman",
"https://bjhess.com": "Barry Hess",
"https://www.sandofsky.com": "Benjamin Sandofsky",
"https://www.noemamag.com": "Noema Magazine",
"https://sixcolors.com" :"Six Colors",
"https://www.sixcolors.com" :"Six Colors"
}

View file

@ -1,61 +0,0 @@
export default {
'11ty': '#Eleventy',
accessibility: '#Accessibility',
ai: '#AI',
backblaze: '#Backblaze',
'black metal': '#BlackMetal',
blogging: '#Blogging',
books: '#Books',
climate: '#Climate',
crypto: '#Crypto',
css: '#CSS',
'death metal': '#DeathMetal',
design: '#Design',
development: '#WebDev',
economics: '#Economics',
eleventy: '#Eleventy',
email: '#Email',
emo: '#Emo',
fastmail: '#Email',
fiction: '#Fiction',
gmail: '#Email',
grindcore: '#Grindcore',
health: '#Health',
'indie web': '#IndieWeb #SmallWeb',
ios: '#iOS #Apple',
javascript: '#JavaScript',
'last.fm': '#Music',
journalism: '#Journalism',
labor: '#Work',
lastfm: '#Music',
macos: '#macOS #Apple',
mastodon: '#Mastodon',
music: '#Music',
musicbrainz: '#MusicBrainz',
mystery: '#Mystery',
netlify: '#Netlify',
nonfiction: '#NonFiction',
plex: '#Plex',
politics: '#Politics',
privacy: '#Privacy',
productivity: '#Productivity',
react: '#JavaScript',
rss: '#RSS',
shoegaze: '#Shoegaze',
science: "Science",
'science-fiction': '#SciFi',
scifi: '#SciFi',
slack: '#Slack',
'social media': '#SocialMedia',
sports: '#Sports',
spotify: '#Music',
supabase: '#Supabase',
'surveillance capitalism': '#SurveillanceCapitalism',
'tattoos': '#Tattoos',
tech: '#Tech',
television: '#TV',
technology: '#Tech',
thriller: '#Thriller',
tv: '#TV',
'web components': '#WebComponents'
}

View file

@ -2,10 +2,8 @@ import { DateTime } from 'luxon'
import { URL } from 'url'
import slugify from 'slugify'
import sanitizeHtml from 'sanitize-html';
import authors from '../data/author-map.js'
import { shuffleArray, sanitizeMediaString } from '../utilities/index.js'
const utmPattern = /[?&](utm_[^&=]+=[^&#]*)/gi
const BASE_URL = 'https://coryd.dev'
export default {
@ -19,18 +17,7 @@ export default {
const replacement = '&amp;'
return string.replace(pattern, replacement)
},
stripUtm: (string) => {
if (!string) return
return string.replace(utmPattern, '')
},
replaceQuotes: (string) => string.replace(/"/g, "'"),
slugifyString: (str) => {
return slugify(str, {
replacement: '-',
remove: /[#,&,+()$~%.'":*?<>{}]/g,
lower: true,
})
},
formatNumber: (number) => number.toLocaleString('en-US'),
shuffleArray,
@ -39,56 +26,29 @@ export default {
const normalizedPage = page.includes('.html') ? page.replace('.html', '/') : page
return !!normalizedPage && normalizedPage.includes(category) && !/\d+/.test(normalizedPage);
},
isPost: (url) => {
if (url.includes('post')) return true;
return false
},
// analytics
getPopularPosts: (posts, analytics) => {
return posts
.filter((post) => {
if (analytics.find((p) => p.url.includes(post.url))) return true
if (analytics.find((p) => p.url.includes(slugify(post.title).toLowerCase()))) return true
})
.sort((a, b) => {
const visitors = (page) => analytics.filter((p) => p.url.includes(page.url)).pop().value
const visitors = (page) => analytics.filter((p) => p.url.includes(slugify(page.title).toLowerCase())).pop().value
return visitors(b) - visitors(a)
})
},
tagLookup: (url, tagMap) => {
if (!url) return
if (url.includes('#artists')) return '#Music'
if (url.includes('https://coryd.dev/books')) return '#Books #FinishedReading'
if (url.includes('https://coryd.dev/watching')) return '#Movies #Watched'
return tagMap[url] || ''
},
// posts
filterByPostType: (posts, postType) => {
if (postType === 'featured') return shuffleArray(posts.filter(post => post.data.featured === true)).slice(0, 3)
if (postType === 'featured') return shuffleArray(posts.filter(post => post.featured === true)).slice(0, 3)
return posts.slice(0, 5)
},
truncateByWordCount: (text, wordCount) => {
const words = sanitizeHtml(text, { allowedTags: ['']}).split(/\s+/);
if (words.length > wordCount) return `<p>${words.slice(0, wordCount).join(' ').replace(/[^a-zA-Z0-9]+$/, '')}...</p>`
return text
},
// watching
featuredWatching: (watching, count) => shuffleArray(watching.filter(watch => watch.favorite === true)).slice(0, count),
// authors
authorLookup: (url) => {
if (!url) return null
const urlObject = new URL(url)
const baseUrl = urlObject.origin
return authors?.[baseUrl] || null
},
// dates
readableDate: (date) => {
return DateTime.fromISO(date).toFormat('LLLL d, yyyy')
},
isoDateOnly: (date, separator) => {
let d = new Date(date)
let month = '' + (d.getMonth() + 1)
@ -100,10 +60,6 @@ export default {
return [year, month, day].join(separator)
},
stringToDate: (string) => {
if (!string) return
return new Date(string)
},
oldPost: (date) => {
return DateTime.now().diff(DateTime.fromJSDate(new Date(date)), 'years').years > 3;
},
@ -154,8 +110,8 @@ export default {
const feedNote = '<hr/><p>This is a full text feed, but not all content can be rendered perfectly within the feed. If something looks off, feel free to <a href="https://coryd.dev">visit my site</a> for the original post.</p>'
// set the entry url
if (entry.url.includes('http')) url = entry.url
if (!entry.url.includes('http')) url = new URL(entry.url, BASE_URL).toString()
if (entry.url?.includes('http')) url = entry.url
if (!entry.url?.includes('http')) url = new URL(entry.url, BASE_URL).toString()
if (entry?.data?.link) url = entry.data.link
// set the entry excerpt
@ -270,21 +226,5 @@ export default {
: `<a href="/music/artists/${sanitizeMediaString(dataSlice[dataSlice.length - 1]['name_string'])}-${sanitizeMediaString(dataSlice[dataSlice.length - 1]['country'].toLowerCase())}">${dataSlice[dataSlice.length - 1]['name_string']}</a>`
return `${allButLast} and ${last}`
},
// tags
filterTags: (tags) => tags.filter((tag) => tag.toLowerCase() !== 'posts'),
formatTag: (string) => {
const capitalizeFirstLetter = (string) => string.charAt(0).toUpperCase() + string.slice(1)
const normalizedString = string.toLowerCase()
if (
normalizedString === 'ios' ||
normalizedString === 'macos' ||
normalizedString === 'css' ||
normalizedString === 'rss' ||
normalizedString === 'ai'
) return `#${string}`
if (!string.includes(' ')) return `#${capitalizeFirstLetter(string)}`
return `#${string.split(' ').map(s => capitalizeFirstLetter(s)).join('')}`
}
}

package-lock.json (generated): file diff suppressed because it is too large.

View file

@ -1,14 +1,12 @@
{
"name": "coryd.dev",
"version": "17.6.4",
"version": "18.0.0",
"description": "The source for my personal site. Built using 11ty.",
"type": "module",
"scripts": {
"start": "node ./scripts/og-images/index.js && eleventy --serve",
"start": "eleventy --serve",
"start:quick": "eleventy --serve --incremental --ignore-initial",
"build": "ELEVENTY_PRODUCTION=true eleventy",
"build:ogi": "node ./scripts/og-images/index.js",
"update:books": "cd ./scripts/books && node index.js && cd ../../",
"update:deps": "npm upgrade && ncu",
"debug": "DEBUG=Eleventy* npx @11ty/eleventy --serve"
},
@ -25,7 +23,6 @@
"@cdransf/select-pagination": "^1.3.1",
"@cdransf/theme-toggle": "^1.3.2",
"@daviddarnes/mastodon-post": "^1.3.0",
"@zachleat/webcare-webshare": "^1.0.3",
"minisearch": "^6.3.0",
"youtube-video-element": "^1.1.5"
},
@ -34,7 +31,7 @@
"@11ty/eleventy-fetch": "^4.0.1",
"@11ty/eleventy-plugin-syntaxhighlight": "^5.0.0",
"@11tyrocks/eleventy-plugin-lightningcss": "^1.4.0",
"@aws-sdk/client-s3": "^3.583.0",
"@aws-sdk/client-s3": "^3.588.0",
"@cdransf/eleventy-plugin-tabler-icons": "^1.5.0",
"@supabase/supabase-js": "^2.43.4",
"dotenv-flow": "^4.1.0",
@ -46,7 +43,6 @@
"markdown-it-anchor": "^9.0.1",
"markdown-it-footnote": "^4.0.0",
"sanitize-html": "^2.13.0",
"sharp": "^0.33.4",
"slugify": "^1.6.6",
"terser": "^5.31.0"
}

View file

@ -1,51 +0,0 @@
import fs from 'fs'
import fetch from 'node-fetch'
import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3'
import dotenv from 'dotenv';
dotenv.config({ path: new URL('../../.env.local', import.meta.url).pathname });
const s3Client = new S3Client({
credentials: {
accessKeyId: process.env.B2_ACCESS_KEY_ID,
secretAccessKey: process.env.B2_SECRET_ACCESS_KEY
},
endpoint: {
url: 'https://s3.us-west-001.backblazeb2.com',
},
region: 'us-west-1',
});
const booksData = fs.readFileSync('../../src/_data/json/read.json', 'utf8');
const books = JSON.parse(booksData);
async function processBooks() {
for (const book of books) {
if (book.thumbnail.startsWith('https://coryd.dev/media/books/')) continue
const cleanedThumbnailURL = book.thumbnail.replace('&edge=curl', '');
const fileName = `${book.isbn}-${book.title.replace(/\s+/g, '-').replace(/[^\w\-]+/g, '').replace(/\-{2,}/g, '-').replace(/^-+|-+$/g, '') .toLowerCase().replace(/\s+/g, '-')}.jpg`;
const fileKey = `books/${fileName}`
try {
const response = await fetch(cleanedThumbnailURL);
const buffer = await response.buffer();
const putObjectParams = {
Bucket: process.env.B2_BUCKET_NAME,
Key: fileKey,
Body: buffer,
ContentType: 'image/jpeg'
};
await s3Client.send(new PutObjectCommand(putObjectParams));
book.thumbnail = `https://coryd.dev/media/books/${fileName}`;
console.log(`Uploaded and updated ${book.title}`);
} catch (error) {
console.error(`Failed to process ${book.title}: ${error}`);
}
}
fs.writeFileSync('../../src/_data/json/read.json', JSON.stringify(books, null, 2));
}
processBooks();

View file

@ -1,73 +0,0 @@
import { promises as fs } from 'fs'
import path from 'path'
import sharp from 'sharp'
import matter from 'gray-matter'
import slugify from 'slugify'
import { Liquid } from 'liquidjs'
import { DateTime } from 'luxon'
import { fileURLToPath } from 'url'
const FILE_NAME = fileURLToPath(import.meta.url)
const DIR_NAME = path.dirname(FILE_NAME)
const baseDir = path.join(DIR_NAME, '../../src/posts')
const outputDir = path.join(DIR_NAME, '../../src/assets/img/ogi/')
const engine = new Liquid({ extname: '.liquid' })
engine.registerFilter('date', (isoDateString, formatString = 'MMMM d, yyyy') => {
const date = DateTime.fromISO(isoDateString)
return date.isValid ? date.toFormat(formatString) : isoDateString
})
engine.registerFilter('splitLines', (input, maxCharLength) => {
return input.split(' ').reduce((acc, cur) => {
if (!acc.length || acc[acc.length - 1].length + cur.length + 1 > maxCharLength) {
acc.push(cur)
} else {
acc[acc.length - 1] += ' ' + cur
}
return acc
}, [])
})
engine.registerFilter('slugify', (input) => slugify(input, { lower: true, strict: true, remove: /[*+~.()'"!:@]/g }))
const svgToPNG = async (filePath) => {
try {
const fileContent = await fs.readFile(filePath, 'utf8')
const { data } = matter(fileContent)
const svgTemplatePath = path.resolve(DIR_NAME, 'index.liquid')
const templateContent = await fs.readFile(svgTemplatePath, 'utf8')
const svgContent = await engine.parseAndRender(templateContent, { preview: { data: data, date: data.date }})
const outputFile = path.join(outputDir, `${engine.filters.slugify(data.title)}-preview.png`)
await fs.mkdir(outputDir, { recursive: true })
await sharp(Buffer.from(svgContent)).png().toFile(outputFile)
} catch (error) {
console.error('Error processing file:', error)
}
}
const processPostDirectories = async (baseDir) => {
try {
const yearDirs = await fs.readdir(baseDir, { withFileTypes: true })
for (const dirent of yearDirs) {
if (dirent.isDirectory()) {
const yearPath = path.join(baseDir, dirent.name)
const markdownFiles = await fs.readdir(yearPath, { withFileTypes: true })
for (const file of markdownFiles) {
if (file.isFile() && file.name.endsWith('.md')) {
const filePath = path.join(yearPath, file.name)
await svgToPNG(filePath)
}
}
}
}
} catch (error) {
console.error('Failed to process directories:', error)
}
}
const generateOgImages = async () => await processPostDirectories(baseDir)
generateOgImages()

View file

@ -1,46 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="1200" height="630" viewBox="0 0 1200 630" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
{% assign titleInLines = preview.data.title | splitLines: 40 %}
{% assign numberOfLines = titleInLines.length %}
{% if numberOfLines == 1 %}
{% assign verticalStartingPoint = 340 %}
{% elsif numberOfLines == 2 %}
{% assign verticalStartingPoint = 290 %}
{% elsif numberOfLines == 3 %}
{% assign verticalStartingPoint = 250 %}
{% elsif numberOfLines == 4 %}
{% assign verticalStartingPoint = 210 %}
{% elsif numberOfLines == 5 %}
{% assign verticalStartingPoint = 170 %}
{% endif %}
<svg id="visual" viewBox="0 0 1200 630" width="1200" height="630" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1"><rect x="0" y="0" width="1200" height="630" fill="#000"></rect></svg>
<!-- date -->
<text
font-family="MonoLisa, Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, ui-monospace, monospace"
font-size="24"
font-weight="800"
fill="#fff"
>
<tspan x="80" y="{{ verticalStartingPoint | minus: 120 }}">
{{ preview.data.date | date: "MMMM d, yyyy" }}
</tspan>
</text>
<!-- title -->
<text
id="text"
font-family="MonoLisa, Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, ui-monospace, monospace"
font-size="40"
font-weight="800"
fill="#fff"
>
{% for line in titleInLines %}
{% capture spacing %}{{ forloop.index0 | times: 50 }}{% endcapture %}
<tspan x="80" y="{{ verticalStartingPoint | plus: spacing }}">
{{ line }}
</tspan>
{% endfor %}
</text>
</svg>


View file

@ -16,7 +16,7 @@ export default async function () {
image,
release_date,
release_link,
artists (name_string, genre, mbid, country)
artists (name_string, mbid, country)
`)
.gt('release_date', today)
@ -31,8 +31,7 @@ export default async function () {
title: album['name'],
date: DateTime.fromISO(album['release_date']).toLocaleString(DateTime.DATE_FULL),
url: album['release_link'],
artist_url: `https://coryd.dev/music/artists/${sanitizeMediaString(album['artists']['name_string'])}-${sanitizeMediaString(parseCountryField(album['artists']['country']))}`,
genre: album['artists']['genre'],
artist_url: `/music/artists/${sanitizeMediaString(album['artists']['name_string'])}-${sanitizeMediaString(parseCountryField(album['artists']['country']))}`,
mbid: album['artists']['mbid'],
timestamp: DateTime.fromISO(album['release_date']).toSeconds()
}

View file

@ -1,8 +1,8 @@
import { createClient } from '@supabase/supabase-js'
import { parseCountryField } from '../../config/utilities/index.js'
const SUPABASE_URL = process.env.SUPABASE_URL || 'YOUR_SUPABASE_URL'
const SUPABASE_KEY = process.env.SUPABASE_KEY || 'YOUR_SUPABASE_KEY'
const SUPABASE_URL = process.env.SUPABASE_URL
const SUPABASE_KEY = process.env.SUPABASE_KEY
const supabase = createClient(SUPABASE_URL, SUPABASE_KEY)
const PAGE_SIZE = 50
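
Note: with the placeholder fallbacks removed, SUPABASE_URL and SUPABASE_KEY must now come from the environment; a sketch of the expected .env entries (the variable names are from this file, the values are placeholders):

# .env / .env.local (placeholder values)
SUPABASE_URL=https://your-project.supabase.co
SUPABASE_KEY=your-anon-or-service-role-key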
@ -36,13 +36,29 @@ const fetchPaginatedData = async (table, selectFields) => {
return data
}
export default async function () {
const artists = await fetchPaginatedData('artists', 'mbid, name_string, image, genre, total_plays, country, description, favorite')
const albums = await fetchPaginatedData('albums', 'mbid, name, release_year, artist_mbid, total_plays')
const fetchGenreMapping = async () => {
const { data, error } = await supabase
.from('genres')
.select('id, name')
if (error) {
console.error('Error fetching genres:', error)
return {}
}
return data.reduce((acc, genre) => {
acc[genre.id] = genre.name
return acc
}, {})
}
export default async function () {
const genreMapping = await fetchGenreMapping()
const artists = await fetchPaginatedData('artists', 'id, mbid, name_string, image, total_plays, country, description, favorite, genres')
const albums = await fetchPaginatedData('albums', 'mbid, name, release_year, total_plays, artist')
const albumsByArtist = albums.reduce((acc, album) => {
if (!acc[album.artist_mbid]) acc[album.artist_mbid] = []
acc[album.artist_mbid].push({
if (!acc[album.artist]) acc[album.artist] = []
acc[album.artist].push({
id: album.id,
name: album.name,
release_year: album.release_year,
@ -51,10 +67,11 @@ export default async function () {
return acc
}, {})
artists.forEach(artist => {
artist.albums = albumsByArtist[artist.mbid]?.sort((a, b) => a['release_year'] - b['release_year']) || []
for (const artist of artists) {
artist.albums = albumsByArtist[artist.id]?.sort((a, b) => a['release_year'] - b['release_year']) || []
artist.country = parseCountryField(artist.country)
})
artist.genres = genreMapping[artist.genres] || ''
}
return artists
}

View file

@ -1,30 +1,77 @@
import { createRequire } from 'module'
import { createClient } from '@supabase/supabase-js'
const require = createRequire(import.meta.url)
const books = require('./json/read.json')
const SUPABASE_URL = process.env.SUPABASE_URL
const SUPABASE_KEY = process.env.SUPABASE_KEY
const supabase = createClient(SUPABASE_URL, SUPABASE_KEY)
const PAGE_SIZE = 1000
const fetchTagsForBook = async (bookId) => {
const { data, error } = await supabase
.from('books_tags')
.select('tags(id, name)')
.eq('books_id', bookId)
if (error) {
console.error(`Error fetching tags for book ${bookId}:`, error)
return []
}
return data.map(bt => bt.tags.name)
}
async function fetchAllBooks() {
let books = []
let from = 0
let to = PAGE_SIZE - 1
while (true) {
const { data, error } = await supabase
.from('books')
.select('*')
.range(from, to)
if (error) {
console.error('Error fetching data from Supabase:', error)
break
}
for (const book of data) {
book.tags = await fetchTagsForBook(book.id)
}
books = books.concat(data)
if (data.length < PAGE_SIZE) break
from += PAGE_SIZE
to += PAGE_SIZE
}
return books
}
export default async function () {
const books = await fetchAllBooks()
return books.map(book => {
let authors = ''
let date = book?.['dateAdded']
if (book['authors']?.length > 1) authors = book['authors'].join(', ')
if (book['authors']?.length === 1) authors = book['authors'][0]
if (book?.['dateStarted']) date = book['dateStarted']
if (book?.['dateFinished']) date = book['dateFinished']
const author = book['author'] || ''
let date = book?.['date_finished']
if (book?.['date_started']) date = book['date_started']
if (book?.['date_finished']) date = book['date_finished']
return {
title: book['title'],
authors,
author,
description: book['description'],
image: book['thumbnail'],
url: `https://coryd.dev/books/${book['isbn']}`,
url: `/books/${book['isbn']}`,
date,
status: book['status'],
tags: book['tags'],
categories: book['categories']?.length > 1 ? book['categories'].join(', ') : book['categories']?.[0],
rating: book['rating'] !== 'unrated' ? book['rating'] : '',
isbn: book['isbn'],
type: 'book',
}
})
}
}

View file

@ -1,8 +1,8 @@
import { createClient } from '@supabase/supabase-js'
import { parseCountryField } from '../../config/utilities/index.js'
const SUPABASE_URL = process.env.SUPABASE_URL || 'YOUR_SUPABASE_URL'
const SUPABASE_KEY = process.env.SUPABASE_KEY || 'YOUR_SUPABASE_KEY'
const SUPABASE_URL = process.env.SUPABASE_URL
const SUPABASE_KEY = process.env.SUPABASE_KEY
const supabase = createClient(SUPABASE_URL, SUPABASE_KEY)
export default async function fetchGenresWithArtists() {

File diff suppressed because it is too large.

View file

@ -3,42 +3,42 @@ export default {
{
title: 'Stay True',
authors: 'Hua Hsu',
image: 'https://coryd.dev/media/books/9780385547772-stay-true.jpg',
image: '/media/books/9780385547772-stay-true.jpg',
url: 'https://openlibrary.org/isbn/9780593663660',
type: 'book',
},
{
title: 'Where Are Your Boys Tonight?',
authors: 'Chris Payne',
image: 'https://coryd.dev/media/books/9780063251281-where-are-your-boys-tonight.jpg',
image: '/media/books/9780063251281-where-are-your-boys-tonight.jpg',
url: 'https://openlibrary.org/isbn/9780063161573',
type: 'book',
},
{
title: 'Trouble Boys',
authors: 'Bob Mehr',
image: 'https://coryd.dev/media/books/0306818795-trouble-boys.jpg',
image: '/media/books/0306818795-trouble-boys.jpg',
url: 'https://openlibrary.org/isbn/9780306818790',
type: 'book',
},
{
title: 'Corporate Rock Sucks',
authors: 'Jim Ruland',
image: 'https://coryd.dev/media/books/9780306925481-corporate-rock-sucks.jpg',
image: '/media/books/9780306925481-corporate-rock-sucks.jpg',
url: 'https://openlibrary.org/isbn/9780306925474',
type: 'book',
},
{
title: 'Tracers in the Dark',
authors: 'Andy Greenberg',
image: 'https://coryd.dev/media/books/0593315618-tracers-in-the-dark.jpg',
image: '/media/books/0593315618-tracers-in-the-dark.jpg',
url: 'http://openlibrary.org/isbn/9780385548106',
type: 'book',
},
{
title: 'Girl in a Band',
authors: 'Kim Gordon',
image: 'https://coryd.dev/media/books/9780062295897-kim-gordon-girl-in-a-band.jpg',
image: '/media/books/9780062295897-kim-gordon-girl-in-a-band.jpg',
url: 'https://openlibrary.org/isbn/9780062295910',
type: 'book',
}
@ -47,56 +47,56 @@ export default {
{
title: 'the whaler',
artist: 'home is where',
image: 'https://coryd.dev/media/albums/home-is-where-the-whaler.jpg',
image: '/media/albums/home-is-where-the-whaler.jpg',
url: 'https://musicbrainz.org/release-group/6fe3516f-c324-4265-8f43-d902f3a4cc20',
type: 'album',
},
{
title: 'The Enduring Spirit',
artist: 'Tomb Mold',
image: 'https://coryd.dev/media/albums/tomb-mold-the-enduring-spirit.jpg',
image: '/media/albums/tomb-mold-the-enduring-spirit.jpg',
url: 'https://musicbrainz.org/release-group/cd3e5dfb-acca-4856-80f6-2e095ac3270d',
type: 'album',
},
{
title: 'A Dialogue With The Eeriest Sublime',
artist: 'Vertebra Atlantis',
image: 'https://coryd.dev/media/albums/vertebra-atlantis-a-dialogue-with-the-eeriest-sublime.jpg',
image: '/media/albums/vertebra-atlantis-a-dialogue-with-the-eeriest-sublime.jpg',
url: 'https://musicbrainz.org/release-group/b8f1913b-f461-443c-a26c-377b259f2af6',
type: 'album',
},
{
title: 'ONE MORE TIME...',
artist: 'blink-182',
image: 'https://coryd.dev/media/albums/blink-182-one-more-time.jpg',
image: '/media/albums/blink-182-one-more-time.jpg',
url: 'https://musicbrainz.org/release-group/520d6d45-19c8-4ee1-a954-180e7902f3da',
type: 'album',
},
{
title: 'Life Like',
artist: 'Dead Bob',
image: 'https://coryd.dev/media/albums/dead-bob-life-like.jpg',
image: '/media/albums/dead-bob-life-like.jpg',
url: 'https://musicbrainz.org/release-group/ab53e625-74af-4a09-a8ff-e1c08dbae596',
type: 'album',
},
{
title: 'Threads of Unknowing',
artist: 'VoidCeremony',
image: 'https://coryd.dev/media/albums/voidceremony-threads-of-unknowing.jpg',
image: '/media/albums/voidceremony-threads-of-unknowing.jpg',
url: 'https://musicbrainz.org/release-group/f1f91cde-ff57-41c8-bd58-28c236b3f0c6',
type: 'album',
},
{
title: 'Why Would I Watch',
artist: 'Hot Mulligan',
image: 'https://coryd.dev/media/albums/hot-mulligan-why-would-i-watch.jpg',
image: '/media/albums/hot-mulligan-why-would-i-watch.jpg',
url: 'https://musicbrainz.org/release-group/5afd31ea-3a96-4b99-a477-4d121efaedec',
type: 'album',
},
{
title: 'Losing What We Love',
artist: 'Knuckle Puck',
image: 'https://coryd.dev/media/albums/knuckle-puck-losing-what-we-love.jpg',
image: '/media/albums/knuckle-puck-losing-what-we-love.jpg',
url: 'https://musicbrainz.org/release-group/b51d8882-3854-400a-b79b-4353a77a389b',
type: 'album',
}

src/_data/links.js (new file, 55 lines)
View file

@ -0,0 +1,55 @@
import { createClient } from '@supabase/supabase-js'
const SUPABASE_URL = process.env.SUPABASE_URL
const SUPABASE_KEY = process.env.SUPABASE_KEY
const supabase = createClient(SUPABASE_URL, SUPABASE_KEY)
const PAGE_SIZE = 50
const fetchTagsForLink = async (linkId) => {
const { data, error } = await supabase
.from('links_tags')
.select('tags(id, name)')
.eq('links_id', linkId)
if (error) {
console.error(`Error fetching tags for link ${linkId}:`, error)
return []
}
return data.map((lt) => lt.tags.name)
}
const fetchAllLinks = async () => {
let links = []
let page = 0
let fetchMore = true
while (fetchMore) {
const { data, error } = await supabase
.from('links')
.select('*, authors (name, url)')
.order('date', { ascending: false })
.range(page * PAGE_SIZE, (page + 1) * PAGE_SIZE - 1)
if (error) {
console.error('Error fetching links:', error)
return links
}
if (data.length < PAGE_SIZE) fetchMore = false
for (const link of data) {
link.tags = await fetchTagsForLink(link.id)
}
links = links.concat(data)
page++
}
return links
}
export default async function () {
return await fetchAllLinks()
}

View file

@ -51,7 +51,7 @@ export default async function () {
lastWatched: item['last_watched'],
dateAdded: item['last_watched'],
year: item['year'],
url: `https://coryd.dev/watching/movies/${item['tmdb_id']}`,
url: `/watching/movies/${item['tmdb_id']}`,
description: `${item['title']} (${item['year']})<br/>Watched at: ${DateTime.fromISO(item['last_watched'], { zone: 'utc' }).setZone('America/Los_Angeles').toFormat('MMMM d, yyyy, h:mma')}`,
image: `https://coryd.dev/media/movies/poster-${item['tmdb_id']}.jpg`,
backdrop: `https://coryd.dev/media/movies/backdrops/backdrop-${item['tmdb_id']}.jpg`,
@ -76,6 +76,5 @@ export default async function () {
recentlyWatched: formatMovieData(recentlyWatchedMovies),
favorites: formatMovieData(favoriteMovies).sort((a, b) => a['title'].localeCompare(b['title'])),
collection: formatMovieData(collectedMovies),
toWatch: formatMovieData(movies, false).sort((a, b) => a['title'].localeCompare(b['title'])),
}
}

View file

@ -59,8 +59,25 @@ const fetchAllTimeData = async (fields, table) => {
return rows
}
const fetchGenreMapping = async () => {
const { data, error } = await supabase
.from('genres')
.select('id, name')
if (error) {
console.error('Error fetching genres:', error)
return {}
}
return data.reduce((acc, genre) => {
acc[genre.id] = genre.name
return acc
}, {})
}
const aggregateData = (data, groupByField, groupByType) => {
const aggregation = {}
const genreMapping = fetchGenreMapping()
data.forEach(item => {
const key = item[groupByField]
@ -70,21 +87,21 @@ const aggregateData = (data, groupByField, groupByType) => {
title: item[groupByField],
plays: 0,
mbid: item['albums']['mbid'],
url: `https://coryd.dev/music/artists/${sanitizeMediaString(item['artist_name'])}-${sanitizeMediaString(parseCountryField(item['artists']['country']))}`,
url: `/music/artists/${sanitizeMediaString(item['artist_name'])}-${sanitizeMediaString(parseCountryField(item['artists']['country']))}`,
image: item['albums']?.['image'] || '',
timestamp: item['listened_at'],
type: groupByType,
genre: item['artists']?.['genre'] || ''
genre: genreMapping[item['artists']['genre']] || ''
}
} else {
aggregation[key] = {
title: item[groupByField],
plays: 0,
mbid: item[groupByType]?.['mbid'] || '',
url: `https://coryd.dev/music/artists/${sanitizeMediaString(item['artist_name'])}-${sanitizeMediaString(parseCountryField(item['artists']['country']))}`,
url: `/music/artists/${sanitizeMediaString(item['artist_name'])}-${sanitizeMediaString(parseCountryField(item['artists']['country']))}`,
image: item[groupByType]?.image || '',
type: groupByType,
genre: item['artists']?.['genre'] || ''
genre: genreMapping[item['artists']['genre']] || ''
}
}
if (
@ -106,11 +123,11 @@ const aggregateData = (data, groupByField, groupByType) => {
const aggregateGenres = (data) => {
const genreAggregation = {}
const genreMapping = fetchGenreMapping()
data.forEach(item => {
const genre = item.artists.genre
if (!genreAggregation[genre]) {
genreAggregation[genre] = { genre, plays: 0 }
}
const genre = genreMapping[item['artists']['genre']] || ''
if (!genreAggregation[genre]) genreAggregation[genre] = { genre, plays: 0 }
genreAggregation[genre]['plays']++
})
return Object.values(genreAggregation).sort((a, b) => b['plays'] - a['plays'])
@ -130,7 +147,7 @@ export default async function() {
album_name,
album_key,
listened_at,
artists (mbid, image, genre, country),
artists (mbid, image, genres, country),
albums (mbid, image)
`
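
Note: fetchGenreMapping above returns a Promise, so a hypothetical usage sketch (not code from this commit; the extra genreMapping argument passed to the helpers is an assumption) resolves the id-to-name map once before the synchronous aggregation reads from it:

// hypothetical sketch: resolve the genre map once, then hand it to the aggregation helpers
const genreMapping = await fetchGenreMapping()
const topArtists = aggregateData(data, 'artist_name', 'artists', genreMapping)
const topGenres = aggregateGenres(data, genreMapping)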

src/_data/posts.js (new file, 62 lines)
View file

@ -0,0 +1,62 @@
import { createClient } from '@supabase/supabase-js'
import { DateTime } from 'luxon'
import slugify from 'slugify'
const SUPABASE_URL = process.env.SUPABASE_URL
const SUPABASE_KEY = process.env.SUPABASE_KEY
const supabase = createClient(SUPABASE_URL, SUPABASE_KEY)
const PAGE_SIZE = 50
const fetchTagsForPost = async (postId) => {
const { data, error } = await supabase
.from('posts_tags')
.select('tags(id, name)')
.eq('posts_id', postId)
if (error) {
console.error(`Error fetching tags for post ${postId}:`, error)
return []
}
return data.map(pt => pt.tags.name)
}
const fetchAllPosts = async () => {
let posts = []
let page = 0
let fetchMore = true
while (fetchMore) {
const { data, error } = await supabase
.from('posts')
.select('*')
.order('date', { ascending: false })
.range(page * PAGE_SIZE, (page + 1) * PAGE_SIZE - 1)
if (error) {
console.error('Error fetching posts:', error)
return posts
}
if (data.length < PAGE_SIZE) fetchMore = false
for (const post of data) {
post.tags = await fetchTagsForPost(post.id)
post.url = `/posts/${DateTime.fromISO(post.date).year}/${slugify(post.title, {
replacement: '-',
remove: /[#,&,+()$~%.'":*?<>{}\[\]\/\\|`!@\^\—]/g,
lower: true,
})}/`
}
posts = posts.concat(data)
page++
}
return posts
}
export default async function () {
return await fetchAllPosts()
}

View file

@ -90,14 +90,14 @@ export default async function () {
showEpisodesMap[showTmdbId].episodes.push({
name: showTitle,
url: `https://coryd.dev/watching/shows/${showTmdbId}`,
url: `/watching/shows/${showTmdbId}`,
subtext: `${showTitle} • S${seasonNumber}E${episodeNumber}`,
episode: episodeNumber,
season: seasonNumber,
tmdbId: showTmdbId,
type: 'tv',
image: `https://coryd.dev/media/shows/poster-${showTmdbId}.jpg`,
backdrop: `https://coryd.dev/media/shows/backdrops/backdrop-${showTmdbId}.jpg`,
image: `/media/shows/poster-${showTmdbId}.jpg`,
backdrop: `/media/shows/backdrops/backdrop-${showTmdbId}.jpg`,
dateAdded: lastWatchedAt,
lastWatchedAt
})
@ -114,7 +114,7 @@ export default async function () {
if (show.episodes.length > 1) {
episodeData.push({
name: show.title,
url: `https://coryd.dev/watching/shows/${show['tmdbId']}`,
url: `/watching/shows/${show['tmdbId']}`,
subtext: `S${startingSeason}E${startingEpisode} - S${endingSeason}E${endingEpisode}`,
startingEpisode,
startingSeason,
@ -124,8 +124,8 @@ export default async function () {
collected: show['collected'],
favorite: show['favorite'],
type: 'tv-range',
image: `https://coryd.dev/media/shows/poster-${show['tmdbId']}.jpg`,
backdrop: `https://coryd.dev/media/shows/backdrops/backdrop-${show['tmdbId']}.jpg`,
image: `/media/shows/poster-${show['tmdbId']}.jpg`,
backdrop: `/media/shows/backdrops/backdrop-${show['tmdbId']}.jpg`,
})
} else {
const singleEpisode = show['episodes'][0]
@ -140,7 +140,7 @@ export default async function () {
const favoriteShows = shows.filter(show => show['favorite'])
const collectedShows = shows.filter(show => show['collected'])
const toWatch = shows.map(show => ({...show, url: `https://coryd.dev/watching/shows/${show['tmdb_id']}`})).filter(show => !show.episodes.some(episode => episode.last_watched_at)).sort((a, b) => a['title'].localeCompare(b['title']))
const toWatch = shows.map(show => ({...show, url: `/watching/shows/${show['tmdb_id']}`})).filter(show => !show.episodes.some(episode => episode.last_watched_at)).sort((a, b) => a['title'].localeCompare(b['title']))
return {
shows,

View file

@ -25,7 +25,7 @@
{%- assign pageDescription = meta.siteDescription -%}
{%- if schema == 'blog' -%}
{%- assign pageDescription = post_excerpt | markdown | strip_html -%}
{%- assign pageDescription = post.description | markdown | strip_html -%}
{%- elsif artist.description -%}
{%- assign pageDescription = artist.description | truncate: 300 -%}
{%- elsif book.description -%}
@ -42,9 +42,6 @@
{%- assign ogImage = meta.meta_data.opengraph_default -%}
{%- case schema -%}
{%- when 'blog' -%}
{%- assign ogBlogSlug = title | slugifyString -%}
{%- assign ogImage = meta.url | append: '/assets/img/ogi/' | append: ogBlogSlug | append: '-preview.png' -%}
{%- when 'music' -%}
{%- assign ogImage = music.recent.artists[0].image -%}
{%- when 'music-index' -%}

View file

@ -10,7 +10,7 @@
"id": "{{ entry.url | btoa }}",
"title": "{{ entry.title | replaceQuotes }}",
"url": "{{ entry.url }}",
"content_text": "{{ entry.title | replaceQuotes }}{% if entry.url | tagLookup: tagMap %} {{ entry.url | tagLookup: tagMap }} {{ entry.url }}{% else %} {{ entry.url }}{% endif %}",
"content_text": "{{ entry.title | replaceQuotes }} {{ entry.url }}",
"date_published": "{{ entry.date | stringToRFC822Date }}"
}{% if not forloop.last %},{% endif %}
{%- endfor %}

View file

@ -15,17 +15,16 @@
<height>144</height>
</image>
{% for entry in entries limit: 20 -%}
{% assign author = entry.url | stripUtm | authorLookup %}
{% assign rating = entry.rating %}
<item>
<title>
{{ entry.title | escape }}
{% if author %} via {{ author }}{% endif %}
{% if entry.authors %} via {{ entry.authors.name }}{% endif %}
{% if rating %} ({{ rating }}){% endif %}
</title>
<link>{{ entry.url | stripUtm | encodeAmp }}</link>
<link>{{ entry.url | encodeAmp }}</link>
<pubDate>{{ entry.date | stringToRFC822Date }}</pubDate>
<guid>{{ entry.url | stripUtm | encodeAmp }}</guid>
<guid>{{ entry.url | encodeAmp }}</guid>
<description>{{ entry.excerpt | escape }}</description>
</item>
{%- endfor %}

View file

@ -1,7 +1,7 @@
{%- assign posts = postData | filterByPostType: postType %}
<div class="article-widget-wrapper">
<div class="section-header-wrapper">
<h2 id="artists" class="section-header posts flex-centered">
<h2 class="section-header posts flex-centered">
{% tablericon icon title %}
{{ title }}
</h2>
@ -15,11 +15,11 @@
</time>
</div>
<a href="{{ post.url }}">
<h2 class="flex-centered">{{ post.data.title }}</h2>
<h2 class="flex-centered">{{ post.title }}</h2>
</a>
<span class="p-author h-card hidden">{{ meta.siteName }}</span>
<div class="p-summary hidden">{{ post.data.post_excerpt }}</div>
{{ post.data.post_excerpt | markdown | truncateByWordCount: 25 }}
{{ post.description | truncate: 300 }}
</article>
{% endfor %}
{% if postType != 'featured' %}

View file

@ -3,7 +3,7 @@
<div class="media-grid {% if shape == 'square' %}square{% else %}vertical{% endif %}">
{% for item in media limit: count | default: media.size %}
{% assign alt = item.alt | strip | escape %}
<a href="{{ item.url | stripUtm }}" title="{{ alt }}">
<a href="{{ item.url }}" title="{{ alt }}">
<div class="item-wrapper shadow">
<div class="meta-text">
{% if item.title %}

View file

@ -1,14 +1,14 @@
{% assign posts = posts | getPopularPosts: analytics %}
{% if posts.size > 0 %}
{% assign postData = posts | getPopularPosts: analytics %}
{% if postData.size > 0 %}
<h2 class="link-list-header flex-centered">
{% tablericon "flame" "Popular" %}
Popular posts
</h2>
<ul class="link-list">
{% for post in posts limit: 5 %}
{% for post in postData limit: 5 %}
<li>
<a class="no-underline" href="{{post.url}}" title="{{ post.data.title | escape}}">
{{ post.data.title }}
<a class="no-underline" href="{{ post.url }}" title="{{ post.title | escape}}">
{{ post.title }}
</a>
</li>
{% endfor %}

View file

@ -5,12 +5,11 @@
</h2>
<ul class="link-list">
{% for link in links limit: 5 %}
{% assign author = link.data.link | stripUtm | authorLookup %}
<li>
<a href="{{ link.data.link }}" title="{{ link.data.title | escape }}">
{{ link.data.title }}
<a href="{{ link.link }}" title="{{ link.title | escape }}">
{{ link.title }}
</a>
{% if author %} via {{ author }}{% endif %}
{% if link.authors %} via <a href="{{ link.authors.url }}">{{ link.authors.name }}</a>{% endif %}
</li>
{% endfor %}
</ul>

View file

@ -1,4 +0,0 @@
<script type="module" src="/assets/scripts/components/webcare-webshare.js"></script>
<webcare-webshare share-text="{{ title }} {{ url | tagLookup: tagMap }}" share-url="{{ url }}" copy-text="{{ title }} {{ url | tagLookup: tagMap }} {{ url }}">
<button class="share icon-small icon-center-vertical" disabled>{% tablericon "share" "Share" %}</button>
</webcare-webshare>

View file

@ -1,6 +0,0 @@
{% assign filteredTags = tags | filterTags %}
<div{% if hasSpace %} style="margin-bottom:var(--sizing-md)"{% endif %}>
{% for tag in filteredTags limit: 10 %}
<a class="tag-element" href="/tags/{{ tag | downcase }}">{{ tag | formatTag }}</a>
{% endfor %}
</div>

View file

@ -7,21 +7,19 @@ schema: blog
<div class="default-wrapper">
<article class="h-entry">
<div class="flex-centered gap-xs icon-small icon-light">
{% render "partials/widgets/share-button.liquid", url:postUrl, title:title, tagMap:collections.tagMap %}
{% tablericon "calendar-month" "Date" %}
<time class="dt-published" datetime="{{ date }}">
{{ date | date: "%B %e, %Y" }}
</time>
</div>
<h2 class="p-name">{{ title }}</h2>
<div class="text-small">{% render "partials/widgets/tags.liquid", tags:tags %}</div>
<span class="p-author h-card hidden">{{ meta.author }}</span>
<div class="p-summary hidden">{{ post_excerpt }}</div>
<div class="p-summary hidden">{{ post.description }}</div>
<div class="e-content">
{% render "partials/banners/old-post.liquid", date:date %}
{% render "partials/banners/old-post.liquid", date:post.date %}
{{ content }}
</div>
</article>
</div>
{% render "partials/widgets/mastodon-post.liquid", postUrl:postUrl, linkPosts:linkPosts %}
{% render "partials/widgets/addon-links.liquid", posts:collections.posts, analytics:analytics, links:collections.links %}
{% render "partials/widgets/addon-links.liquid", posts:posts, analytics:analytics, links:collections.links %}

Binary files not shown (images removed; 23 KiB to 36 KiB each).

Some files were not shown because too many files have changed in this diff.