commit 2a0ac1b1072ca8cb723817f06680a8c0a8c7ea92
Author: AltStack Bot
Date: Wed Feb 25 22:36:27 2026 +0530
Initialize public data and docs repository
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2226e7a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,35 @@
+# Dependencies
+node_modules
+/.pnp
+.pnp.js
+
+# Testing
+/coverage
+
+# Next.js
+.next/
+out/
+
+# Production
+/build
+
+# Misc
+.DS_Store
+*.pem
+*.key
+
+# Debug
+npm-debug.log*
+yarn-debug.log*
+pnpm-debug.log*
+
+# Local env files
+.env*.local
+*.bak
+*_keys.json
+cors.json
+
+# Vercel build artifacts
+.vercel/output/
+.vercel/builders/
+build.log
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..af45c66
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,36 @@
+# Contributing to The Alt Stack Data & Docs 🥞
+
+Thank you for your interest in contributing! Our community helps maintain the accuracy and quality of our open-source data and documentation.
+
+## Types of Contributions
+
+1. **New Tools:** Adding an alternative to our dataset.
+2. **Data Fixes:** Correcting URLs, pricing, descriptions, or pros/cons.
+3. **Deployment Guides:** Writing or updating guides in `docs/app/deploy/`.
+
+## 1. Modifying Data (`/data/`)
+
+Our core data is stored in `data/tools.json`.
+
+1. Find the parent SaaS tool (e.g., "Slack") in the JSON structure.
+2. Add or modify the alternative under the `alternatives` array.
+3. Ensure you follow the structure defined in `data/schema/types.ts`.
+4. Run validation (if applicable locally) before committing.
+
+## 2. Modifying Documentation (`/docs/`)
+
+Our documentation is built with Next.js and Nextra. All pages are under `docs/app/`.
+
+1. Navigate to the appropriate folder (e.g., `docs/app/deploy` for guides).
+2. Create or edit the `.mdx` file.
+3. If creating a new page, make sure to add it to the adjacent `_meta.ts` file so it appears in the sidebar!
+
+## Pull Request Process
+
+1. Fork the repository and create your feature branch: `git checkout -b fix/name-of-tool-data`
+2. Make your targeted changes. **Keep PRs small** (e.g., fix one tool, don't change 50 things at once).
+3. Commit your changes with a descriptive message: `fix(data): update RocketChat pricing link`
+4. Open a Pull Request against our `main` branch.
+5. A maintainer will review your PR. We may request changes or ask clarifying questions.
+
+By contributing to this repository, you agree that your data and documentation contributions will be licensed under the CC BY 4.0 license.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..488a4b2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,39 @@
+# The Alt Stack Data & Docs 🥞
+
+Welcome to the public repository for **The Alt Stack** context and data!
+
+This repository contains:
+1. **The Alt Stack Dataset** (`/data`): Our curated JSON data of open-source alternatives to popular SaaS products.
+2. **The Documentation Site** (`/docs`): The source code for [docs.thealtstack.com](https://docs.thealtstack.com) containing 60+ deployment guides, concepts, and more.
+
+If you are looking for the main application (the UI, comparison engine, etc.), please note that the core application is closed-source. We open-source the data and documentation so that the community can help keep the alternative-software ecosystem accurate and well documented.
+
+## 🤝 Contributing
+
+We welcome community contributions! This is the fastest way to get a new tool added or a deployment guide updated.
+
+Before submitting a pull request, please read our [Contributing Guidelines](CONTRIBUTING.md).
+
+### What you can contribute:
+- **New Tools:** Submit an addition to `data/tools.json`.
+- **Data Corrections:** Fix broken links, update pricing, or correct pros/cons.
+- **Deployment Guides:** Write or update a self-hosting guide in `docs/app/deploy/`.
+- **Typo Fixes:** Help us keep the documentation clean.
+
+### What NOT to contribute here:
+- Feature requests for the main application UI.
+- Bug reports for the closed-source platform (use the contact form on the main site).
+
+## 🗄️ Working with the Data
+
+Our core dataset lives in `data/tools.json` and follows the TypeScript schema defined in `data/schema/types.ts`.
+
+If you're building a project that references our data, you are welcome to consume it directly from this repository!
+
+## 📄 License
+
+This repository uses a dual-license model:
+* **Documentation & Data** (`/docs/**/*.mdx`, `/data/**/*.json`): Creative Commons Attribution 4.0 International ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/))
+* **Scripts & Code** (`/scripts`, `.ts`/`.js` files): [Apache License 2.0](LICENSE)
+
+*All tool logos in `assets/logos/` are the property of their respective trademark holders and are used for identification purposes only.*
diff --git a/assets/logos/1password.svg b/assets/logos/1password.svg
new file mode 100644
index 0000000..4057040
--- /dev/null
+++ b/assets/logos/1password.svg
@@ -0,0 +1 @@
+1Password
\ No newline at end of file
diff --git a/assets/logos/appwrite.svg b/assets/logos/appwrite.svg
new file mode 100644
index 0000000..f223307
--- /dev/null
+++ b/assets/logos/appwrite.svg
@@ -0,0 +1 @@
+Appwrite
\ No newline at end of file
diff --git a/assets/logos/auth0.svg b/assets/logos/auth0.svg
new file mode 100644
index 0000000..85ac633
--- /dev/null
+++ b/assets/logos/auth0.svg
@@ -0,0 +1 @@
+Auth0
\ No newline at end of file
diff --git a/assets/logos/autocad.svg b/assets/logos/autocad.svg
new file mode 100644
index 0000000..d1c4e2a
--- /dev/null
+++ b/assets/logos/autocad.svg
@@ -0,0 +1 @@
+Autodesk
\ No newline at end of file
diff --git a/assets/logos/bitwarden.svg b/assets/logos/bitwarden.svg
new file mode 100644
index 0000000..65b8513
--- /dev/null
+++ b/assets/logos/bitwarden.svg
@@ -0,0 +1 @@
+Bitwarden
\ No newline at end of file
diff --git a/assets/logos/calcom.svg b/assets/logos/calcom.svg
new file mode 100644
index 0000000..a24d70a
--- /dev/null
+++ b/assets/logos/calcom.svg
@@ -0,0 +1 @@
+Cal.com
\ No newline at end of file
diff --git a/assets/logos/calendly.svg b/assets/logos/calendly.svg
new file mode 100644
index 0000000..a7d771e
--- /dev/null
+++ b/assets/logos/calendly.svg
@@ -0,0 +1 @@
+Calendly
\ No newline at end of file
diff --git a/assets/logos/codespaces.svg b/assets/logos/codespaces.svg
new file mode 100644
index 0000000..775adcc
--- /dev/null
+++ b/assets/logos/codespaces.svg
@@ -0,0 +1 @@
+GitHub
\ No newline at end of file
diff --git a/assets/logos/confluence.svg b/assets/logos/confluence.svg
new file mode 100644
index 0000000..2daccf2
--- /dev/null
+++ b/assets/logos/confluence.svg
@@ -0,0 +1 @@
+Confluence
\ No newline at end of file
diff --git a/assets/logos/dashlane.svg b/assets/logos/dashlane.svg
new file mode 100644
index 0000000..16e0fbb
--- /dev/null
+++ b/assets/logos/dashlane.svg
@@ -0,0 +1 @@
+Dashlane
\ No newline at end of file
diff --git a/assets/logos/datadog.svg b/assets/logos/datadog.svg
new file mode 100644
index 0000000..245bfb5
--- /dev/null
+++ b/assets/logos/datadog.svg
@@ -0,0 +1 @@
+Datadog
\ No newline at end of file
diff --git a/assets/logos/falcon.svg b/assets/logos/falcon.svg
new file mode 100644
index 0000000..17066fd
--- /dev/null
+++ b/assets/logos/falcon.svg
@@ -0,0 +1 @@
+Technology Innovation Institute
diff --git a/assets/logos/figma.svg b/assets/logos/figma.svg
new file mode 100644
index 0000000..8b2e765
--- /dev/null
+++ b/assets/logos/figma.svg
@@ -0,0 +1 @@
+Figma
\ No newline at end of file
diff --git a/assets/logos/firebase.svg b/assets/logos/firebase.svg
new file mode 100644
index 0000000..f77fafc
--- /dev/null
+++ b/assets/logos/firebase.svg
@@ -0,0 +1 @@
+Firebase
\ No newline at end of file
diff --git a/assets/logos/flux.svg b/assets/logos/flux.svg
new file mode 100644
index 0000000..035d74a
--- /dev/null
+++ b/assets/logos/flux.svg
@@ -0,0 +1 @@
+Flux
diff --git a/assets/logos/freecad.svg b/assets/logos/freecad.svg
new file mode 100644
index 0000000..936fe68
--- /dev/null
+++ b/assets/logos/freecad.svg
@@ -0,0 +1 @@
+FreeCAD
\ No newline at end of file
diff --git a/assets/logos/gemma.svg b/assets/logos/gemma.svg
new file mode 100644
index 0000000..802a934
--- /dev/null
+++ b/assets/logos/gemma.svg
@@ -0,0 +1 @@
+Gemma
diff --git a/assets/logos/gimp.svg b/assets/logos/gimp.svg
new file mode 100644
index 0000000..db0b2e6
--- /dev/null
+++ b/assets/logos/gimp.svg
@@ -0,0 +1 @@
+GIMP
\ No newline at end of file
diff --git a/assets/logos/github-copilot.svg b/assets/logos/github-copilot.svg
new file mode 100644
index 0000000..8716d6f
--- /dev/null
+++ b/assets/logos/github-copilot.svg
@@ -0,0 +1 @@
+GitHub Copilot
\ No newline at end of file
diff --git a/assets/logos/google-analytics.svg b/assets/logos/google-analytics.svg
new file mode 100644
index 0000000..d5a9dcd
--- /dev/null
+++ b/assets/logos/google-analytics.svg
@@ -0,0 +1 @@
+Google Analytics
\ No newline at end of file
diff --git a/assets/logos/grok.svg b/assets/logos/grok.svg
new file mode 100644
index 0000000..ac7c94c
--- /dev/null
+++ b/assets/logos/grok.svg
@@ -0,0 +1 @@
+Grok
diff --git a/assets/logos/hootsuite.svg b/assets/logos/hootsuite.svg
new file mode 100644
index 0000000..098fb8d
--- /dev/null
+++ b/assets/logos/hootsuite.svg
@@ -0,0 +1 @@
+Hootsuite
\ No newline at end of file
diff --git a/assets/logos/intercom.svg b/assets/logos/intercom.svg
new file mode 100644
index 0000000..0a5b74c
--- /dev/null
+++ b/assets/logos/intercom.svg
@@ -0,0 +1 @@
+Intercom
\ No newline at end of file
diff --git a/assets/logos/jira.svg b/assets/logos/jira.svg
new file mode 100644
index 0000000..961caaf
--- /dev/null
+++ b/assets/logos/jira.svg
@@ -0,0 +1 @@
+Jira
\ No newline at end of file
diff --git a/assets/logos/jitsi-meet.svg b/assets/logos/jitsi-meet.svg
new file mode 100644
index 0000000..673bbbd
--- /dev/null
+++ b/assets/logos/jitsi-meet.svg
@@ -0,0 +1 @@
+Jitsi
\ No newline at end of file
diff --git a/assets/logos/kdenlive.svg b/assets/logos/kdenlive.svg
new file mode 100644
index 0000000..0392016
--- /dev/null
+++ b/assets/logos/kdenlive.svg
@@ -0,0 +1 @@
+Kdenlive
\ No newline at end of file
diff --git a/assets/logos/keepassxc.svg b/assets/logos/keepassxc.svg
new file mode 100644
index 0000000..f679938
--- /dev/null
+++ b/assets/logos/keepassxc.svg
@@ -0,0 +1 @@
+KeePassXC
\ No newline at end of file
diff --git a/assets/logos/krita.svg b/assets/logos/krita.svg
new file mode 100644
index 0000000..ca5590d
--- /dev/null
+++ b/assets/logos/krita.svg
@@ -0,0 +1 @@
+Krita
\ No newline at end of file
diff --git a/assets/logos/mailchimp.svg b/assets/logos/mailchimp.svg
new file mode 100644
index 0000000..0bfa8f5
--- /dev/null
+++ b/assets/logos/mailchimp.svg
@@ -0,0 +1 @@
+MailChimp
\ No newline at end of file
diff --git a/assets/logos/mailgun.svg b/assets/logos/mailgun.svg
new file mode 100644
index 0000000..7253b76
--- /dev/null
+++ b/assets/logos/mailgun.svg
@@ -0,0 +1 @@
+Mailgun
\ No newline at end of file
diff --git a/assets/logos/matomo.svg b/assets/logos/matomo.svg
new file mode 100644
index 0000000..bcff636
--- /dev/null
+++ b/assets/logos/matomo.svg
@@ -0,0 +1 @@
+Matomo
\ No newline at end of file
diff --git a/assets/logos/mattermost.svg b/assets/logos/mattermost.svg
new file mode 100644
index 0000000..0e5fc0b
--- /dev/null
+++ b/assets/logos/mattermost.svg
@@ -0,0 +1 @@
+Mattermost
\ No newline at end of file
diff --git a/assets/logos/meta.svg b/assets/logos/meta.svg
new file mode 100644
index 0000000..3ec38d1
--- /dev/null
+++ b/assets/logos/meta.svg
@@ -0,0 +1 @@
+Meta
diff --git a/assets/logos/minio.svg b/assets/logos/minio.svg
new file mode 100644
index 0000000..0a4e35d
--- /dev/null
+++ b/assets/logos/minio.svg
@@ -0,0 +1 @@
+MinIO
\ No newline at end of file
diff --git a/assets/logos/mistral.svg b/assets/logos/mistral.svg
new file mode 100644
index 0000000..7c5e604
--- /dev/null
+++ b/assets/logos/mistral.svg
@@ -0,0 +1 @@
+Mistral
diff --git a/assets/logos/n8n.svg b/assets/logos/n8n.svg
new file mode 100644
index 0000000..c15b9c7
--- /dev/null
+++ b/assets/logos/n8n.svg
@@ -0,0 +1 @@
+n8n
\ No newline at end of file
diff --git a/assets/logos/notion.svg b/assets/logos/notion.svg
new file mode 100644
index 0000000..86ee23f
--- /dev/null
+++ b/assets/logos/notion.svg
@@ -0,0 +1 @@
+Notion
\ No newline at end of file
diff --git a/assets/logos/odoo.svg b/assets/logos/odoo.svg
new file mode 100644
index 0000000..8813c31
--- /dev/null
+++ b/assets/logos/odoo.svg
@@ -0,0 +1 @@
+Odoo
\ No newline at end of file
diff --git a/assets/logos/okta.svg b/assets/logos/okta.svg
new file mode 100644
index 0000000..455851d
--- /dev/null
+++ b/assets/logos/okta.svg
@@ -0,0 +1 @@
+Okta
\ No newline at end of file
diff --git a/assets/logos/ollama.svg b/assets/logos/ollama.svg
new file mode 100644
index 0000000..ee042d0
--- /dev/null
+++ b/assets/logos/ollama.svg
@@ -0,0 +1 @@
+Ollama
\ No newline at end of file
diff --git a/assets/logos/onlyoffice.svg b/assets/logos/onlyoffice.svg
new file mode 100644
index 0000000..4a609be
--- /dev/null
+++ b/assets/logos/onlyoffice.svg
@@ -0,0 +1 @@
+ONLYOFFICE
\ No newline at end of file
diff --git a/assets/logos/penpot.svg b/assets/logos/penpot.svg
new file mode 100644
index 0000000..0ac8fba
--- /dev/null
+++ b/assets/logos/penpot.svg
@@ -0,0 +1 @@
+Penpot
\ No newline at end of file
diff --git a/assets/logos/phi.svg b/assets/logos/phi.svg
new file mode 100644
index 0000000..584d7b5
--- /dev/null
+++ b/assets/logos/phi.svg
@@ -0,0 +1 @@
+Azure
diff --git a/assets/logos/plane.svg b/assets/logos/plane.svg
new file mode 100644
index 0000000..b8b0100
--- /dev/null
+++ b/assets/logos/plane.svg
@@ -0,0 +1 @@
+Plane
\ No newline at end of file
diff --git a/assets/logos/plausible.svg b/assets/logos/plausible.svg
new file mode 100644
index 0000000..b70c5ef
--- /dev/null
+++ b/assets/logos/plausible.svg
@@ -0,0 +1 @@
+Plausible Analytics
\ No newline at end of file
diff --git a/assets/logos/pocketbase.svg b/assets/logos/pocketbase.svg
new file mode 100644
index 0000000..005f76d
--- /dev/null
+++ b/assets/logos/pocketbase.svg
@@ -0,0 +1 @@
+PocketBase
\ No newline at end of file
diff --git a/assets/logos/posthog.svg b/assets/logos/posthog.svg
new file mode 100644
index 0000000..445259b
--- /dev/null
+++ b/assets/logos/posthog.svg
@@ -0,0 +1 @@
+PostHog
\ No newline at end of file
diff --git a/assets/logos/quickbooks.svg b/assets/logos/quickbooks.svg
new file mode 100644
index 0000000..39f1009
--- /dev/null
+++ b/assets/logos/quickbooks.svg
@@ -0,0 +1 @@
+QuickBooks
\ No newline at end of file
diff --git a/assets/logos/qwen.svg b/assets/logos/qwen.svg
new file mode 100644
index 0000000..24d11f3
--- /dev/null
+++ b/assets/logos/qwen.svg
@@ -0,0 +1 @@
+Qwen
diff --git a/assets/logos/rocket-chat.svg b/assets/logos/rocket-chat.svg
new file mode 100644
index 0000000..334988c
--- /dev/null
+++ b/assets/logos/rocket-chat.svg
@@ -0,0 +1 @@
+Rocket.Chat
\ No newline at end of file
diff --git a/assets/logos/sap.svg b/assets/logos/sap.svg
new file mode 100644
index 0000000..b8c4f22
--- /dev/null
+++ b/assets/logos/sap.svg
@@ -0,0 +1 @@
+SAP
\ No newline at end of file
diff --git a/assets/logos/sentry.svg b/assets/logos/sentry.svg
new file mode 100644
index 0000000..0b47057
--- /dev/null
+++ b/assets/logos/sentry.svg
@@ -0,0 +1 @@
+Sentry
\ No newline at end of file
diff --git a/assets/logos/shopify.svg b/assets/logos/shopify.svg
new file mode 100644
index 0000000..3db6034
--- /dev/null
+++ b/assets/logos/shopify.svg
@@ -0,0 +1 @@
+Shopify
\ No newline at end of file
diff --git a/assets/logos/stability.svg b/assets/logos/stability.svg
new file mode 100644
index 0000000..6276dc7
--- /dev/null
+++ b/assets/logos/stability.svg
@@ -0,0 +1 @@
+Stability
diff --git a/assets/logos/supabase.svg b/assets/logos/supabase.svg
new file mode 100644
index 0000000..b9760e7
--- /dev/null
+++ b/assets/logos/supabase.svg
@@ -0,0 +1 @@
+Supabase
\ No newline at end of file
diff --git a/assets/logos/typeform.svg b/assets/logos/typeform.svg
new file mode 100644
index 0000000..23ba814
--- /dev/null
+++ b/assets/logos/typeform.svg
@@ -0,0 +1 @@
+Typeform
\ No newline at end of file
diff --git a/assets/logos/zapier.svg b/assets/logos/zapier.svg
new file mode 100644
index 0000000..2a2c899
--- /dev/null
+++ b/assets/logos/zapier.svg
@@ -0,0 +1 @@
+Zapier
\ No newline at end of file
diff --git a/assets/logos/zendesk.svg b/assets/logos/zendesk.svg
new file mode 100644
index 0000000..0849b82
--- /dev/null
+++ b/assets/logos/zendesk.svg
@@ -0,0 +1 @@
+Zendesk
\ No newline at end of file
diff --git a/assets/logos/zoom.svg b/assets/logos/zoom.svg
new file mode 100644
index 0000000..238e6ce
--- /dev/null
+++ b/assets/logos/zoom.svg
@@ -0,0 +1 @@
+Zoom
\ No newline at end of file
diff --git a/data/category_editorial.json b/data/category_editorial.json
new file mode 100644
index 0000000..a55f728
--- /dev/null
+++ b/data/category_editorial.json
@@ -0,0 +1,226 @@
+{
+ "Communication": {
+ "industry": [
+ "Team communication is the backbone of every modern organization. Whether your team is remote, hybrid, or co-located, the tools you choose to communicate shape how quickly decisions get made, how aligned people stay, and ultimately, how fast you can ship. A poorly configured messaging setup leads to scattered conversations, missed updates, and the kind of context-switching that quietly erodes productivity over weeks and months.",
+ "The dominant players in this space have built their products around a simple bet: once a team adopts a communication platform, switching costs are high enough to justify annual price increases. And for the most part, that bet has paid off — Slack's per-seat pricing has become one of the most debated line items in SaaS budgets, especially at companies north of 50 employees. Microsoft Teams bundles itself into the Microsoft 365 suite, making the true cost harder to isolate but no less real.",
+ "What's changed in recent years is the maturity of open-source alternatives. Self-hosted messaging platforms now offer threaded conversations, video calling, file sharing, integrations with CI/CD pipelines, and the kind of compliance support (SOC 2 attestations, HIPAA alignment) that used to be exclusive to enterprise vendors. The real unlock isn't just cost savings — it's the ability to own your communication infrastructure the same way you own your code."
+ ],
+ "oss_benefits_title": "Why Open-Source Communication Tools Make Sense",
+ "oss_benefits": [
+ "**Full data sovereignty** — every message, file, and call recording stays on your infrastructure. Essential for regulated industries, government contracts, and teams handling sensitive IP.",
+ "**No per-seat pricing** — most open-source communication platforms charge nothing regardless of team size. Even those with enterprise tiers charge a fraction of proprietary alternatives.",
+ "**Deep customization** — white-label the interface, build custom integrations, modify notification behavior, or integrate directly with your internal tools via open APIs.",
+ "**Community-driven roadmaps** — feature development is driven by actual users, not product managers optimizing for upsell opportunities."
+ ]
+ },
+ "AI Models": {
+ "industry": [
+ "Large language models have become foundational infrastructure for a growing number of companies — powering everything from customer support automation and code generation to legal document analysis and creative workflows. The pace of improvement has been relentless: models that would have been considered state-of-the-art six months ago are routinely surpassed by newer releases that are both more capable and more efficient to run.",
+ "For most of 2023 and 2024, the landscape was dominated by proprietary APIs — OpenAI's GPT series and Anthropic's Claude set the benchmarks, and building on top of them was the path of least resistance. But the economics of API-based inference don't scale well. At production volumes, per-token costs can easily reach five or six figures monthly. And once you factor in data privacy requirements, latency constraints, and the operational risk of depending on a single vendor, the calculus shifts significantly toward self-hosted alternatives.",
+ "The open-weight movement has delivered models that genuinely compete with proprietary offerings across most practical benchmarks. Meta's Llama series, DeepSeek's reasoning models, Mistral's efficient architectures, and Qwen's multilingual capabilities have collectively proven that you don't need to pay per token to get frontier-level performance. The remaining gap, which narrows with each release cycle, is increasingly a matter of fine-tuning and deployment infrastructure rather than raw model quality."
+ ],
+ "oss_benefits_title": "The Case for Open-Weight Models",
+ "oss_benefits": [
+ "**Zero marginal cost at scale** — once you've invested in inference hardware, every additional query is essentially free. This transforms the economics of AI-powered features from variable cost to fixed cost.",
+ "**Complete data privacy** — your prompts, context, and outputs never leave your network. Critical for healthcare, finance, legal, and any business handling PII.",
+ "**Full customization** — fine-tune on your domain data, adjust system prompts without restrictions, modify tokenizers, or quantize for your specific hardware profile.",
+ "**No vendor lock-in** — swap models as better ones emerge without rewriting integration code. Most open-weight models converge on compatible APIs and inference formats."
+ ]
+ },
+ "AI Runners": {
+ "industry": [
+ "Running AI models locally has gone from a hobbyist curiosity to a legitimate infrastructure choice. The shift was driven by three converging forces: rapidly improving open-weight models, dramatic reductions in quantization quality loss, and the rising cost consciousness around API-based inference. What was once a weekend experiment — getting a model to respond on your laptop — is now a production deployment pattern used by companies that need predictable costs and absolute data privacy.",
+ "The tooling around local inference has matured to match. Modern AI runners handle model downloading, quantization, context management, GPU memory allocation, and API serving with minimal configuration. Many offer OpenAI-compatible endpoints, which means existing application code that was built against commercial APIs can switch to self-hosted inference with a single URL change.",
+ "The hardware requirements have also dropped significantly. GGUF quantization, mixed-precision inference, and speculative decoding techniques mean that capable 7B-13B models run comfortably on consumer GPUs, and even larger 70B models are practical on workstation-grade hardware. For teams that don't need frontier-scale reasoning on every query, local inference is now the more economical choice by a wide margin."
+ ],
+ "oss_benefits_title": "Why Self-Hosted Inference Matters",
+ "oss_benefits": [
+ "**Predictable, fixed costs** — no per-token billing, no usage spikes, no surprise invoices. Your inference cost is your hardware amortization, period.",
+ "**Complete privacy and compliance** — prompts and outputs never leave your premises. Non-negotiable for legal, medical, and classified workloads.",
+ "**Latency control** — local inference eliminates network round-trips. Sub-100ms time-to-first-token is achievable for many model sizes.",
+ "**Model flexibility** — swap, fine-tune, or quantize models to match your exact performance-cost trade-off without waiting for a vendor to add support."
+ ]
+ },
+ "Monitoring": {
+ "industry": [
+ "Monitoring is one of those infrastructure investments that only gets appreciated after something goes wrong. Every minute of undetected downtime translates directly to lost revenue, eroded user trust, and cascading failures that are exponentially harder to debug the longer they persist. For modern web applications with distributed architectures, monitoring isn't optional — it's the difference between catching a degradation at 2% error rate versus discovering it at 20% when customers start complaining on social media.",
+ "The commercial monitoring landscape has consolidated around a few major players — Datadog, New Relic, and Splunk — that offer comprehensive platforms with enterprise features. But their pricing models, which typically scale with data ingestion volume, create a perverse incentive: the more you instrument your application (which you should), the more you pay. Teams routinely find themselves reducing log verbosity or sampling metrics just to stay within budget, which defeats the purpose of monitoring in the first place.",
+ "Open-source monitoring has matured to the point where self-hosted stacks built on Prometheus, Grafana, and OpenTelemetry can match commercial platforms in capability. The trade-off is operational overhead — you're responsible for keeping the monitoring infrastructure itself running. But for teams with the DevOps muscle to maintain it, the cost savings at scale are substantial, and the absence of data ingestion limits means you can instrument without compromise."
+ ],
+ "oss_benefits_title": "Why Open-Source Monitoring Wins at Scale",
+ "oss_benefits": [
+ "**No data ingestion limits** — instrument everything without worrying about per-GB pricing that punishes thoroughness.",
+ "**Full stack visibility** — combine metrics, logs, traces, and alerting in a single self-hosted stack with complete control over retention policies.",
+ "**Community-maintained integrations** — Prometheus exporters exist for virtually every database, message queue, web server, and cloud service you're running.",
+ "**Customizable alerting** — define alert rules, escalation policies, and notification channels that match your operational workflow exactly."
+ ]
+ },
+ "Analytics": {
+ "industry": [
+ "Understanding how users interact with your product is fundamental to making good decisions — about which features to build, where friction exists, and what's actually driving growth. Analytics tooling has become ubiquitous, but the way most teams implement it creates a quiet tension between insight and privacy. Every pageview, click, and scroll event sent to a third-party analytics service is data about your users that lives on someone else's infrastructure.",
+ "Google Analytics dominated this space for over a decade by being free and comprehensive. But the shift to GA4, growing regulatory pressure from GDPR and CCPA, and increasing user awareness of tracking have created an opening for alternatives that respect privacy by default. The question has moved from 'Should we track user behavior?' to 'Can we understand our users without compromising their privacy?'",
+ "The answer, increasingly, is yes. Privacy-first analytics platforms — both commercial and open-source — have proven that you can get actionable insights from aggregate data without building individual user profiles, dropping cookies, or sending behavioral data to third-party ad networks. For many teams, the switch isn't just about compliance; it's about building trust with users who are increasingly aware of how their data is being used."
+ ],
+ "oss_benefits_title": "Why Privacy-First Analytics is the Future",
+ "oss_benefits": [
+ "**GDPR compliant without banners** — no cookies means no consent dialogs interrupting your users' experience.",
+ "**Lightweight by design** — most open-source analytics scripts are under 5KB, versus 40-70KB for Google Analytics, directly improving page load times.",
+ "**Complete data ownership** — your analytics data stays on your servers. No data mining, no profile building, no third-party data sharing.",
+ "**Transparent methodology** — open-source means you can audit exactly how metrics are calculated and ensure the numbers are trustworthy."
+ ]
+ },
+ "Backend as a Service": {
+ "industry": [
+ "Building a backend from scratch — authentication, database, file storage, realtime subscriptions, serverless functions — is weeks or months of work before you ship a single user-facing feature. Backend-as-a-service platforms compress that timeline by providing these building blocks as managed services with SDKs for every major frontend framework. Firebase showed the industry what's possible; Supabase proved it could be done with open-source technology.",
+ "The trade-off with managed BaaS has always been control. Firebase's real-time database is fast to get started with but notoriously difficult to migrate away from. Pricing structures that charge per read/write operation create anxiety at scale. And for applications that need to comply with data residency requirements, the inability to choose where your data lives is a non-starter.",
+ "Self-hosted BaaS platforms have reached the point where the developer experience is genuinely comparable to managed alternatives. PostgreSQL-backed platforms like Supabase offer SQL access, row-level security, and realtime subscriptions. Appwrite and PocketBase provide complete backend stacks that deploy with a single Docker command. The infrastructure complexity that once made self-hosting impractical has been abstracted away by mature tooling."
+ ],
+ "oss_benefits_title": "Why Self-Hosted Backends Make Sense",
+ "oss_benefits": [
+ "**Zero vendor lock-in** — your data lives in standard PostgreSQL or SQLite databases. Migration is a pg_dump away.",
+ "**Predictable costs** — no per-operation billing. Your costs scale with your infrastructure, not your traffic patterns.",
+ "**Data residency control** — deploy wherever compliance requires, from EU data centers to air-gapped environments.",
+ "**Full stack access** — extend functionality at the database level, not just through vendor-defined SDKs and rules."
+ ]
+ },
+ "Project Management": {
+ "industry": [
+ "Project management tools are the operating system for how teams plan, prioritize, and track work. Get it right and you have visibility into what's happening across the organization, clear ownership of tasks, and a shared source of truth for deadlines. Get it wrong — or outgrow your tooling — and you end up with scattered context across Slack threads, Google Docs, and someone's mental model of what 'in progress' means.",
+ "Jira has been the default choice in enterprise settings for years, but its complexity has become its own kind of cost. Teams routinely spend more time configuring workflows, maintaining custom fields, and navigating a UI that feels designed for administrators rather than the people doing the actual work. Linear's success proved there's massive demand for tools that are fast, opinionated, and pleasant to use — but Linear's pricing and closed-source nature aren't for everyone.",
+ "Open-source project management tools have learned from both extremes. The current generation offers clean, modern interfaces inspired by Linear's speed and simplicity, while providing the flexibility to customize workflows that Jira users expect. Self-hosting means your planning data — which often contains sensitive roadmap and strategy information — stays within your infrastructure."
+ ],
+ "oss_benefits_title": "Why Open-Source Project Management",
+ "oss_benefits": [
+ "**No per-seat tax on growth** — add team members without budget conversations with finance.",
+ "**Sensitive roadmap data stays internal** — product strategy, timelines, and resource allocation don't leave your network.",
+ "**Customizable workflows** — modify issue types, states, and automation rules at the code level, not through limited configuration UIs.",
+ "**Integration freedom** — connect with your CI/CD, Slack, Git, and custom internal tools through open APIs."
+ ]
+ },
+ "Security": {
+ "industry": [
+ "Security infrastructure — password management, identity providers, authentication services, and encryption tooling — sits at the foundation of every application. A breach in any of these layers doesn't just affect one feature; it compromises the entire trust relationship with your users. The stakes are high enough that many teams default to commercial security products, reasoning that the cost of a vendor is trivial compared to the cost of a security incident.",
+ "But trusting a security vendor also means trusting their infrastructure, their access controls, their employee vetting, and their incident response. After high-profile breaches at LastPass and Okta, more teams are asking whether the convenience of managed security services justifies the concentration of risk. When your password vault or identity provider is a single vendor's cloud service, a compromise at that vendor becomes your compromise too.",
+ "Open-source security tools offer an alternative model: trust through transparency. When the source code is public, security researchers worldwide can audit it. When the data stays on your infrastructure, a vendor breach doesn't affect you. The trade-off is operational responsibility — but for teams that already manage their own infrastructure, self-hosting a password manager or identity provider is a natural extension."
+ ],
+ "oss_benefits_title": "Why Open-Source Security Infrastructure",
+ "oss_benefits": [
+ "**Auditable code** — the source is public, reviewed by the community, and regularly audited by independent security researchers.",
+ "**Zero-knowledge architecture** — your secrets never leave your infrastructure. No vendor employees can access your vaults or tokens.",
+ "**Air-gap capability** — deploy in fully isolated environments when compliance or classification requirements demand it.",
+ "**No subscription for essential security** — password management and authentication shouldn't be a recurring cost per user."
+ ]
+ },
+ "DevOps": {
+ "industry": [
+ "The promise of DevOps was simple: developers should be able to deploy their code without filing tickets, waiting for ops teams, or configuring infrastructure by hand. Platforms like Heroku, Vercel, and Railway delivered on that promise beautifully — git push and your app is live. But the convenience comes with constraints: vendor-specific build systems, pricing that scales with compute time, and the nagging awareness that your deployment pipeline is someone else's product decision.",
+ "Self-hosted Platform-as-a-Service alternatives have closed the experience gap significantly. Tools like Coolify, Dokku, and CapRover provide the same git-push deployment workflow on your own servers, with the added benefit of running on hardware you control. The pricing model shifts from per-project or per-build-minute to a flat monthly VPS cost that supports as many projects as your server can handle.",
+ "For freelancers and small teams running multiple projects, the economics are especially compelling. A $20/month VPS running a self-hosted PaaS can handle workloads that would cost $200+ across managed platforms. For larger teams, the value proposition shifts toward control: custom build pipelines, deployment policies, and the ability to integrate with internal infrastructure that cloud PaaS providers don't support."
+ ],
+ "oss_benefits_title": "Why Self-Hosted Deployment Platforms",
+ "oss_benefits": [
+ "**Flat infrastructure costs** — one server, unlimited projects. No per-app or per-build pricing.",
+ "**Full pipeline control** — customize build, test, and deployment steps without platform constraints.",
+ "**Infrastructure portability** — move between cloud providers or on-prem without rewriting deployment configurations.",
+ "**Multi-service support** — deploy databases, message queues, and background workers alongside your apps on the same platform."
+ ]
+ },
+ "Productivity": {
+ "industry": [
+ "Productivity software — document editors, note-taking apps, knowledge bases, wikis — is the digital workspace where ideas become artifacts. Notion's success proved that people want more than just a text editor; they want tools that can organize information spatially, link concepts together, and serve as both a writing surface and a lightweight database. But Notion and its commercial peers store every thought, draft, and internal document on their servers.",
+ "For companies, that means proprietary knowledge, strategic planning documents, and sensitive internal communications live on third-party infrastructure. For individuals, it means personal notes, journals, and creative work exist at the mercy of a subscription billing cycle. When your knowledge base is someone else's SaaS product, they have leverage over your most important asset: your accumulated knowledge.",
+ "Open-source productivity tools have evolved from basic Markdown editors into full workspace platforms. AppFlowy and AFFiNE offer Notion-like block editors with local-first architectures. Outline provides team knowledge bases with Slack integration. ONLYOFFICE delivers collaborative document editing that genuinely competes with Google Workspace. The common thread is data ownership — your documents, your server, your rules."
+ ],
+ "oss_benefits_title": "Why Open-Source Productivity Tools",
+ "oss_benefits": [
+ "**Local-first architecture** — your data exists on your device first, synced on your terms. No internet required to access your own notes.",
+ "**No content lock-in** — export everything in standard formats. Your knowledge base shouldn't be trapped in a proprietary database.",
+ "**Collaborative without compromise** — real-time editing and sharing without routing every keystroke through a third-party server.",
+ "**Offline-capable** — work anywhere, sync when you're ready. Perfect for environments with intermittent connectivity."
+ ]
+ },
+ "Design": {
+ "industry": [
+ "Design tools shape how products look and feel. For the last decade, Adobe's Creative Suite and Figma have defined what professional design tooling looks like — and what it costs. Adobe's subscription model transformed perpetual licenses into recurring revenue, while Figma proved that browser-based collaboration could rival native application performance. Both are excellent tools. Both also represent significant ongoing costs and deep vendor dependencies.",
+ "The open-source design ecosystem has expanded beyond GIMP as the sole Photoshop alternative. Krita has become the tool of choice for digital painters and illustrators, with a brush engine that many artists prefer over Photoshop's. Penpot offers browser-based collaborative design with SVG-native output. Inkscape handles vector graphics with a feature set that covers 90% of what Illustrator does. Each has carved out a niche where it genuinely excels rather than trying to replicate commercial tools feature-for-feature.",
+ "For teams considering a switch, the question isn't whether open-source design tools are 'good enough' — several are genuinely better for specific workflows. The question is whether your existing asset libraries, plugins, and team workflows can adapt. The answer, increasingly, is yes."
+ ],
+ "oss_benefits_title": "Why Open-Source Design Tools",
+ "oss_benefits": [
+ "**No subscription treadmill** — professional design capability without monthly fees that increase every year.",
+ "**Standard file formats** — SVG, PNG, PSD, and OpenRaster support means your assets aren't locked into one vendor's format.",
+ "**Extensible through plugins** — customize your workflow with community-built extensions, scripts, and brush packs.",
+ "**Cross-platform freedom** — run the same tool on Linux, macOS, and Windows without feature disparity."
+ ]
+ },
+ "CRM": {
+ "industry": [
+ "Customer relationship management sits at the heart of revenue operations. Every interaction — from first touchpoint through closed deal to ongoing account management — flows through the CRM. That centrality is exactly why CRM vendors can charge premium prices: once your sales process, reporting, and integrations are built around a platform, the switching cost feels enormous.",
+ "Salesforce perfected this dynamic. Its ecosystem of apps, consultants, and certifications creates gravitational pull that's hard to escape. HubSpot offered a friendlier on-ramp but follows the same playbook: free tier to get you in, premium features to keep you paying. For growing companies, CRM costs can quietly become one of the largest line items in the tools budget.",
+ "Open-source CRM alternatives approach the problem differently. Platforms like Twenty and Odoo offer modern interfaces with full control over your customer data. The functionality gap has narrowed — pipeline management, email tracking, activity logging, and reporting are all available. What's changed is the recognition that customer data is too strategically important to store on someone else's servers."
+ ],
+ "oss_benefits_title": "Why Open-Source CRM",
+ "oss_benefits": [
+ "**Your customer data, your servers** — sales intelligence and customer communications are among the most sensitive data a company has.",
+ "**No per-seat sales tax** — add SDRs, AEs, and CSMs without budget negotiations for each headcount.",
+ "**Deep customization** — modify deal stages, fields, automations, and reporting at the code level.",
+ "**Integration on your terms** — connect to your email, calendar, and internal tools without marketplace surcharges."
+ ]
+ },
+ "Marketing": {
+ "industry": [
+ "Marketing technology — email automation, newsletter platforms, campaign management, and transactional email — has become a critical layer in how businesses communicate with their audiences. The volume of email sent by companies has grown year over year, and with it, the bills from platforms like Mailchimp, SendGrid, and HubSpot. What starts as $50/month for a small list can grow to thousands as your subscriber base expands.",
+ "The economics of email marketing have a unique quirk: the value of your list compounds over time, but so does the cost of maintaining it on a managed platform. Switching providers means migrating subscriber data, rebuilding templates, re-verifying domains, and potentially losing engagement history. This lock-in is subtle but expensive — many teams continue paying premium prices simply because migration is daunting.",
+ "Self-hosted email and marketing tools fundamentally change this equation. Platforms like Listmonk can handle millions of subscribers on a single server. Mautic provides marketing automation comparable to HubSpot. Postal handles transactional email delivery at scale. The infrastructure cost is a fraction of managed alternatives, and the data — your subscriber lists, engagement metrics, and campaign history — stays entirely within your control."
+ ],
+ "oss_benefits_title": "Why Self-Hosted Marketing Infrastructure",
+ "oss_benefits": [
+ "**Scale without per-subscriber pricing** — your list can grow to millions without your bill growing proportionally.",
+ "**Full deliverability control** — manage your own IP reputation, DKIM, SPF, and DMARC settings.",
+ "**No data sharing with ad platforms** — your subscriber data isn't being used to train ad targeting models.",
+ "**Campaign data ownership** — engagement metrics, A/B test results, and audience segments stay on your infrastructure."
+ ]
+ },
+ "Support": {
+ "industry": [
+ "Customer support tooling directly impacts how quickly and effectively you can help your users. The experience of submitting a ticket, chatting with support, or reading documentation shapes perception of your entire product. Zendesk and Intercom have set the baseline for what teams expect from support platforms, but their per-agent pricing means that scaling your support team scales your tooling costs linearly.",
+ "The support space has also seen significant feature creep in pricing. Chatbots, knowledge bases, analytics dashboards, and multichannel inboxes are increasingly gated behind higher-tier plans. Teams frequently find themselves paying for an 'enterprise' plan not because they need enterprise features, but because the one specific capability they need was strategically placed in that tier.",
+ "Open-source helpdesk platforms offer the core functionality — ticketing, live chat, knowledge bases, and multichannel support — without per-agent pricing or feature gating. Tools like Zammad and Chaskiq provide mature platforms that can be self-hosted and customized to match your support workflow exactly."
+ ],
+ "oss_benefits_title": "Why Open-Source Support Platforms",
+ "oss_benefits": [
+ "**No per-agent pricing** — scale your support team without scaling your tooling costs.",
+ "**Omnichannel without upsells** — email, chat, social, and phone support in one platform, included by default.",
+ "**Complete conversation history ownership** — support interactions contain valuable product feedback. Keep that data accessible on your terms.",
+ "**Customizable workflows** — build escalation rules, SLA tracking, and routing logic that matches your specific support process."
+ ]
+ },
+ "Automation": {
+ "industry": [
+ "Workflow automation connects the tools your team already uses, eliminating the repetitive manual work that consumes hours every week. When a new lead fills out a form, automation can create a CRM record, send a welcome email, notify the sales team, and update a spreadsheet — all without human intervention. Zapier and Make have made this accessible to non-technical users, but at $20-50+ per month for serious usage, the cost adds up.",
+ "The real cost of managed automation isn't just the subscription — it's the per-task or per-operation pricing that makes complex workflows expensive. A workflow that triggers 10,000 times per month might cost $100+ on Zapier. Run that same workflow on a self-hosted platform like n8n, and the cost is whatever you're already paying for your server.",
+ "Open-source automation platforms have reached the point where they offer comparable visual builders, similar integration libraries, and the added benefit of running custom code nodes for anything the pre-built integrations don't cover. For technical teams, the ability to add JavaScript or Python logic directly into a workflow is a significant advantage over the more constrained no-code approaches."
+ ],
+ "oss_benefits_title": "Why Open-Source Automation",
+ "oss_benefits": [
+ "**No per-execution pricing** — run workflows as often as needed without counting operations.",
+ "**Code when you need it** — drop into JavaScript or Python for custom logic that no-code builders can't handle.",
+ "**Data stays local** — sensitive business data flowing through automation workflows doesn't leave your infrastructure.",
+ "**Custom integrations** — build connectors for internal APIs that managed platforms will never support."
+ ]
+ },
+ "E-commerce": {
+ "industry": [
+ "E-commerce platforms are the foundation of online retail. Shopify has made launching a store remarkably simple, but that simplicity comes with trade-offs: transaction fees on every sale, limited customization depth, and a dependency on Shopify's infrastructure for your entire business. When the platform decides to change its API, adjust its pricing, or deprecate a feature you rely on, you adapt or you scramble.",
+ "The headless commerce movement has shifted the conversation from 'which all-in-one platform' to 'which best-of-breed components.' By decoupling the storefront from the commerce engine, teams can use any frontend framework while plugging into a commerce backend for product management, orders, payments, and fulfillment. Open-source headless platforms like Medusa.js make this architecture accessible without enterprise licensing fees.",
+ "For brands that have outgrown template-based storefronts or need multi-market support, self-hosted commerce infrastructure offers the flexibility to build exactly the shopping experience their customers expect, without the constraints and costs of managed platforms."
+ ],
+ "oss_benefits_title": "Why Open-Source Commerce",
+ "oss_benefits": [
+ "**No transaction fees** — zero percent commission on every sale, regardless of volume.",
+ "**Complete storefront control** — build any frontend experience without template limitations.",
+ "**Multi-region and multi-currency** — handle international commerce without per-market licensing fees.",
+ "**Plugin architecture** — extend functionality with community-built modules for payments, fulfillment, and CMS integrations."
+ ]
+ }
+}
\ No newline at end of file
diff --git a/data/schema/types.ts b/data/schema/types.ts
new file mode 100644
index 0000000..9157a5c
--- /dev/null
+++ b/data/schema/types.ts
@@ -0,0 +1,66 @@
+// Container/runtime configuration for self-hosting a tool. Consumed by the
+// deploy guides/tooling; all fields beyond image+port are optional.
+export interface DeploymentConfig {
+  image: string; // Container image reference (e.g. "org/app:tag")
+  port: number; // Primary port the service listens on
+  env?: { key: string; value: string }[]; // Environment variables to inject
+  volumes?: string[]; // Volume mount specs — presumably "host:container"; verify against deploy tooling
+  command?: string; // Override for the container start command
+  local_path?: string; // NOTE(review): looks like a repo-local path to the compose/Dockerfile — confirm
+  type?: 'docker-compose' | 'dockerfile'; // Which deployment artifact this config describes
+}
+
+// A single entry in data/tools.json: either a proprietary SaaS product or an
+// open-source alternative. Proprietary entries list their open-source
+// counterparts by slug in `alternatives`.
+export interface Tool {
+  slug: string; // Unique URL-safe identifier; referenced by `alternatives` and stacks
+  name: string; // Display name
+  category: string; // e.g. "Backend as a Service", "Design"
+  is_open_source: boolean; // false = proprietary SaaS anchor; true = OSS alternative
+  description: string; // Short one-line description
+  website: string; // Canonical product URL
+  github_repo?: string; // "owner/repo" or full URL — format not fixed here; confirm against data
+  stars?: number; // GitHub star count snapshot
+  description_long?: string;
+  pros?: string[];
+  cons?: string[];
+  min_cost?: number; // Lowest paid tier — presumably USD/month; verify
+  avg_monthly_cost?: number; // Estimated self-hosting or SaaS cost
+  pricing_model?: 'Free' | 'Freemium' | 'Paid' | 'Paid (Subscription)' | 'Paid (One-time)';
+  has_free_trial?: boolean;
+  self_hostable?: boolean;
+  license?: string; // SPDX-ish license name as reported upstream
+  language?: string; // Primary implementation language
+  tags?: string[];
+  alternatives?: string[]; // Slugs of open-source alternatives (set on proprietary entries)
+  last_commit?: string; // Timestamp of last upstream commit — format not shown here; confirm
+  logo_url?: string; // Local "/logos/*.svg" path or external favicon URL
+  affiliate_url?: string;
+  referral_url?: string; // New field for specific referral links
+  deployment?: DeploymentConfig; // Self-hosting config, when available
+  hardware_req?: string; // e.g., "16GB VRAM", "CPU only", "Cloud API"
+  hosting_type?: 'cloud' | 'self-hosted' | 'both'; // How the tool/model is accessed
+
+  // Extra metadata for AI models/tools only.
+  ai_metadata?: {
+    vram_inference_gb?: number; // Recommended VRAM for inference (FP16/BF16)
+    context_window_tokens?: number; // Max sequence length
+    parameters_total_b?: number; // Billion parameters
+    parameters_active_b?: number; // For MoE models
+    is_multimodal?: boolean;
+  };
+}
+
+// A curated bundle of open-source tools presented as one "stack"
+// (see data/stacks.ts for the concrete instances).
+export interface Stack {
+  id: string; // URL-safe identifier, e.g. "bootstrapper"
+  name: string; // Display name, e.g. "The Bootstrapper Stack"
+  emoji: string; // Single emoji used as the stack icon
+  tagline: string; // Short marketing line
+  description: string; // Longer blurb shown on the stack page
+  monthlySaved: number; // Estimated monthly saving vs proprietary tools — presumably USD; confirm
+
+  // Ordered list of tools with specific roles in this stack
+  tools: {
+    category: string; // e.g. "The Database", "Authentication"
+    toolSlug: string; // Must match a Tool.slug in data/tools.json
+  }[];
+
+  // SEO
+  seo_title?: string;
+  seo_description?: string;
+}
diff --git a/data/seo.ts b/data/seo.ts
new file mode 100644
index 0000000..9870c49
--- /dev/null
+++ b/data/seo.ts
@@ -0,0 +1,34 @@
+import { Tool } from '../app/types';
+import toolsData from './tools.json';
+
+const tools = toolsData as Tool[];
+
+// One "X vs Y" comparison page pairing a proprietary tool with one of its
+// open-source alternatives.
+export interface VsPair {
+  slug: string; // "slack-vs-mattermost"
+  proprietaryTool: Tool; // The closed-source anchor product
+  opensourceTool: Tool; // The open-source alternative being compared
+}
+
+/**
+ * Builds every "<proprietary>-vs-<opensource>" comparison pair from the
+ * dataset: each proprietary tool is paired with each of its listed
+ * open-source alternatives.
+ *
+ * @returns One VsPair per resolvable (proprietary, alternative) pair, in
+ *          dataset order; alternative slugs with no matching tool entry
+ *          are silently skipped.
+ */
+export function generateVsPairs(): VsPair[] {
+  // Index tools by slug once so each alternative lookup is O(1); the
+  // previous implementation ran tools.find() per alternative (O(n^2)).
+  const toolsBySlug = new Map(tools.map(t => [t.slug, t]));
+
+  const pairs: VsPair[] = [];
+
+  for (const propTool of tools) {
+    // Only proprietary tools anchor a "vs" page.
+    if (propTool.is_open_source) continue;
+
+    for (const altSlug of propTool.alternatives ?? []) {
+      const altTool = toolsBySlug.get(altSlug);
+      if (altTool) {
+        pairs.push({
+          slug: `${propTool.slug}-vs-${altTool.slug}`,
+          proprietaryTool: propTool,
+          opensourceTool: altTool
+        });
+      }
+    }
+  }
+
+  return pairs;
+}
diff --git a/data/stacks.ts b/data/stacks.ts
new file mode 100644
index 0000000..901e6d6
--- /dev/null
+++ b/data/stacks.ts
@@ -0,0 +1,90 @@
+import { Stack } from '../app/types';
+
+// Curated stacks surfaced on the site. Every toolSlug must match a `slug`
+// in data/tools.json; monthlySaved figures are hand-maintained estimates
+// (presumably USD — re-verify when tool pricing data changes).
+export const STACKS: Stack[] = [
+  {
+    id: 'bootstrapper',
+    name: 'The Bootstrapper Stack',
+    emoji: '🚀',
+    tagline: 'Launch for $0/mo',
+    description: 'Everything you need to build, ship, and manage a SaaS product without spending a dime on software. Perfect for solo founders and early-stage startups.',
+    monthlySaved: 310,
+    tools: [
+      { category: 'Database & Auth', toolSlug: 'supabase' },
+      { category: 'Project Mgmt', toolSlug: 'plane' },
+      { category: 'Communication', toolSlug: 'rocketchat' },
+      { category: 'Deployment', toolSlug: 'coolify' },
+      { category: 'Analytics', toolSlug: 'plausible' },
+      { category: 'Design', toolSlug: 'penpot' }
+    ],
+    seo_title: 'The Bootstrapper Stack - Build SaaS for Free',
+    seo_description: 'The ultimate open-source stack for solo founders. Database, Auth, DevOps, and Design tools that cost $0/mo.'
+  },
+  {
+    id: 'designer',
+    name: 'The Designer Stack',
+    emoji: '🎨',
+    tagline: 'Ditch Creative Cloud',
+    description: 'Professional design tools that rival Adobe. From UI/UX prototyping to photo editing and digital art — all open source, all free.',
+    monthlySaved: 110,
+    tools: [
+      { category: 'UI/UX Design', toolSlug: 'penpot' },
+      { category: 'Photo Editing', toolSlug: 'gimp' },
+      { category: 'Digital Art', toolSlug: 'krita' },
+      { category: 'Knowledge Base', toolSlug: 'appflowy' }
+    ],
+    seo_title: 'Open Source Design Stack - Adobe Alternatives',
+    seo_description: 'Free, professional design tools to replace Adobe Creative Cloud. Penpot, GIMP, Krita, and more.'
+  },
+  {
+    id: 'ai-first',
+    name: 'The AI-First Stack',
+    emoji: '🤖',
+    tagline: 'Own your AI',
+    description: 'Run powerful AI locally. No API keys, no usage limits, no data leaving your machine. LLMs, image generation, and code completion — all self-hosted.',
+    monthlySaved: 69,
+    tools: [
+      { category: 'LLM Inference', toolSlug: 'llama' },
+      { category: 'Coding Model', toolSlug: 'deepseek' },
+      { category: 'Image Generation', toolSlug: 'stable-diffusion' },
+      { category: 'IDE Assistant', toolSlug: 'continue-dev' },
+      { category: 'Autocomplete', toolSlug: 'tabby' }
+    ],
+    seo_title: 'Local AI Stack - Self-Hosted LLMs & Tools',
+    seo_description: 'Run AI locally with this curated stack. Llama 3, Stable Diffusion, and coding assistants that respect your privacy.'
+  },
+  {
+    id: 'devops',
+    name: 'The DevOps Stack',
+    emoji: '⚙️',
+    tagline: 'Self-host everything',
+    description: 'From backend to hosting to monitoring — deploy and manage your entire infrastructure on your own terms. Zero vendor lock-in.',
+    monthlySaved: 375,
+    tools: [
+      { category: 'Backend as a Service', toolSlug: 'supabase' },
+      { category: 'PaaS (Vercel Alt)', toolSlug: 'coolify' },
+      { category: 'Git Deployment', toolSlug: 'dokku' },
+      { category: 'Web Analytics', toolSlug: 'plausible' },
+      { category: 'Product Analytics', toolSlug: 'posthog' }
+    ],
+    seo_title: 'Open Source DevOps Stack - Self-Hosted PaaS',
+    seo_description: 'Deploy like a pro with tools like Coolify, Dokku, and Supabase. The ultimate self-hosted infrastructure stack.'
+  },
+  {
+    id: 'privacy',
+    name: 'The Privacy Stack',
+    emoji: '🔒',
+    tagline: 'Zero data leaks',
+    description: 'Every tool runs on your infrastructure. Your data never touches a third-party server. For teams and individuals who take privacy seriously.',
+    monthlySaved: 185,
+    tools: [
+      { category: 'Password Manager', toolSlug: 'bitwarden' },
+      { category: 'Team Chat', toolSlug: 'mattermost' },
+      { category: 'Video Calls', toolSlug: 'jitsi' },
+      { category: 'Analytics', toolSlug: 'matomo' },
+      { category: 'Notes & Docs', toolSlug: 'appflowy' },
+      { category: 'Knowledge Base', toolSlug: 'affine' }
+    ],
+    seo_title: 'Privacy-First Software Stack - Secure Alternatives',
+    seo_description: 'A 100% self-hostable stack for maximum privacy. Bitwarden, Mattermost, Jitsi, and more.'
+  }
+];
diff --git a/data/tools-min.json b/data/tools-min.json
new file mode 100644
index 0000000..66849ca
--- /dev/null
+++ b/data/tools-min.json
@@ -0,0 +1,1653 @@
+[
+ {
+ "slug": "firebase",
+ "name": "Firebase",
+ "category": "Backend as a Service",
+ "is_open_source": false,
+ "description": "Google's app development platform.",
+ "logo_url": "/logos/firebase.svg",
+ "pricing_model": "Paid/Freemium",
+ "avg_monthly_cost": 25,
+ "alternatives": [
+ "supabase",
+ "appwrite",
+ "pocketbase"
+ ],
+ "tags": [
+ "Cloud",
+ "Database",
+ "Auth"
+ ],
+ "website": "https://firebase.google.com"
+ },
+ {
+ "slug": "supabase",
+ "name": "Supabase",
+ "category": "Backend as a Service",
+ "is_open_source": true,
+ "description": "The Postgres development platform. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications.",
+ "logo_url": "/logos/supabase.svg",
+ "alternatives": [],
+ "tags": [
+ "Database",
+ "Realtime",
+ "Postgres"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://supabase.com"
+ },
+ {
+ "slug": "appwrite",
+ "name": "Appwrite",
+ "category": "Backend as a Service",
+ "is_open_source": true,
+ "description": "Appwrite® - complete cloud infrastructure for your web, mobile and AI apps. Including Auth, Databases, Storage, Functions, Messaging, Hosting, Realtime and more",
+ "logo_url": "/logos/appwrite.svg",
+ "alternatives": [],
+ "tags": [
+ "Database",
+ "Auth",
+ "Self-Hosted"
+ ],
+ "license": "BSD 3-Clause \"New\" or \"Revised\" License",
+ "website": "https://appwrite.io"
+ },
+ {
+ "slug": "pocketbase",
+ "name": "PocketBase",
+ "category": "Backend as a Service",
+ "is_open_source": true,
+ "description": "Open Source realtime backend in 1 file",
+ "logo_url": "/logos/pocketbase.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT License",
+ "website": "https://pocketbase.io"
+ },
+ {
+ "slug": "salesforce",
+ "name": "Salesforce",
+ "category": "CRM",
+ "is_open_source": false,
+ "description": "The world's #1 CRM.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=salesforce.com",
+ "pricing_model": "Paid",
+ "avg_monthly_cost": 25,
+ "alternatives": [
+ "odoo",
+ "erpnext"
+ ],
+ "tags": [],
+ "website": "https://salesforce.com"
+ },
+ {
+ "slug": "slack",
+ "name": "Slack",
+ "category": "Communication",
+ "is_open_source": false,
+ "description": "Team communication platform.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=slack.com",
+ "pricing_model": "Paid/Freemium",
+ "avg_monthly_cost": 12,
+ "alternatives": [
+ "mattermost",
+ "rocketchat"
+ ],
+ "tags": [],
+ "website": "https://slack.com"
+ },
+ {
+ "slug": "mattermost",
+ "name": "Mattermost",
+ "category": "Communication",
+ "is_open_source": true,
+    "description": "Mattermost is an open source platform for secure collaboration across the entire software development lifecycle.",
+ "logo_url": "/logos/mattermost.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://mattermost.com"
+ },
+ {
+ "slug": "rocketchat",
+ "name": "Rocket.Chat",
+ "category": "Communication",
+ "is_open_source": true,
+ "description": "The Secure CommsOS™ for mission-critical operations",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=rocket.chat",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://rocket.chat"
+ },
+ {
+ "slug": "jira",
+ "name": "Jira",
+ "category": "Project Management",
+ "is_open_source": false,
+ "description": "Issue tracking and project management tool.",
+ "logo_url": "/logos/jira.svg",
+ "pricing_model": "Paid",
+ "avg_monthly_cost": 15,
+ "alternatives": [
+ "plane",
+ "taiga"
+ ],
+ "tags": [],
+ "website": "https://www.atlassian.com/software/jira"
+ },
+ {
+ "slug": "plane",
+ "name": "Plane",
+ "category": "Project Management",
+ "is_open_source": true,
+ "description": "🔥🔥🔥 Open-source Jira, Linear, Monday, and ClickUp alternative. Plane is a modern project management platform to manage tasks, sprints, docs, and triage.",
+ "logo_url": "/logos/plane.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "GNU Affero General Public License v3.0",
+ "website": "https://plane.so"
+ },
+ {
+ "slug": "taiga",
+ "name": "Taiga",
+ "category": "Project Management",
+ "is_open_source": true,
+ "description": null,
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=taiga.io",
+ "alternatives": [],
+ "tags": [],
+ "license": "Mozilla Public License 2.0",
+ "website": "https://taiga.io"
+ },
+ {
+ "slug": "zoom",
+ "name": "Zoom",
+ "category": "Communication",
+ "is_open_source": false,
+ "description": "Video conferencing platform, cloud phone, webinars, and chat.",
+ "logo_url": "/logos/zoom.svg",
+ "pricing_model": "Paid/Freemium",
+ "avg_monthly_cost": 15,
+ "alternatives": [
+ "jitsi-meet"
+ ],
+ "tags": [],
+ "website": "https://zoom.us"
+ },
+ {
+ "slug": "jitsi-meet",
+ "name": "Jitsi Meet",
+ "category": "Communication",
+ "is_open_source": true,
+ "description": "Jitsi Meet - Secure, Simple and Scalable Video Conferences that you use as a standalone app or embed in your web application.",
+ "logo_url": "/logos/jitsi-meet.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Apache License 2.0",
+ "website": "https://jitsi.org"
+ },
+ {
+ "slug": "photoshop",
+ "name": "Adobe Photoshop",
+ "category": "Design",
+ "is_open_source": false,
+ "description": "Industry standard image editing software.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.adobe.com",
+ "pricing_model": "Paid (Monthly)",
+ "avg_monthly_cost": 60,
+ "alternatives": [
+ "gimp",
+ "krita"
+ ],
+ "tags": [],
+ "website": "https://www.adobe.com/products/photoshop.html"
+ },
+ {
+ "slug": "gimp",
+ "name": "GIMP",
+ "category": "Design",
+ "is_open_source": true,
+ "description": "Read-only mirror of https://gitlab.gnome.org/GNOME/gimp",
+ "logo_url": "/logos/gimp.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://www.gimp.org"
+ },
+ {
+ "slug": "krita",
+ "name": "Krita",
+ "category": "Design",
+ "is_open_source": true,
+ "description": "Krita is a free and open source cross-platform application that offers an end-to-end solution for creating digital art files from scratch built on the KDE and Qt frameworks.",
+ "logo_url": "/logos/krita.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "GNU General Public License v3.0",
+ "website": "https://krita.org"
+ },
+ {
+ "slug": "figma",
+ "name": "Figma",
+ "category": "Design",
+ "is_open_source": false,
+ "description": "Collaborative interface design tool.",
+ "logo_url": "/logos/figma.svg",
+ "pricing_model": "Freemium/Paid",
+ "avg_monthly_cost": 15,
+ "alternatives": [
+ "penpot"
+ ],
+ "tags": [],
+ "website": "https://www.figma.com"
+ },
+ {
+ "slug": "penpot",
+ "name": "Penpot",
+ "category": "Design",
+ "is_open_source": true,
+ "description": "Penpot: The open-source design tool for design and code collaboration",
+ "logo_url": "/logos/penpot.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Mozilla Public License 2.0",
+ "website": "https://penpot.app"
+ },
+ {
+ "slug": "notion",
+ "name": "Notion",
+ "category": "Productivity",
+ "is_open_source": false,
+ "description": "All-in-one workspace.",
+ "logo_url": "/logos/notion.svg",
+ "pricing_model": "Freemium/Paid",
+ "avg_monthly_cost": 10,
+ "alternatives": [
+ "appflowy",
+ "affine"
+ ],
+ "tags": [],
+ "website": "https://www.notion.so"
+ },
+ {
+ "slug": "appflowy",
+ "name": "AppFlowy",
+ "category": "Productivity",
+ "is_open_source": true,
+ "description": "Bring projects, wikis, and teams together with AI. AppFlowy is the AI collaborative workspace where you achieve more without losing control of your data. The leading open source Notion alternative.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.appflowy.io",
+ "alternatives": [],
+ "tags": [],
+ "license": "GNU Affero General Public License v3.0",
+ "website": "https://www.appflowy.io"
+ },
+ {
+ "slug": "affine",
+ "name": "AFFiNE",
+ "category": "Productivity",
+ "is_open_source": true,
+ "description": "There can be more than Notion and Miro. AFFiNE(pronounced [ə‘fain]) is a next-gen knowledge base that brings planning, sorting and creating all together. Privacy first, open-source, customizable and ready to use. ",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=affine.pro",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://affine.pro"
+ },
+ {
+ "slug": "google-analytics",
+ "name": "Google Analytics",
+ "category": "Analytics",
+ "is_open_source": false,
+ "description": "Web analytics service.",
+ "logo_url": "/logos/google-analytics.svg",
+ "pricing_model": "Free/Paid",
+ "avg_monthly_cost": 150,
+ "alternatives": [
+ "plausible",
+ "posthog",
+ "matomo"
+ ],
+ "tags": [],
+ "website": "https://analytics.google.com"
+ },
+ {
+ "slug": "plausible",
+ "name": "Plausible",
+ "category": "Analytics",
+ "is_open_source": true,
+ "description": "Simple, open source, lightweight and privacy-friendly web analytics alternative to Google Analytics.",
+ "logo_url": "/logos/plausible.svg",
+ "alternatives": [],
+ "tags": [
+ "Analytics",
+ "Privacy",
+ "GDPR"
+ ],
+ "license": "GNU Affero General Public License v3.0",
+ "website": "https://plausible.io"
+ },
+ {
+ "slug": "posthog",
+ "name": "PostHog",
+ "category": "Analytics",
+ "is_open_source": true,
+ "description": "🦔 PostHog is an all-in-one developer platform for building successful products. We offer product analytics, web analytics, session replay, error tracking, feature flags, experimentation, surveys, data warehouse, a CDP, and an AI product assistant to help debug your code, ship features faster, and keep all your usage and customer data in one stack.",
+ "logo_url": "/logos/posthog.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://posthog.com"
+ },
+ {
+ "slug": "matomo",
+ "name": "Matomo",
+ "category": "Analytics",
+ "is_open_source": true,
+ "description": "Empowering People Ethically 🚀 — Matomo is hiring! Join us → https://matomo.org/jobs Matomo is the leading open-source alternative to Google Analytics, giving you complete control and built-in privacy. Easily collect, visualise, and analyse data from websites & apps. Star us on GitHub ⭐️ – Pull Requests welcome! ",
+ "logo_url": "/logos/matomo.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "GNU General Public License v3.0",
+ "website": "https://matomo.org"
+ },
+ {
+ "slug": "1password",
+ "name": "1Password",
+ "category": "Security",
+ "is_open_source": false,
+ "description": "Password manager.",
+ "logo_url": "/logos/1password.svg",
+ "pricing_model": "Paid",
+ "avg_monthly_cost": 8,
+ "alternatives": [
+ "bitwarden",
+ "keepassxc"
+ ],
+ "tags": [],
+ "website": "https://1password.com"
+ },
+ {
+ "slug": "bitwarden",
+ "name": "Bitwarden",
+ "category": "Security",
+ "is_open_source": true,
+ "description": "Bitwarden infrastructure/backend (API, database, Docker, etc).",
+ "logo_url": "/logos/bitwarden.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://bitwarden.com"
+ },
+ {
+ "slug": "keepassxc",
+ "name": "KeePassXC",
+ "category": "Security",
+ "is_open_source": true,
+ "description": "KeePassXC is a cross-platform community-driven port of the Windows application “KeePass Password Safe”.",
+ "logo_url": "/logos/keepassxc.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://keepassxc.org"
+ },
+ {
+ "slug": "heroku",
+ "name": "Heroku",
+ "category": "DevOps",
+ "is_open_source": false,
+ "description": "Platform as a service.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=heroku.com",
+ "pricing_model": "Paid",
+ "avg_monthly_cost": 8,
+ "alternatives": [
+ "coolify",
+ "dokku"
+ ],
+ "tags": [],
+ "website": "https://heroku.com"
+ },
+ {
+ "slug": "coolify",
+ "name": "Coolify",
+ "category": "DevOps",
+ "is_open_source": true,
+ "description": "An open-source, self-hostable PaaS alternative to Vercel, Heroku & Netlify that lets you easily deploy static sites, databases, full-stack applications and 280+ one-click services on your own servers.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coolify.io",
+ "alternatives": [],
+ "tags": [
+ "DevOps",
+ "PaaS",
+ "Self-Hosted"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://coolify.io"
+ },
+ {
+ "slug": "sap",
+ "name": "SAP S/4HANA",
+ "category": "ERP",
+ "is_open_source": false,
+ "description": "The world leader in enterprise resource planning software.",
+ "logo_url": "/logos/sap.svg",
+ "pricing_model": "Paid (Enterprise)",
+ "avg_monthly_cost": 100,
+ "alternatives": [
+ "odoo",
+ "erpnext"
+ ],
+ "tags": [],
+ "website": "https://www.sap.com"
+ },
+ {
+ "slug": "odoo",
+ "name": "Odoo",
+ "category": "ERP",
+ "is_open_source": true,
+ "description": "A suite of open source business apps: CRM, eCommerce, accounting, manufacturing, warehouse, and more.",
+ "logo_url": "/logos/odoo.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "LGPL-3.0",
+ "website": "https://www.odoo.com"
+ },
+ {
+ "slug": "erpnext",
+ "name": "ERPNext",
+ "category": "ERP",
+ "is_open_source": true,
+ "description": "A free and open-source integrated Enterprise Resource Planning (ERP) software.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=erpnext.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "GNU General Public License v3.0",
+ "website": "https://erpnext.com"
+ },
+ {
+ "slug": "autocad",
+ "name": "AutoCAD",
+ "category": "CAD",
+ "is_open_source": false,
+ "description": "Professional computer-aided design (CAD) and drafting software.",
+ "logo_url": "/logos/autocad.svg",
+ "pricing_model": "Paid (Subscription)",
+ "avg_monthly_cost": 75,
+ "alternatives": [
+ "librecad",
+ "freecad"
+ ],
+ "tags": [],
+ "website": "https://www.autodesk.com/products/autocad"
+ },
+ {
+ "slug": "librecad",
+ "name": "LibreCAD",
+ "category": "CAD",
+ "is_open_source": true,
+ "description": "A mature, feature-rich 2D CAD application with a loyal user community.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=librecad.org",
+ "alternatives": [],
+ "tags": [],
+ "license": "GPLv2",
+ "website": "https://librecad.org"
+ },
+ {
+ "slug": "freecad",
+ "name": "FreeCAD",
+ "category": "CAD",
+ "is_open_source": true,
+ "description": "A general-purpose parametric 3D CAD modeler and a BIM software application.",
+ "logo_url": "/logos/freecad.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "LGPLv2+",
+ "website": "https://www.freecad.org"
+ },
+ {
+ "slug": "zapier",
+ "name": "Zapier",
+ "category": "Automation",
+ "is_open_source": false,
+ "description": "The pioneer in workflow automation for everyone.",
+ "logo_url": "/logos/zapier.svg",
+ "pricing_model": "Paid (Task-based)",
+ "avg_monthly_cost": 20,
+ "alternatives": [
+ "n8n",
+ "activepieces"
+ ],
+ "tags": [],
+ "website": "https://zapier.com"
+ },
+ {
+ "slug": "n8n",
+ "name": "n8n",
+ "category": "Automation",
+ "is_open_source": true,
+ "description": "Fair-code workflow automation tool. Easily automate tasks across different services.",
+ "logo_url": "/logos/n8n.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "Sustainable Use License",
+ "website": "https://n8n.io"
+ },
+ {
+ "slug": "activepieces",
+ "name": "Activepieces",
+ "category": "Automation",
+ "is_open_source": true,
+ "description": "Open source alternative to Zapier. Automate your work with 200+ apps.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=activepieces.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://www.activepieces.com"
+ },
+ {
+ "slug": "tableau",
+ "name": "Tableau",
+ "category": "Analytics",
+ "is_open_source": false,
+ "description": "Powerful data visualization and business intelligence platform.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tableau.com",
+ "pricing_model": "Paid (Seat-based)",
+ "avg_monthly_cost": 70,
+ "alternatives": [
+ "metabase",
+ "superset"
+ ],
+ "tags": [],
+ "website": "https://www.tableau.com"
+ },
+ {
+ "slug": "metabase",
+ "name": "Metabase",
+ "category": "Analytics",
+ "is_open_source": true,
+ "description": "The simplest, fastest way to get business intelligence and analytics throughout your company.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=metabase.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPLv3",
+ "website": "https://www.metabase.com"
+ },
+ {
+ "slug": "superset",
+ "name": "Apache Superset",
+ "category": "Analytics",
+ "is_open_source": true,
+ "description": "Enterprise-ready business intelligence web application.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=superset.apache.org",
+ "alternatives": [],
+ "tags": [],
+ "license": "Apache 2.0",
+ "website": "https://superset.apache.org"
+ },
+ {
+ "slug": "auth0",
+ "name": "Auth0",
+ "category": "Security",
+ "is_open_source": false,
+ "description": "The leading authentication and authorization platform.",
+ "logo_url": "/logos/auth0.svg",
+ "pricing_model": "Paid (MAU-based)",
+ "avg_monthly_cost": 23,
+ "alternatives": [
+ "keycloak",
+ "authentik"
+ ],
+ "tags": [],
+ "website": "https://auth0.com"
+ },
+ {
+ "slug": "keycloak",
+ "name": "Keycloak",
+ "category": "Security",
+ "is_open_source": true,
+ "description": "Open source identity and access management for modern applications and services.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=keycloak.org",
+ "alternatives": [],
+ "tags": [],
+ "license": "Apache 2.0",
+ "website": "https://www.keycloak.org"
+ },
+ {
+ "slug": "okta",
+ "name": "Okta",
+ "category": "Security",
+ "is_open_source": false,
+ "description": "The World's Identity Company, providing enterprise-grade IAM.",
+ "logo_url": "/logos/okta.svg",
+ "pricing_model": "Paid (User-based)",
+ "avg_monthly_cost": 6,
+ "alternatives": [
+ "authentik",
+ "keycloak"
+ ],
+ "tags": [],
+ "website": "https://okta.com"
+ },
+ {
+ "slug": "authentik",
+ "name": "Authentik",
+ "category": "Security",
+ "is_open_source": true,
+ "description": "The overall-best open-source identity provider, focused on flexibility and versatility.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=goauthentik.io",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://goauthentik.io"
+ },
+ {
+ "slug": "s3",
+ "name": "Amazon S3",
+ "category": "Cloud Infrastructure",
+ "is_open_source": false,
+ "description": "Object storage built to retrieve any amount of data from anywhere.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=aws.amazon.com",
+ "pricing_model": "Paid (Usage-based)",
+ "avg_monthly_cost": 23,
+ "alternatives": [
+ "minio"
+ ],
+ "tags": [],
+ "website": "https://aws.amazon.com/s3"
+ },
+ {
+ "slug": "minio",
+ "name": "MinIO",
+ "category": "Cloud Infrastructure",
+ "is_open_source": true,
+ "description": "High-performance, S3-compatible object storage for AI and enterprise data.",
+ "logo_url": "/logos/minio.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPLv3",
+ "website": "https://min.io"
+ },
+ {
+ "slug": "zendesk",
+ "name": "Zendesk",
+ "category": "Support",
+ "is_open_source": false,
+ "description": "The leader in customer service and engagement software.",
+ "logo_url": "/logos/zendesk.svg",
+ "pricing_model": "Paid (Agent-based)",
+ "avg_monthly_cost": 19,
+ "alternatives": [
+ "zammad"
+ ],
+ "tags": [],
+ "website": "https://www.zendesk.com"
+ },
+ {
+ "slug": "zammad",
+ "name": "Zammad",
+ "category": "Support",
+ "is_open_source": true,
+ "description": "A web-based, open source helpdesk/customer support system with many features.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=zammad.org",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPLv3",
+ "website": "https://zammad.org"
+ },
+ {
+ "slug": "workday",
+ "name": "Workday",
+ "category": "HR",
+ "is_open_source": false,
+ "description": "Enterprise management cloud for finance and human resources.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=workday.com",
+ "pricing_model": "Paid (Enterprise)",
+ "avg_monthly_cost": 45,
+ "alternatives": [
+ "orangehrm"
+ ],
+ "tags": [],
+ "website": "https://www.workday.com"
+ },
+ {
+ "slug": "orangehrm",
+ "name": "OrangeHRM",
+ "category": "HR",
+ "is_open_source": true,
+ "description": "The world's most popular open source human resource management software.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=orangehrm.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "GPLv2",
+ "website": "https://www.orangehrm.com"
+ },
+ {
+ "slug": "m365",
+ "name": "Microsoft 365",
+ "category": "Productivity",
+ "is_open_source": false,
+ "description": "The world's most popular office suite and cloud collaboration platform.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=office.com",
+ "pricing_model": "Paid (Subscription)",
+ "avg_monthly_cost": 12,
+ "alternatives": [
+ "onlyoffice",
+ "nextcloud"
+ ],
+ "tags": [],
+ "website": "https://www.office.com"
+ },
+ {
+ "slug": "onlyoffice",
+ "name": "ONLYOFFICE",
+ "category": "Productivity",
+ "is_open_source": true,
+ "description": "Powerful online document editors for text, spreadsheets, and presentations. Highly compatible with MS Office.",
+ "logo_url": "/logos/onlyoffice.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPLv3",
+ "website": "https://www.onlyoffice.com"
+ },
+ {
+ "slug": "shopify",
+ "name": "Shopify",
+ "category": "E-commerce",
+ "is_open_source": false,
+    "description": "Commercial platform that allows anyone to set up an online store.",
+ "logo_url": "/logos/shopify.svg",
+ "pricing_model": "Paid (Subscription)",
+ "avg_monthly_cost": 39,
+ "alternatives": [
+ "medusa",
+ "saleor"
+ ],
+ "tags": [],
+ "website": "https://www.shopify.com"
+ },
+ {
+ "slug": "medusa",
+ "name": "Medusa.js",
+ "category": "E-commerce",
+ "is_open_source": true,
+ "description": "The open-source alternative to Shopify. Building blocks for digital commerce.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=medusajs.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://medusajs.com"
+ },
+ {
+ "slug": "docusign",
+ "name": "DocuSign",
+ "category": "Legal",
+ "is_open_source": false,
+ "description": "The world's #1 way to sign electronically on practically any device, from almost anywhere, at any time.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=docusign.com",
+ "pricing_model": "Paid (Envelope-based)",
+ "avg_monthly_cost": 25,
+ "alternatives": [
+ "documenso"
+ ],
+ "tags": [],
+ "website": "https://www.docusign.com"
+ },
+ {
+ "slug": "documenso",
+ "name": "Documenso",
+ "category": "Legal",
+ "is_open_source": true,
+ "description": "The open-source DocuSign alternative. We aim to be the world's most trusted document signing platform.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=documenso.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPL-3.0",
+ "website": "https://documenso.com"
+ },
+ {
+ "slug": "mailchimp",
+ "name": "Mailchimp",
+ "category": "Marketing",
+ "is_open_source": false,
+ "description": "All-in-one marketing platform that helps you manage and talk to your clients, customers, and other interested parties.",
+ "logo_url": "/logos/mailchimp.svg",
+ "pricing_model": "Paid (Contact-based)",
+ "avg_monthly_cost": 13,
+ "alternatives": [
+ "listmonk",
+ "mautic"
+ ],
+ "tags": [],
+ "website": "https://mailchimp.com"
+ },
+ {
+ "slug": "listmonk",
+ "name": "Listmonk",
+ "category": "Marketing",
+ "is_open_source": true,
+ "description": "High performance, self-hosted newsletter and mailing list manager with a modern dashboard.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=listmonk.app",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPL-3.0",
+ "website": "https://listmonk.app"
+ },
+ {
+ "slug": "mautic",
+ "name": "Mautic",
+ "category": "Marketing",
+ "is_open_source": true,
+ "description": "World's largest open source marketing automation project.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mautic.org",
+ "alternatives": [],
+ "tags": [],
+ "license": "GPL-3.0",
+ "website": "https://www.mautic.org"
+ },
+ {
+ "slug": "statuspage",
+ "name": "Statuspage",
+ "category": "Monitoring",
+ "is_open_source": false,
+ "description": "The best way to communicate status and downtime to your customers.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=atlassian.com",
+ "pricing_model": "Paid (Atlassian)",
+ "avg_monthly_cost": 29,
+ "alternatives": [
+ "uptime-kuma"
+ ],
+ "tags": [],
+ "website": "https://www.atlassian.com/software/statuspage"
+ },
+ {
+ "slug": "uptime-kuma",
+ "name": "Uptime Kuma",
+ "category": "Monitoring",
+ "is_open_source": true,
+ "description": "A fancy self-hosted monitoring tool.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=uptime.kuma.pet",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://uptime.kuma.pet"
+ },
+ {
+ "slug": "datadog",
+ "name": "Datadog",
+ "category": "Monitoring",
+ "is_open_source": false,
+ "description": "Modern monitoring and security that gives you full visibility into your applications and infrastructure.",
+ "logo_url": "/logos/datadog.svg",
+ "pricing_model": "Paid (Usage-based)",
+ "avg_monthly_cost": 23,
+ "alternatives": [
+ "signoz"
+ ],
+ "tags": [],
+ "website": "https://www.datadoghq.com"
+ },
+ {
+ "slug": "signoz",
+ "name": "SigNoz",
+ "category": "Monitoring",
+ "is_open_source": true,
+ "description": "Open source observability platform. SigNoz helps developers monitor applications and troubleshoot problems.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=signoz.io",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://signoz.io"
+ },
+ {
+ "slug": "typeform",
+ "name": "Typeform",
+ "category": "Productivity",
+ "is_open_source": false,
+ "description": "Build beautiful, interactive forms, surveys, quizzes, and something else entirely.",
+ "logo_url": "/logos/typeform.svg",
+ "pricing_model": "Paid (Response-based)",
+ "avg_monthly_cost": 25,
+ "alternatives": [
+ "tally"
+ ],
+ "tags": [],
+ "website": "https://www.typeform.com"
+ },
+ {
+ "slug": "tally",
+ "name": "Tally",
+ "category": "Productivity",
+ "is_open_source": false,
+ "description": "The simplest way to create forms. Tally is a new type of form builder that works like a doc.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tally.so",
+ "pricing_model": "Free/Paid",
+ "alternatives": [],
+ "tags": [
+ "Forms",
+ "Surveys",
+ "No-code"
+ ],
+ "website": "https://tally.so"
+ },
+ {
+ "slug": "confluence",
+ "name": "Confluence",
+ "category": "Productivity",
+ "is_open_source": false,
+ "description": "Your remote-friendly team workspace where knowledge and collaboration meet.",
+ "logo_url": "/logos/confluence.svg",
+ "pricing_model": "Paid (Atlassian)",
+ "avg_monthly_cost": 10,
+ "alternatives": [
+ "outline"
+ ],
+ "tags": [],
+ "website": "https://www.atlassian.com/software/confluence"
+ },
+ {
+ "slug": "outline",
+ "name": "Outline",
+ "category": "Productivity",
+ "is_open_source": true,
+ "description": "Fast, collaborative, knowledge base for your team built using React and Markdown.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=getoutline.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://www.getoutline.com"
+ },
+ {
+ "slug": "hootsuite",
+ "name": "Hootsuite",
+ "category": "Marketing",
+ "is_open_source": false,
+ "description": "Social media marketing and management dashboard.",
+ "logo_url": "/logos/hootsuite.svg",
+ "pricing_model": "Paid (Seat-based)",
+ "avg_monthly_cost": 49,
+ "alternatives": [
+ "mixpost"
+ ],
+ "tags": [],
+ "website": "https://www.hootsuite.com"
+ },
+ {
+ "slug": "mixpost",
+ "name": "Mixpost",
+ "category": "Marketing",
+ "is_open_source": true,
+ "description": "Self-hosted social media management software.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mixpost.app",
+ "alternatives": [],
+ "tags": [],
+ "license": "Other",
+ "website": "https://mixpost.app"
+ },
+ {
+ "slug": "codespaces",
+ "name": "GitHub Codespaces",
+ "category": "DevOps",
+ "is_open_source": false,
+ "description": "Fast, cloud-hosted developer environments.",
+ "logo_url": "/logos/codespaces.svg",
+ "pricing_model": "Paid (Usage-based)",
+ "avg_monthly_cost": 15,
+ "alternatives": [
+ "coder"
+ ],
+ "tags": [],
+ "website": "https://github.com/features/codespaces"
+ },
+ {
+ "slug": "coder",
+ "name": "Coder",
+ "category": "DevOps",
+ "is_open_source": true,
+ "description": "Provision software development environments as code on your infrastructure.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coder.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPL-3.0",
+ "website": "https://coder.com"
+ },
+ {
+ "slug": "quickbooks",
+ "name": "QuickBooks",
+ "category": "Financial",
+ "is_open_source": false,
+ "description": "Smart, simple online accounting software for small businesses.",
+ "logo_url": "/logos/quickbooks.svg",
+ "pricing_model": "Paid (Monthly Subscription)",
+ "avg_monthly_cost": 25,
+ "alternatives": [
+ "akaunting",
+ "erpnext"
+ ],
+ "tags": [],
+ "website": "https://quickbooks.intuit.com"
+ },
+ {
+ "slug": "akaunting",
+ "name": "Akaunting",
+ "category": "Financial",
+ "is_open_source": true,
+ "description": "Free and open source online accounting software for small businesses and freelancers.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=akaunting.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "GPL-3.0",
+ "website": "https://akaunting.com"
+ },
+ {
+ "slug": "premiere",
+ "name": "Adobe Premiere Pro",
+ "category": "Creative",
+ "is_open_source": false,
+ "description": "Industry-leading video editing software for film, TV, and the web.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=adobe.com",
+ "pricing_model": "Paid (Creative Cloud)",
+ "avg_monthly_cost": 35,
+ "alternatives": [
+ "kdenlive"
+ ],
+ "tags": [],
+ "website": "https://www.adobe.com/products/premiere.html"
+ },
+ {
+ "slug": "kdenlive",
+ "name": "Kdenlive",
+ "category": "Creative",
+ "is_open_source": true,
+ "description": "Open source video editing software based on the MLT Framework and KDE.",
+ "logo_url": "/logos/kdenlive.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "GPL-3.0",
+ "website": "https://kdenlive.org"
+ },
+ {
+ "slug": "dashlane",
+ "name": "Dashlane",
+ "category": "Security",
+ "is_open_source": false,
+ "description": "Cloud-based password manager and digital wallet.",
+ "logo_url": "/logos/dashlane.svg",
+ "pricing_model": "Paid (Subscription)",
+ "avg_monthly_cost": 8,
+ "alternatives": [
+ "vaultwarden",
+ "bitwarden"
+ ],
+ "tags": [],
+ "website": "https://www.dashlane.com"
+ },
+ {
+ "slug": "vaultwarden",
+ "name": "Vaultwarden",
+ "category": "Security",
+ "is_open_source": true,
+ "description": "Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=bitwarden.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPL-3.0",
+ "website": "https://github.com/dani-garcia/vaultwarden"
+ },
+ {
+ "slug": "pipedrive",
+ "name": "Pipedrive",
+ "category": "CRM",
+ "is_open_source": false,
+ "description": "Sales CRM & pipeline management software that helps you get more organized.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=pipedrive.com",
+ "pricing_model": "Paid (Seat-based)",
+ "avg_monthly_cost": 15,
+ "alternatives": [
+ "twenty"
+ ],
+ "tags": [],
+ "website": "https://www.pipedrive.com"
+ },
+ {
+ "slug": "twenty",
+ "name": "Twenty",
+ "category": "CRM",
+ "is_open_source": true,
+ "description": "A modern open-source CRM alternative to Salesforce and Pipedrive.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=twenty.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPL-3.0",
+ "website": "https://twenty.com"
+ },
+ {
+ "slug": "sentry",
+ "name": "Sentry",
+ "category": "Monitoring",
+ "is_open_source": false,
+ "description": "Developer-first error tracking and performance monitoring.",
+ "logo_url": "/logos/sentry.svg",
+ "pricing_model": "Paid (Usage-based)",
+ "avg_monthly_cost": 26,
+ "alternatives": [
+ "glitchtip"
+ ],
+ "tags": [],
+ "website": "https://sentry.io"
+ },
+ {
+ "slug": "glitchtip",
+ "name": "GlitchTip",
+ "category": "Monitoring",
+ "is_open_source": true,
+ "description": "Open source error tracking that's compatible with Sentry SDKs.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=glitchtip.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://glitchtip.com"
+ },
+ {
+ "slug": "calendly",
+ "name": "Calendly",
+ "category": "Productivity",
+ "is_open_source": false,
+ "description": "The modern scheduling platform that makes 'finding time' a breeze.",
+ "logo_url": "/logos/calendly.svg",
+ "pricing_model": "Paid (Seat-based)",
+ "avg_monthly_cost": 10,
+ "alternatives": [
+ "calcom"
+ ],
+ "tags": [],
+ "website": "https://calendly.com"
+ },
+ {
+ "slug": "calcom",
+ "name": "Cal.com",
+ "category": "Productivity",
+ "is_open_source": true,
+ "description": "The open-source Calendly alternative. Take control of your scheduling.",
+ "logo_url": "/logos/calcom.svg",
+ "alternatives": [],
+ "tags": [],
+ "license": "AGPL-3.0",
+ "website": "https://cal.com"
+ },
+ {
+ "slug": "intercom",
+ "name": "Intercom",
+ "category": "Support",
+ "is_open_source": false,
+ "description": "The business messenger that builds real-time connections.",
+ "logo_url": "/logos/intercom.svg",
+ "pricing_model": "Paid (Seat-based)",
+ "avg_monthly_cost": 39,
+ "alternatives": [
+ "chaskiq",
+ "chatwoot"
+ ],
+ "tags": [],
+ "website": "https://www.intercom.com"
+ },
+ {
+ "slug": "chaskiq",
+ "name": "Chaskiq",
+ "category": "Support",
+ "is_open_source": true,
+ "description": "Open source conversational marketing platform alternative to Intercom and Drift.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=chaskiq.io",
+ "alternatives": [],
+ "tags": [],
+ "license": "GPL-3.0",
+ "website": "https://chaskiq.io"
+ },
+ {
+ "slug": "mailgun",
+ "name": "Mailgun",
+ "category": "Marketing",
+ "is_open_source": false,
+ "description": "Electronic mail delivery service for developers.",
+ "logo_url": "/logos/mailgun.svg",
+ "pricing_model": "Paid (Usage-based)",
+ "avg_monthly_cost": 15,
+ "alternatives": [
+ "postal"
+ ],
+ "tags": [],
+ "website": "https://www.mailgun.com"
+ },
+ {
+ "slug": "postal",
+ "name": "Postal",
+ "category": "Marketing",
+ "is_open_source": true,
+ "description": "A fully featured open source mail delivery platform for incoming & outgoing e-mail.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=postalserver.io",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://postalserver.io"
+ },
+ {
+ "slug": "segment",
+ "name": "Segment",
+ "category": "Marketing",
+ "is_open_source": false,
+ "description": "The leading customer data platform (CDP).",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=segment.com",
+ "pricing_model": "Paid (Usage-based)",
+ "avg_monthly_cost": 120,
+ "alternatives": [
+ "jitsu"
+ ],
+ "tags": [],
+ "website": "https://segment.com"
+ },
+ {
+ "slug": "jitsu",
+ "name": "Jitsu",
+ "category": "Marketing",
+ "is_open_source": true,
+ "description": "High-performance data collection platform and open-source Segment alternative.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jitsu.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT",
+ "website": "https://jitsu.com"
+ },
+ {
+ "slug": "dokku",
+ "name": "Dokku",
+ "category": "DevOps",
+ "is_open_source": true,
+ "description": "A docker-powered PaaS that helps you build and manage the lifecycle of applications",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=dokku.com",
+ "alternatives": [],
+ "tags": [],
+ "license": "MIT License",
+ "website": "https://dokku.com"
+ },
+ {
+ "slug": "chatgpt",
+ "name": "ChatGPT / OpenAI",
+ "category": "AI Models",
+ "is_open_source": false,
+ "description": "The leading commercial AI assistant and API platform (GPT-4o, o1).",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openai.com",
+ "pricing_model": "Paid/Freemium",
+ "avg_monthly_cost": 20,
+ "alternatives": [
+ "llama",
+ "deepseek",
+ "mistral"
+ ],
+ "tags": [
+ "AI",
+ "LLM",
+ "Chat"
+ ],
+ "website": "https://openai.com"
+ },
+ {
+ "slug": "llama",
+ "name": "Meta Llama 3.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "description": "Meta's flagship open-weight model with 128K context. Supports 8B, 70B, and 405B parameters.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=meta.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "LLM",
+ "128K Context"
+ ],
+ "license": "Llama 3.1 Community License",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 405,
+ "is_multimodal": false
+ },
+ "website": "https://llama.meta.com"
+ },
+ {
+ "slug": "deepseek",
+ "name": "DeepSeek-V3 / R1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "description": "Powerful open-source models including V3 (671B) and R1 (Reasoning). Rivals GPT-4o and o1.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "LLM",
+ "Reasoning"
+ ],
+ "license": "MIT License",
+ "ai_metadata": {
+ "vram_inference_gb": 160,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 671,
+ "parameters_active_b": 37,
+ "is_multimodal": false
+ },
+ "website": "https://deepseek.com"
+ },
+ {
+ "slug": "mistral",
+ "name": "Mistral Large 2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "description": "Flagship 123B model from Mistral AI. Optimized for multilingual, reasoning, and coding tasks.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mistral.ai",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "LLM",
+ "EU"
+ ],
+ "license": "Mistral Research License",
+ "ai_metadata": {
+ "vram_inference_gb": 80,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 123,
+ "is_multimodal": false
+ },
+ "website": "https://mistral.ai"
+ },
+ {
+ "slug": "gemma",
+ "name": "Google Gemma 2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "description": "Google's open-weight models (9B, 27B) with class-leading performance and efficient architecture.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=google.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "LLM",
+ "Google"
+ ],
+ "license": "Gemma License",
+ "ai_metadata": {
+ "vram_inference_gb": 18,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 27,
+ "is_multimodal": false
+ },
+ "website": "https://ai.google.dev/gemma"
+ },
+ {
+ "slug": "qwen",
+ "name": "Qwen 2.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "description": "Comprehensive LLM series from Alibaba Cloud, excelling in coding, math, and multilingual support.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=alibaba.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "LLM",
+ "Coding"
+ ],
+ "license": "Apache License 2.0",
+ "ai_metadata": {
+ "vram_inference_gb": 40,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 72,
+ "is_multimodal": false
+ },
+ "website": "https://qwenlm.github.io"
+ },
+ {
+ "slug": "midjourney",
+ "name": "Midjourney",
+ "category": "AI Image Generation",
+ "is_open_source": false,
+ "description": "Leading AI image generation tool, known for artistic and photorealistic outputs.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=midjourney.com",
+ "pricing_model": "Paid (Subscription)",
+ "avg_monthly_cost": 10,
+ "alternatives": [
+ "stable-diffusion",
+ "flux"
+ ],
+ "tags": [
+ "AI",
+ "Image",
+ "Art"
+ ],
+ "website": "https://midjourney.com"
+ },
+ {
+ "slug": "stable-diffusion",
+ "name": "Stable Diffusion 3.5",
+ "category": "AI Image Generation",
+ "is_open_source": true,
+ "description": "The latest open-weights image generation model from Stability AI, offering superior prompt adherence.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=stability.ai",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Image",
+ "Prompt Adherence"
+ ],
+ "license": "Stability Community License",
+ "website": "https://stability.ai"
+ },
+ {
+ "slug": "mochi-1",
+ "name": "Mochi-1",
+ "category": "AI Video Generation",
+ "is_open_source": true,
+ "description": "High-fidelity open-weights video generation model from Genmo, rivaling closed-source alternatives.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=genmo.ai",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Video",
+ "Motion"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://www.genmo.ai"
+ },
+ {
+ "slug": "hunyuan-video",
+ "name": "HunyuanVideo 1.5",
+ "category": "AI Video Generation",
+ "is_open_source": true,
+ "description": "Tencent's state-of-the-art open-source video generation model with 13B parameters.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tencent.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Video",
+ "HD"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://github.com/Tencent/HunyuanVideo"
+ },
+ {
+ "slug": "flux",
+ "name": "FLUX",
+ "category": "AI Image Generation",
+ "is_open_source": true,
+ "description": "Next-gen open image generation model from Black Forest Labs. State-of-the-art quality rivaling Midjourney.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=blackforestlabs.ai",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Image",
+ "New"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://blackforestlabs.ai"
+ },
+ {
+ "slug": "github-copilot",
+ "name": "GitHub Copilot",
+ "category": "AI Coding",
+ "is_open_source": false,
+ "description": "AI pair programmer by GitHub/OpenAI. Integrates into VS Code and JetBrains.",
+ "logo_url": "/logos/github-copilot.svg",
+ "pricing_model": "Paid (Subscription)",
+ "avg_monthly_cost": 10,
+ "alternatives": [
+ "continue-dev",
+ "tabby"
+ ],
+ "tags": [
+ "AI",
+ "Coding",
+ "IDE"
+ ],
+ "website": "https://github.com/features/copilot"
+ },
+ {
+ "slug": "continue-dev",
+ "name": "Continue",
+ "category": "AI Coding",
+ "is_open_source": true,
+ "description": "Open-source AI code assistant for VS Code and JetBrains. Use any model (local or API).",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=continue.dev",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Coding",
+ "IDE"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://continue.dev"
+ },
+ {
+ "slug": "tabby",
+ "name": "TabbyML",
+ "category": "AI Coding",
+ "is_open_source": true,
+ "description": "Self-hosted AI coding assistant. An open-source, self-hosted alternative to GitHub Copilot.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tabby.tabbyml.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Coding",
+ "Self-Hosted"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://tabby.tabbyml.com"
+ },
+ {
+ "slug": "ollama",
+ "name": "Ollama",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models locally.",
+ "logo_url": "/logos/ollama.svg",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Local",
+ "Runner"
+ ],
+ "license": "MIT License",
+ "website": "https://ollama.com"
+ },
+ {
+ "slug": "open-webui",
+ "name": "Open WebUI",
+ "category": "AI Interfaces",
+ "is_open_source": true,
+ "description": "User-friendly WebUI for LLMs (Formerly Ollama WebUI). Supports Ollama and OpenAI-compatible APIs.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openwebui.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "UI",
+ "Chat"
+ ],
+ "license": "MIT License",
+ "website": "https://openwebui.com"
+ },
+ {
+ "slug": "jan",
+ "name": "Jan",
+ "category": "AI Interfaces",
+ "is_open_source": true,
+ "description": "Jan is an open source alternative to ChatGPT that runs 100% offline on your computer.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jan.ai",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Desktop",
+ "Offline"
+ ],
+ "license": "AGPL-3.0",
+ "website": "https://jan.ai"
+ },
+ {
+ "slug": "lm-studio",
+ "name": "LM Studio",
+ "category": "AI Runners",
+ "is_open_source": false,
+ "description": "Discover, download, and run local LLMs. Easy GUI for GGUF models.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=lmstudio.ai",
+ "pricing_model": "Free (Proprietary)",
+ "alternatives": [
+ "ollama",
+ "gpt4all"
+ ],
+ "tags": [
+ "AI",
+ "Desktop",
+ "GUI"
+ ],
+ "website": "https://lmstudio.ai"
+ },
+ {
+ "slug": "gpt4all",
+ "name": "GPT4All",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "description": "Run open-source LLMs locally on your CPU and GPU. No internet required.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=gpt4all.io",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Desktop",
+ "CPU"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://gpt4all.io"
+ },
+ {
+ "slug": "localai",
+ "name": "LocalAI",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "description": "The specific build of LocalAI, the free, Open Source OpenAI alternative. Drop-in replacement for OpenAI API.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=localai.io",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "API",
+ "Backend"
+ ],
+ "license": "MIT License",
+ "website": "https://localai.io"
+ },
+ {
+ "slug": "flowise",
+ "name": "Flowise",
+ "category": "AI Tools",
+ "is_open_source": true,
+ "description": "Drag & drop UI to build your customized LLM flow using LangChainJS.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=flowiseai.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Low-Code",
+ "LangChain"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://flowiseai.com"
+ }
+]
\ No newline at end of file
diff --git a/data/tools.json b/data/tools.json
new file mode 100644
index 0000000..ca9a71b
--- /dev/null
+++ b/data/tools.json
@@ -0,0 +1,18080 @@
+[
+ {
+ "slug": "firebase",
+ "name": "Firebase",
+ "category": "Backend as a Service",
+ "is_open_source": false,
+ "pricing_model": "Paid/Freemium",
+ "website": "https://firebase.google.com",
+ "description": "Google's app development platform.",
+ "alternatives": [
+ "supabase",
+ "appwrite",
+ "pocketbase"
+ ],
+ "tags": [
+ "Cloud",
+ "Database",
+ "Auth"
+ ],
+ "logo_url": "/logos/firebase.svg",
+ "avg_monthly_cost": 25,
+ "pros": [
+ "Seamless Google ecosystem integration",
+ "Generous free tier (Spark plan)",
+ "Real-time database out of the box",
+ "Excellent mobile SDK support",
+ "Cloud Functions for serverless logic"
+ ],
+ "cons": [
+ "Vendor lock-in to Google",
+ "Pricing can spike unpredictably at scale",
+ "Limited query capabilities vs SQL"
+ ]
+ },
+ {
+ "slug": "supabase",
+ "name": "Supabase",
+ "category": "Backend as a Service",
+ "is_open_source": true,
+ "github_repo": "supabase/supabase",
+ "stars": 97401,
+ "website": "https://supabase.com",
+ "description": "The Postgres development platform. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications.",
+ "pros": [
+ "Postgres under the hood",
+ "No vendor lock-in"
+ ],
+ "cons": [
+ "Self-hosting can be complex"
+ ],
+ "last_commit": "2026-02-09T16:09:10Z",
+ "language": "TypeScript",
+ "license": "Apache License 2.0",
+ "tags": [
+ "Database",
+ "Realtime",
+ "Postgres",
+ "AI"
+ ],
+ "logo_url": "/logos/supabase.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/supabase"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "appwrite",
+ "name": "Appwrite",
+ "category": "Backend as a Service",
+ "is_open_source": true,
+ "github_repo": "appwrite/appwrite",
+ "stars": 54727,
+ "website": "https://appwrite.io",
+ "description": "Appwrite® - complete cloud infrastructure for your web, mobile and AI apps. Including Auth, Databases, Storage, Functions, Messaging, Hosting, Realtime and more",
+ "pros": [
+ "Self-hosted with a single Docker command",
+ "Modular architecture — use only what you need"
+ ],
+ "cons": [
+ "Smaller ecosystem than Firebase or Supabase",
+ "Limited built-in analytics and reporting"
+ ],
+ "last_commit": "2026-02-09T16:12:32Z",
+ "language": "TypeScript",
+ "license": "BSD 3-Clause \"New\" or \"Revised\" License",
+ "tags": [
+ "Database",
+ "Auth",
+ "Self-Hosted"
+ ],
+ "logo_url": "/logos/appwrite.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/appwrite"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "pocketbase",
+ "name": "PocketBase",
+ "category": "Backend as a Service",
+ "is_open_source": true,
+ "github_repo": "pocketbase/pocketbase",
+ "website": "https://pocketbase.io",
+ "description": "Open Source realtime backend in 1 file",
+ "pros": [
+ "Ships as a single binary — no dependencies",
+ "Deploy anywhere in seconds with zero config",
+ "Embedded SQLite with realtime subscriptions"
+ ],
+ "cons": [
+ "SQLite only (for now)"
+ ],
+ "stars": 55980,
+ "last_commit": "2026-02-01T08:09:48Z",
+ "language": "Go",
+ "license": "MIT License",
+ "logo_url": "/logos/pocketbase.svg",
+ "deployment": {
+ "image": "pocketbase/pocketbase:latest",
+ "port": 8090,
+ "volumes": [
+ "./pb_data:/pb/pb_data"
+ ],
+ "command": "serve --http=0.0.0.0:8090",
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/pocketbase"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "salesforce",
+ "name": "Salesforce",
+ "category": "CRM",
+ "is_open_source": false,
+ "pricing_model": "Paid",
+ "avg_monthly_cost": 25,
+ "website": "https://salesforce.com",
+ "description": "The world's #1 CRM.",
+ "alternatives": [
+ "odoo",
+ "erpnext"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=salesforce.com",
+ "pros": [
+ "Industry-leading CRM platform",
+ "Massive app marketplace (AppExchange)",
+ "Highly customizable workflows",
+ "Enterprise-grade security and compliance"
+ ],
+ "cons": [
+ "Expensive per-seat licensing",
+ "Steep learning curve",
+ "Heavy and complex for small teams"
+ ]
+ },
+ {
+ "slug": "slack",
+ "name": "Slack",
+ "category": "Communication",
+ "is_open_source": false,
+ "pricing_model": "Paid/Freemium",
+ "website": "https://slack.com",
+ "description": "Team communication platform.",
+ "alternatives": [
+ "mattermost",
+ "rocketchat"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=slack.com",
+ "avg_monthly_cost": 12,
+ "pros": [
+ "Best-in-class team communication UX",
+ "Huge integration ecosystem (2,000+ apps)",
+ "Powerful search across conversations",
+ "Thread-based discussions reduce noise"
+ ],
+ "cons": [
+ "Expensive at scale ($8.75+/user/mo)",
+ "Can become a constant distraction",
+ "Message history limits on free plan"
+ ]
+ },
+ {
+ "slug": "mattermost",
+ "name": "Mattermost",
+ "category": "Communication",
+ "is_open_source": true,
+ "github_repo": "mattermost/mattermost",
+ "website": "https://mattermost.com",
+ "description": "Mattermost is an open source platform for secure collaboration across the entire software development lifecycle..",
+ "pros": [
+ "Enterprise-grade security with SOC2 and HIPAA compliance",
+ "Granular access control and audit logging",
+ "Slack-compatible webhook and bot ecosystem"
+ ],
+ "cons": [
+ "Self-hosting maintenance"
+ ],
+ "stars": 35213,
+ "last_commit": "2026-02-09T16:03:54Z",
+ "language": "TypeScript",
+ "license": "Other",
+ "logo_url": "/logos/mattermost.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/mattermost"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "rocketchat",
+ "name": "Rocket.Chat",
+ "category": "Communication",
+ "is_open_source": true,
+ "github_repo": "RocketChat/Rocket.Chat",
+ "website": "https://rocket.chat",
+ "description": "The Secure CommsOS™ for mission-critical operations",
+ "pros": [
+ "Unified inbox with omnichannel support for live chat, email, and social",
+ "Highly customizable with white-labeling options",
+ "End-to-end encrypted messaging available"
+ ],
+ "cons": [
+ "Resource intensive"
+ ],
+ "stars": 44546,
+ "last_commit": "2026-02-09T16:20:40Z",
+ "language": "TypeScript",
+ "license": "Other",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=rocket.chat",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/rocketchat"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "jira",
+ "name": "Jira",
+ "category": "Project Management",
+ "is_open_source": false,
+ "pricing_model": "Paid",
+ "avg_monthly_cost": 15,
+ "website": "https://www.atlassian.com/software/jira",
+ "description": "Issue tracking and project management tool.",
+ "alternatives": [
+ "plane",
+ "taiga"
+ ],
+ "logo_url": "/logos/jira.svg",
+ "pros": [
+ "Industry standard for project management",
+ "Deep Agile/Scrum/Kanban support",
+ "Powerful custom workflows and automation",
+ "Extensive integration ecosystem"
+ ],
+ "cons": [
+ "Notoriously complex UI",
+ "Slow performance with large projects",
+ "Expensive for growing teams"
+ ]
+ },
+ {
+ "slug": "plane",
+ "name": "Plane",
+ "category": "Project Management",
+ "is_open_source": true,
+ "github_repo": "makeplane/plane",
+ "website": "https://plane.so",
+ "description": "🔥🔥🔥 Open-source Jira, Linear, Monday, and ClickUp alternative. Plane is a modern project management platform to manage tasks, sprints, docs, and triage.",
+ "pros": [
+ "Clean, modern interface inspired by Linear",
+ "Blazing fast — sub-100ms interactions",
+ "Built-in cycles, modules, and views"
+ ],
+ "cons": [
+ "Still relatively new"
+ ],
+ "stars": 45490,
+ "last_commit": "2026-02-09T13:56:47Z",
+ "language": "TypeScript",
+ "license": "GNU Affero General Public License v3.0",
+ "logo_url": "/logos/plane.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/plane"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "taiga",
+ "name": "Taiga",
+ "category": "Project Management",
+ "is_open_source": true,
+ "github_repo": "taigaio/taiga-back",
+ "website": "https://taiga.io",
+ "description": null,
+ "pros": [
+ "Beautiful, kanban and scrum boards with drag-and-drop",
+ "Full Agile toolkit: epics, sprints, user stories",
+ "Built-in wiki and project documentation"
+ ],
+ "cons": [
+ "Complex setup"
+ ],
+ "stars": 807,
+ "last_commit": "2026-01-09T07:28:59Z",
+ "language": "Python",
+ "license": "Mozilla Public License 2.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=taiga.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/taiga"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "zoom",
+ "name": "Zoom",
+ "category": "Communication",
+ "is_open_source": false,
+ "pricing_model": "Paid/Freemium",
+ "avg_monthly_cost": 15,
+ "website": "https://zoom.us",
+ "description": "Video conferencing platform, cloud phone, webinars, and chat.",
+ "alternatives": [
+ "jitsi-meet"
+ ],
+ "logo_url": "/logos/zoom.svg",
+ "pros": [
+ "Reliable video quality even on poor connections",
+ "Easy to join without creating an account",
+ "Breakout rooms and webinar support for large events",
+ "Cross-platform with desktop, mobile, and web apps"
+ ],
+ "cons": [
+ "Free plan limited to 40-minute meetings",
+ "Privacy concerns and past security issues",
+ "Zoom fatigue is real"
+ ]
+ },
+ {
+ "slug": "jitsi-meet",
+ "name": "Jitsi Meet",
+ "category": "Communication",
+ "is_open_source": true,
+ "github_repo": "jitsi/jitsi-meet",
+ "website": "https://jitsi.org",
+ "description": "Jitsi Meet - Secure, Simple and Scalable Video Conferences that you use as a standalone app or embed in your web application.",
+ "pros": [
+ "Join calls without creating an account",
+ "End-to-end encrypted video conferencing",
+ "Scales to hundreds of participants with Jitsi Videobridge"
+ ],
+ "cons": [
+ "Performance on large calls"
+ ],
+ "stars": 28562,
+ "last_commit": "2026-02-09T12:49:10Z",
+ "language": "TypeScript",
+ "license": "Apache License 2.0",
+ "logo_url": "/logos/jitsi-meet.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/jitsi-meet"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "photoshop",
+ "name": "Adobe Photoshop",
+ "category": "Design",
+ "is_open_source": false,
+ "pricing_model": "Paid (Monthly)",
+ "avg_monthly_cost": 60,
+ "website": "https://www.adobe.com/products/photoshop.html",
+ "description": "Industry standard image editing software.",
+ "alternatives": [
+ "gimp",
+ "krita"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.adobe.com",
+ "pros": [
+ "Industry gold standard for image editing",
+ "Unmatched feature depth and precision",
+ "Huge plugin and template ecosystem",
+ "AI-powered generative fill and selection"
+ ],
+ "cons": [
+ "Subscription-only pricing ($22.99/mo)",
+ "Steep learning curve for beginners",
+ "Resource-heavy — needs powerful hardware"
+ ]
+ },
+ {
+ "slug": "gimp",
+ "name": "GIMP",
+ "category": "Design",
+ "is_open_source": true,
+ "github_repo": "GNOME/gimp",
+ "website": "https://www.gimp.org",
+ "description": "Read-only mirror of https://gitlab.gnome.org/GNOME/gimp",
+ "pros": [
+ "Professional-grade photo editing tools rivaling Photoshop",
+ "Extensible with Python and Script-Fu plugins",
+ "Cross-platform with native support for PSD, TIFF, and RAW"
+ ],
+ "cons": [
+ "Steep learning curve",
+ "Dated UI"
+ ],
+ "stars": 5960,
+ "last_commit": "2026-02-09T16:20:25Z",
+ "language": "C",
+ "license": "Other",
+ "logo_url": "/logos/gimp.svg",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "krita",
+ "name": "Krita",
+ "category": "Design",
+ "is_open_source": true,
+ "github_repo": "KDE/krita",
+ "website": "https://krita.org",
+ "description": "Krita is a free and open source cross-platform application that offers an end-to-end solution for creating digital art files from scratch built on the KDE and Qt frameworks.",
+ "pros": [
+ "Modern brush engine with 100+ built-in presets",
+ "HDR painting and animation timeline support",
+ "Optimized for drawing tablets with pressure sensitivity"
+ ],
+ "cons": [
+ "Less focused on photo manipulation"
+ ],
+ "stars": 9333,
+ "last_commit": "2026-02-09T13:47:56Z",
+ "language": "C++",
+ "license": "GNU General Public License v3.0",
+ "logo_url": "/logos/krita.svg",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "figma",
+ "name": "Figma",
+ "category": "Design",
+ "is_open_source": false,
+ "pricing_model": "Freemium/Paid",
+ "website": "https://www.figma.com",
+ "description": "Collaborative interface design tool.",
+ "alternatives": [
+ "penpot"
+ ],
+ "logo_url": "/logos/figma.svg",
+ "avg_monthly_cost": 15,
+ "pros": [
+ "Real-time multiplayer collaboration",
+ "Runs entirely in the browser",
+ "Excellent component and design system support",
+ "Free tier is generous for individuals"
+ ],
+ "cons": [
+        "Adobe acquisition attempt (terminated in 2023) raised pricing concerns",
+ "Offline support is limited",
+ "Performance with very large files can lag"
+ ]
+ },
+ {
+ "slug": "penpot",
+ "name": "Penpot",
+ "category": "Design",
+ "is_open_source": true,
+ "github_repo": "penpot/penpot",
+ "website": "https://penpot.app",
+ "description": "Penpot: The open-source design tool for design and code collaboration",
+ "pros": [
+ "Runs entirely in the browser — no desktop app needed",
+ "SVG-native design — exports are pixel-perfect at any scale",
+ "Real-time multiplayer collaboration"
+ ],
+ "cons": [
+ "Newer ecosystem"
+ ],
+ "stars": 44155,
+ "last_commit": "2026-02-09T15:47:35Z",
+ "language": "Clojure",
+ "license": "Mozilla Public License 2.0",
+ "logo_url": "/logos/penpot.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/penpot"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "notion",
+ "name": "Notion",
+ "category": "Productivity",
+ "is_open_source": false,
+ "pricing_model": "Freemium/Paid",
+ "website": "https://www.notion.so",
+ "description": "All-in-one workspace.",
+ "alternatives": [
+ "appflowy",
+ "affine"
+ ],
+ "logo_url": "/logos/notion.svg",
+ "avg_monthly_cost": 10,
+ "pros": [
+ "All-in-one workspace (docs, wikis, databases)",
+ "Beautiful and intuitive interface",
+ "Powerful database views and relations",
+ "Great template gallery"
+ ],
+ "cons": [
+ "Can be slow with large workspaces",
+ "Offline mode is unreliable",
+ "No true end-to-end encryption"
+ ]
+ },
+ {
+ "slug": "appflowy",
+ "name": "AppFlowy",
+ "category": "Productivity",
+ "is_open_source": true,
+ "github_repo": "AppFlowy-IO/AppFlowy",
+ "website": "https://www.appflowy.io",
+ "description": "Bring projects, wikis, and teams together with AI. AppFlowy is the AI collaborative workspace where you achieve more without losing control of your data. The leading open source Notion alternative.",
+ "pros": [
+ "Local-first architecture — your data never leaves your machine",
+ "Privacy-focused alternative to Notion",
+ "Built in Rust for native desktop performance"
+ ],
+ "cons": [
+ "No web version (yet)"
+ ],
+ "stars": 68006,
+ "last_commit": "2026-01-28T09:20:38Z",
+ "language": "Dart",
+ "license": "GNU Affero General Public License v3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.appflowy.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/appflowy"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "affine",
+ "name": "AFFiNE",
+ "category": "Productivity",
+ "is_open_source": true,
+ "github_repo": "toeverything/AFFiNE",
+ "website": "https://affine.pro",
+ "description": "There can be more than Notion and Miro. AFFiNE(pronounced [ə‘fain]) is a next-gen knowledge base that brings planning, sorting and creating all together. Privacy first, open-source, customizable and ready to use. ",
+ "pros": [
+ "Modern block editor with Notion-like feel",
+ "Spatial canvas for whiteboarding and visual thinking",
+ "Hybrid local-first and cloud sync architecture"
+ ],
+ "cons": [
+ "Still in beta"
+ ],
+ "stars": 62693,
+ "last_commit": "2026-02-09T11:16:50Z",
+ "language": "TypeScript",
+ "license": "Other",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=affine.pro",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/affine"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "google-analytics",
+ "name": "Google Analytics",
+ "category": "Analytics",
+ "is_open_source": false,
+ "pricing_model": "Free/Paid",
+ "website": "https://analytics.google.com",
+ "description": "Web analytics service.",
+ "alternatives": [
+ "plausible",
+ "posthog",
+ "matomo"
+ ],
+ "logo_url": "/logos/google-analytics.svg",
+ "avg_monthly_cost": 150,
+ "pros": [
+ "Industry-standard reporting with Google Ads and Search Console integration",
+ "Advanced audience segmentation and cohort analysis",
+ "Free tier handles up to 10M hits per month"
+ ],
+ "cons": [
+ "Privacy concerns — data goes to Google",
+ "GA4 migration frustrated many users",
+ "Blocked by most ad blockers",
+ "Complex for beginners"
+ ]
+ },
+ {
+ "slug": "plausible",
+ "name": "Plausible",
+ "category": "Analytics",
+ "is_open_source": true,
+ "github_repo": "plausible/analytics",
+ "website": "https://plausible.io",
+ "description": "Simple, open source, lightweight and privacy-friendly web analytics alternative to Google Analytics.",
+ "pros": [
+ "Fully GDPR compliant with no cookies required",
+ "Lightweight script under 1KB — zero impact on page speed",
+ "Clean dashboard that shows what matters, nothing more"
+ ],
+ "cons": [
+ "Limited advanced features"
+ ],
+ "stars": 24198,
+ "last_commit": "2026-02-09T16:20:52Z",
+ "language": "Elixir",
+ "license": "GNU Affero General Public License v3.0",
+ "tags": [
+ "Analytics",
+ "Privacy",
+ "GDPR"
+ ],
+ "logo_url": "/logos/plausible.svg",
+ "deployment": {
+ "image": "plausible/analytics:latest",
+ "port": 8000,
+ "env": [
+ {
+ "key": "BASE_URL",
+ "value": "http://localhost:8000"
+ },
+ {
+ "key": "SECRET_KEY_BASE",
+ "value": "REPLACE_WITH_RANDOM_STRING"
+ }
+ ],
+ "volumes": [
+ "./plausible_db:/var/lib/clickhouse",
+ "./plausible_events:/var/lib/postgresql/data"
+ ],
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/plausible"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "posthog",
+ "name": "PostHog",
+ "category": "Analytics",
+ "is_open_source": true,
+ "github_repo": "PostHog/posthog",
+ "website": "https://posthog.com",
+ "description": "🦔 PostHog is an all-in-one developer platform for building successful products. We offer product analytics, web analytics, session replay, error tracking, feature flags, experimentation, surveys, data warehouse, a CDP, and an AI product assistant to help debug your code, ship features faster, and keep all your usage and customer data in one stack.",
+ "pros": [
+ "Session recording with heatmaps and click tracking",
+ "Built-in feature flags, A/B testing, and surveys",
+ "Warehouse-native — query your data with SQL"
+ ],
+ "cons": [
+ "Complex to self-host"
+ ],
+ "stars": 31181,
+ "last_commit": "2026-02-09T16:25:10Z",
+ "language": "Python",
+ "license": "Other",
+ "logo_url": "/logos/posthog.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/posthog"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "matomo",
+ "name": "Matomo",
+ "category": "Analytics",
+ "is_open_source": true,
+ "github_repo": "matomo-org/matomo",
+ "website": "https://matomo.org",
+ "description": "Empowering People Ethically 🚀 — Matomo is hiring! Join us → https://matomo.org/jobs Matomo is the leading open-source alternative to Google Analytics, giving you complete control and built-in privacy. Easily collect, visualise, and analyse data from websites & apps. Star us on GitHub ⭐️ – Pull Requests welcome! ",
+ "pros": [
+ "Feature-rich analytics rivaling Google Analytics",
+ "GDPR and CCPA compliant out of the box",
+ "Heatmaps, session recordings, and funnel analysis included"
+ ],
+ "cons": [
+ "UI feels dated"
+ ],
+ "stars": 21270,
+ "last_commit": "2026-02-09T15:36:30Z",
+ "language": "PHP",
+ "license": "GNU General Public License v3.0",
+ "logo_url": "/logos/matomo.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/matomo"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "1password",
+ "name": "1Password",
+ "category": "Security",
+ "is_open_source": false,
+ "pricing_model": "Paid",
+ "website": "https://1password.com",
+ "description": "Password manager.",
+ "alternatives": [
+ "bitwarden",
+ "keepassxc"
+ ],
+ "logo_url": "/logos/1password.svg",
+ "avg_monthly_cost": 8,
+ "pros": [
+ "Excellent cross-platform support",
+ "Travel Mode hides sensitive vaults",
+ "Watchtower alerts for compromised passwords",
+ "Family and team sharing built in"
+ ],
+ "cons": [
+ "No free tier ($2.99/mo minimum)",
+ "Cloud-only — no self-hosting option",
+ "Subscription model with no lifetime option"
+ ]
+ },
+ {
+ "slug": "bitwarden",
+ "name": "Bitwarden",
+ "category": "Security",
+ "is_open_source": true,
+ "github_repo": "bitwarden/server",
+ "website": "https://bitwarden.com",
+ "description": "Bitwarden infrastructure/backend (API, database, Docker, etc).",
+ "pros": [
+ "Independently audited security with full transparency reports",
+ "Cross-platform apps for every OS, browser, and device",
+ "Organization vaults with fine-grained sharing controls"
+ ],
+ "cons": [
+ "UI is functional but basic"
+ ],
+ "stars": 18027,
+ "last_commit": "2026-02-09T15:52:04Z",
+ "language": "C#",
+ "license": "Other",
+ "logo_url": "/logos/bitwarden.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/bitwarden"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "keepassxc",
+ "name": "KeePassXC",
+ "category": "Security",
+ "is_open_source": true,
+ "github_repo": "keepassxreboot/keepassxc",
+ "website": "https://keepassxc.org",
+ "description": "KeePassXC is a cross-platform community-driven port of the Windows application “KeePass Password Safe”.",
+ "pros": [
+ "Fully offline — database stored locally with AES-256 encryption",
+ "No cloud dependency — you control the sync method",
+ "Browser integration via KeePassXC-Browser extension"
+ ],
+ "cons": [
+ "No automatic sync (requires Dropbox/Syncthing)"
+ ],
+ "stars": 25810,
+ "last_commit": "2026-01-18T15:46:48Z",
+ "language": "C++",
+ "license": "Other",
+ "logo_url": "/logos/keepassxc.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/keepassxc"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "heroku",
+ "name": "Heroku",
+ "category": "DevOps",
+ "is_open_source": false,
+ "pricing_model": "Paid",
+ "avg_monthly_cost": 8,
+ "website": "https://heroku.com",
+ "description": "Platform as a service.",
+ "alternatives": [
+ "coolify",
+ "dokku"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=heroku.com",
+ "pros": [
+ "Dead-simple deployment (git push)",
+ "Great for prototypes and MVPs",
+ "Managed Postgres included",
+ "Add-ons marketplace for common services"
+ ],
+ "cons": [
+ "Eliminated free tier in 2022",
+ "Expensive at scale vs VPS",
+ "Limited container customization",
+ "Owned by Salesforce (less innovation)"
+ ]
+ },
+ {
+ "slug": "coolify",
+ "name": "Coolify",
+ "category": "DevOps",
+ "is_open_source": true,
+ "github_repo": "coollabsio/coolify",
+ "website": "https://coolify.io",
+ "description": "An open-source, self-hostable PaaS alternative to Vercel, Heroku & Netlify that lets you easily deploy static sites, databases, full-stack applications and 280+ one-click services on your own servers.",
+ "pros": [
+ "Polished, beautiful dashboard that rivals Vercel and Netlify",
+ "Deploy anything — Docker, static sites, databases, services",
+ "Automatic SSL, backups, and monitoring included"
+ ],
+ "cons": [
+ "One-man project (mostly)"
+ ],
+ "stars": 50421,
+ "last_commit": "2026-02-09T16:01:12Z",
+ "language": "PHP",
+ "license": "Apache License 2.0",
+ "tags": [
+ "DevOps",
+ "PaaS",
+ "Self-Hosted"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coolify.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/coolify"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "sap",
+ "name": "SAP S/4HANA",
+ "category": "ERP",
+ "is_open_source": false,
+ "pricing_model": "Paid (Enterprise)",
+ "avg_monthly_cost": 100,
+ "website": "https://www.sap.com",
+ "description": "The world leader in enterprise resource planning software.",
+ "alternatives": [
+ "odoo",
+ "erpnext"
+ ],
+ "logo_url": "/logos/sap.svg",
+ "pros": [
+ "Enterprise ERP market leader",
+ "Handles massive organizational complexity",
+ "Deep industry-specific solutions",
+ "Strong compliance and audit trails"
+ ],
+ "cons": [
+ "Extremely expensive to implement",
+ "Implementation takes months to years",
+ "Requires specialized consultants",
+ "Overkill for SMBs"
+ ]
+ },
+ {
+ "slug": "odoo",
+ "name": "Odoo",
+ "category": "ERP",
+ "is_open_source": true,
+ "github_repo": "odoo/odoo",
+ "stars": 48919,
+ "website": "https://www.odoo.com",
+ "description": "A suite of open source business apps: CRM, eCommerce, accounting, manufacturing, warehouse, and more.",
+ "pros": [
+ "All-in-one suite covering CRM, HR, inventory, and accounting",
+ "Modular app marketplace with 30,000+ extensions",
+ "Dual licensing — Community (free) and Enterprise"
+ ],
+ "cons": [
+ "Can be complex to customize",
+ "Enterprise features are paid"
+ ],
+ "last_commit": "2026-02-09T16:18:46Z",
+ "language": "Python",
+ "license": "LGPL-3.0",
+ "logo_url": "/logos/odoo.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/odoo"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "erpnext",
+ "name": "ERPNext",
+ "category": "ERP",
+ "is_open_source": true,
+ "github_repo": "frappe/erpnext",
+ "website": "https://erpnext.com",
+ "description": "A free and open-source integrated Enterprise Resource Planning (ERP) software.",
+ "pros": [
+ "Fully open source",
+ "No licensing fees"
+ ],
+ "cons": [
+ "Steep learning curve"
+ ],
+ "stars": 31635,
+ "last_commit": "2026-02-09T15:52:29Z",
+ "language": "Python",
+ "license": "GNU General Public License v3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=erpnext.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/erpnext"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "autocad",
+ "name": "AutoCAD",
+ "category": "CAD",
+ "is_open_source": false,
+ "pricing_model": "Paid (Subscription)",
+ "website": "https://www.autodesk.com/products/autocad",
+ "description": "Professional computer-aided design (CAD) and drafting software.",
+ "alternatives": [
+ "librecad",
+ "freecad"
+ ],
+ "logo_url": "/logos/autocad.svg",
+ "avg_monthly_cost": 75,
+ "pros": [
+ "Industry standard for CAD/engineering",
+ "Precise 2D and 3D modeling",
+ "Extensive library of tools and templates",
+ "Strong file format compatibility"
+ ],
+ "cons": [
+ "Expensive subscription ($1,975/yr)",
+ "Steep learning curve",
+ "Resource-intensive — needs workstation hardware"
+ ]
+ },
+ {
+ "slug": "librecad",
+ "name": "LibreCAD",
+ "category": "CAD",
+ "is_open_source": true,
+ "github_repo": "LibreCAD/LibreCAD",
+ "stars": 6500,
+ "website": "https://librecad.org",
+ "description": "A mature, feature-rich 2D CAD application with a loyal user community.",
+ "pros": [
+ "Purpose-built lightweight 2D CAD application",
+ "Native DXF support for industry-standard file exchange",
+ "Cross-platform with minimal system requirements"
+ ],
+ "cons": [
+ "2D only"
+ ],
+ "last_commit": "2026-02-05T10:00:00Z",
+ "language": "C++",
+ "license": "GPLv2",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=librecad.org",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "freecad",
+ "name": "FreeCAD",
+ "category": "CAD",
+ "is_open_source": true,
+ "github_repo": "FreeCAD/FreeCAD",
+ "stars": 21000,
+ "website": "https://www.freecad.org",
+ "description": "A general-purpose parametric 3D CAD modeler and a BIM software application.",
+ "pros": [
+ "Full parametric 3D modeling with constraint-based sketcher",
+ "Extensible 3D capabilities for mechanical engineering, architecture, and BIM",
+ "Python scripting and macro system for automation"
+ ],
+ "cons": [
+ "UI learning curve"
+ ],
+ "last_commit": "2026-02-08T14:00:00Z",
+ "language": "C++",
+ "license": "LGPLv2+",
+ "logo_url": "/logos/freecad.svg",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "zapier",
+ "name": "Zapier",
+ "category": "Automation",
+ "is_open_source": false,
+ "pricing_model": "Paid (Task-based)",
+ "website": "https://zapier.com",
+ "description": "The pioneer in workflow automation for everyone.",
+ "alternatives": [
+ "n8n",
+ "activepieces"
+ ],
+ "logo_url": "/logos/zapier.svg",
+ "avg_monthly_cost": 20,
+ "pros": [
+ "Connect 6,000+ apps without code",
+ "Easy visual workflow builder",
+ "Reliable trigger-based automation",
+ "Good for non-technical users"
+ ],
+ "cons": [
+ "Gets expensive fast (per-task pricing)",
+ "Limited logic and branching on lower tiers",
+ "5-minute polling delay on some triggers"
+ ]
+ },
+ {
+ "slug": "n8n",
+ "name": "n8n",
+ "category": "Automation",
+ "is_open_source": true,
+ "github_repo": "n8n-io/n8n",
+ "stars": 49000,
+ "website": "https://n8n.io",
+ "description": "Fair-code workflow automation tool. Easily automate tasks across different services.",
+ "pros": [
+ "Self-hosted workflow automation with 400+ integrations",
+ "Visual node-based editor for complex multi-step workflows",
+ "JavaScript/Python code nodes for custom logic"
+ ],
+ "cons": [
+ "Requires hosting knowledge"
+ ],
+ "last_commit": "2026-02-09T15:00:00Z",
+ "language": "TypeScript",
+ "license": "Sustainable Use License",
+ "logo_url": "/logos/n8n.svg",
+ "deployment": {
+ "image": "n8nio/n8n",
+ "port": 5678,
+ "env": [
+ {
+ "key": "N8N_BASIC_AUTH_ACTIVE",
+ "value": "true"
+ },
+ {
+ "key": "N8N_BASIC_AUTH_USER",
+ "value": "admin"
+ },
+ {
+ "key": "N8N_BASIC_AUTH_PASSWORD",
+ "value": "password"
+ }
+ ],
+ "volumes": [
+ "./n8n_data:/home/node/.n8n"
+ ],
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/n8n"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "activepieces",
+ "name": "Activepieces",
+ "category": "Automation",
+ "is_open_source": true,
+ "github_repo": "activepieces/activepieces",
+ "stars": 11000,
+ "website": "https://www.activepieces.com",
+ "description": "Open source alternative to Zapier. Automate your work with 200+ apps.",
+ "pros": [
+ "Beginner-friendly UI with a low learning curve",
+ "Open-source and self-hostable with Docker",
+ "Growing library of community-built connectors"
+ ],
+ "cons": [
+ "Smaller connector library than Zapier"
+ ],
+ "last_commit": "2026-02-09T16:00:00Z",
+ "language": "TypeScript",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=activepieces.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/activepieces"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "tableau",
+ "name": "Tableau",
+ "category": "Analytics",
+ "is_open_source": false,
+ "pricing_model": "Paid (Seat-based)",
+ "avg_monthly_cost": 70,
+ "website": "https://www.tableau.com",
+ "description": "Powerful data visualization and business intelligence platform.",
+ "alternatives": [
+ "metabase",
+ "superset"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tableau.com",
+ "pros": [
+ "Best-in-class data visualization",
+ "Drag-and-drop dashboard creation",
+ "Handles massive datasets well",
+ "Strong community and learning resources"
+ ],
+ "cons": [
+ "Expensive licensing ($70+/user/mo)",
+ "Requires a data warehouse setup",
+ "Desktop app feels dated"
+ ]
+ },
+ {
+ "slug": "metabase",
+ "name": "Metabase",
+ "category": "Analytics",
+ "is_open_source": true,
+ "github_repo": "metabase/metabase",
+ "stars": 38000,
+ "website": "https://www.metabase.com",
+ "description": "The simplest, fastest way to get business intelligence and analytics throughout your company.",
+ "pros": [
+ "Extremely user friendly",
+ "Easy query builder"
+ ],
+ "cons": [
+ "Advanced visualizations can be limited"
+ ],
+ "last_commit": "2026-02-09T14:30:00Z",
+ "language": "Clojure",
+ "license": "AGPLv3",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=metabase.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/metabase"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "superset",
+ "name": "Apache Superset",
+ "category": "Analytics",
+ "is_open_source": true,
+ "github_repo": "apache/superset",
+ "stars": 59000,
+ "website": "https://superset.apache.org",
+ "description": "Enterprise-ready business intelligence web application.",
+ "pros": [
+ "Scaling to petabytes",
+ "Huge variety of charts"
+ ],
+ "cons": [
+ "Complex configuration"
+ ],
+ "last_commit": "2026-02-09T12:00:00Z",
+ "language": "Python",
+ "license": "Apache 2.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=superset.apache.org",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/superset"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "auth0",
+ "name": "Auth0",
+ "category": "Security",
+ "is_open_source": false,
+ "pricing_model": "Paid (MAU-based)",
+ "website": "https://auth0.com",
+ "description": "The leading authentication and authorization platform.",
+ "alternatives": [
+ "keycloak",
+ "authentik"
+ ],
+ "logo_url": "/logos/auth0.svg",
+ "avg_monthly_cost": 23,
+ "pros": [
+ "Feature-rich authentication platform",
+ "Social login, MFA, and SSO out of the box",
+ "Extensive SDK support across languages",
+ "Rules and hooks for custom auth logic"
+ ],
+ "cons": [
+ "Pricing jumps sharply after free tier",
+ "Can be complex to configure properly",
+ "Owned by Okta — consolidation concerns"
+ ]
+ },
+ {
+ "slug": "keycloak",
+ "name": "Keycloak",
+ "category": "Security",
+ "is_open_source": true,
+ "github_repo": "keycloak/keycloak",
+ "stars": 23000,
+ "website": "https://www.keycloak.org",
+ "description": "Open source identity and access management for modern applications and services.",
+ "pros": [
+ "Enterprise-standard identity provider supporting SAML and OIDC",
+ "Federated identity with social login and LDAP integration",
+ "Battle-tested by Red Hat in production environments"
+ ],
+ "cons": [
+ "UI can be clunky",
+ "Heavy resource usage"
+ ],
+ "last_commit": "2026-02-09T16:30:00Z",
+ "language": "Java",
+ "license": "Apache 2.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=keycloak.org",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/keycloak"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "okta",
+ "name": "Okta",
+ "category": "Security",
+ "is_open_source": false,
+ "pricing_model": "Paid (User-based)",
+ "website": "https://okta.com",
+ "description": "The World's Identity Company, providing enterprise-grade IAM.",
+ "alternatives": [
+ "authentik",
+ "keycloak"
+ ],
+ "logo_url": "/logos/okta.svg",
+ "avg_monthly_cost": 6,
+ "pros": [
+ "Enterprise SSO and identity management leader",
+ "Strong security and compliance certifications",
+ "Universal directory for user management",
+ "Extensive pre-built integrations"
+ ],
+ "cons": [
+ "Very expensive for small teams",
+ "Admin interface has a learning curve",
+ "Overkill for simple auth needs"
+ ]
+ },
+ {
+ "slug": "authentik",
+ "name": "Authentik",
+ "category": "Security",
+ "is_open_source": true,
+ "github_repo": "goauthentik/authentik",
+ "stars": 15000,
+ "website": "https://goauthentik.io",
+ "description": "The overall-best open-source identity provider, focused on flexibility and versatility.",
+ "pros": [
+ "Modern, intuitive admin interface with drag-and-drop flows",
+ "Easy customization of login pages and branding",
+ "Supports SAML, OAuth2, LDAP proxy, and SCIM"
+ ],
+ "cons": [
+ "Smaller community than Keycloak"
+ ],
+ "last_commit": "2026-02-09T17:00:00Z",
+ "language": "Python",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=goauthentik.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/authentik"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "s3",
+ "name": "Amazon S3",
+ "category": "Cloud Infrastructure",
+ "is_open_source": false,
+ "pricing_model": "Paid (Usage-based)",
+ "website": "https://aws.amazon.com/s3",
+ "description": "Object storage built to retrieve any amount of data from anywhere.",
+ "alternatives": [
+ "minio"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=aws.amazon.com",
+ "avg_monthly_cost": 23,
+ "pros": [
+ "99.999999999% durability (11 nines)",
+ "Scales to virtually unlimited storage",
+ "Pay only for what you use",
+ "Industry standard — everything integrates with it"
+ ],
+ "cons": [
+ "Egress costs can surprise you",
+ "Complex IAM/bucket policy configuration",
+ "Vendor lock-in to AWS ecosystem"
+ ]
+ },
+ {
+ "slug": "minio",
+ "name": "MinIO",
+ "category": "Cloud Infrastructure",
+ "is_open_source": true,
+ "github_repo": "minio/minio",
+ "stars": 45000,
+ "website": "https://min.io",
+ "description": "High-performance, S3-compatible object storage for AI and enterprise data.",
+ "pros": [
+ "S3-compatible API — drop-in replacement for AWS S3",
+ "Extremely fast object storage optimized for AI/ML workloads",
+ "Kubernetes-native with operator support"
+ ],
+ "cons": [
+ "AGPL license can be strict"
+ ],
+ "last_commit": "2026-02-09T14:00:00Z",
+ "language": "Go",
+ "license": "AGPLv3",
+ "logo_url": "/logos/minio.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/minio"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "zendesk",
+ "name": "Zendesk",
+ "category": "Support",
+ "is_open_source": false,
+ "pricing_model": "Paid (Agent-based)",
+ "avg_monthly_cost": 19,
+ "website": "https://www.zendesk.com",
+ "description": "The leader in customer service and engagement software.",
+ "alternatives": [
+ "zammad"
+ ],
+ "logo_url": "/logos/zendesk.svg",
+ "pros": [
+ "Comprehensive customer support platform",
+ "Omnichannel support (email, chat, phone)",
+ "Powerful ticket management and routing",
+ "Large marketplace of integrations"
+ ],
+ "cons": [
+ "Expensive per-agent pricing",
+ "UI can feel bloated and slow",
+ "Basic plans lack important features"
+ ]
+ },
+ {
+ "slug": "zammad",
+ "name": "Zammad",
+ "category": "Support",
+ "is_open_source": true,
+ "github_repo": "zammad/zammad",
+ "stars": 5000,
+ "website": "https://zammad.org",
+ "description": "A web-based, open source helpdesk/customer support system with many features.",
+ "pros": [
+ "Omnichannel helpdesk with email, phone, chat, and social media",
+ "Full-text search with Elasticsearch integration",
+ "Customizable ticket workflows and SLA management"
+ ],
+ "cons": [
+ "Ruby hosting can be tricky"
+ ],
+ "last_commit": "2026-02-09T11:00:00Z",
+ "language": "Ruby",
+ "license": "AGPLv3",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=zammad.org",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/zammad"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "workday",
+ "name": "Workday",
+ "category": "HR",
+ "is_open_source": false,
+ "pricing_model": "Paid (Enterprise)",
+ "avg_monthly_cost": 45,
+ "website": "https://www.workday.com",
+ "description": "Enterprise management cloud for finance and human resources.",
+ "alternatives": [
+ "orangehrm"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=workday.com",
+ "pros": [
+ "Leading cloud HR and finance platform",
+ "Strong workforce analytics",
+ "Regular feature updates included",
+ "Built for enterprise compliance"
+ ],
+ "cons": [
+ "Extremely expensive to implement",
+ "Long implementation timelines",
+ "Complex for smaller organizations"
+ ]
+ },
+ {
+ "slug": "orangehrm",
+ "name": "OrangeHRM",
+ "category": "HR",
+ "is_open_source": true,
+ "github_repo": "orangehrm/orangehrm",
+ "stars": 1200,
+ "website": "https://www.orangehrm.com",
+ "description": "The world's most popular open source human resource management software.",
+ "pros": [
+ "Comprehensive HR suite covering recruitment, leave, and performance",
+ "Highly customizable with module-based architecture",
+ "Employee self-service portal for time-off and expenses"
+ ],
+ "cons": [
+ "UI feels a bit dated",
+ "Enterprise features are paid"
+ ],
+ "last_commit": "2026-02-09T10:00:00Z",
+ "language": "PHP",
+ "license": "GPLv2",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=orangehrm.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/orangehrm"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "m365",
+ "name": "Microsoft 365",
+ "category": "Productivity",
+ "is_open_source": false,
+ "pricing_model": "Paid (Subscription)",
+ "website": "https://www.office.com",
+ "description": "The world's most popular office suite and cloud collaboration platform.",
+ "alternatives": [
+ "onlyoffice"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=office.com",
+ "avg_monthly_cost": 12,
+ "pros": [
+ "Full productivity suite (Word, Excel, Teams)",
+ "Deep enterprise integration",
+ "1TB OneDrive storage included",
+ "Regular AI feature updates (Copilot)"
+ ],
+ "cons": [
+ "Subscription fatigue — perpetual payments",
+ "Teams can be resource-heavy",
+ "Complex licensing tiers"
+ ]
+ },
+ {
+ "slug": "onlyoffice",
+ "name": "ONLYOFFICE",
+ "category": "Productivity",
+ "is_open_source": true,
+ "github_repo": "ONLYOFFICE/DocumentServer",
+ "stars": 11000,
+ "website": "https://www.onlyoffice.com",
+ "description": "Powerful online document editors for text, spreadsheets, and presentations. Highly compatible with MS Office.",
+ "pros": [
+ "Full-featured collaborative editing for docs, sheets, and slides",
+ "Drop-in MS Office compatibility with high-fidelity rendering",
+ "Self-hosted integration with Nextcloud, Seafile, and more"
+ ],
+ "cons": [
+ "Self-hosting can be complex"
+ ],
+ "last_commit": "2026-02-09T15:30:00Z",
+ "language": "JavaScript",
+ "license": "AGPLv3",
+ "logo_url": "/logos/onlyoffice.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/onlyoffice"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "shopify",
+ "name": "Shopify",
+ "category": "E-commerce",
+ "is_open_source": false,
+ "pricing_model": "Paid (Subscription)",
+ "website": "https://www.shopify.com",
+ "description": "Commercial platform that allows anyone to set up an online store.",
+ "alternatives": [
+ "medusa"
+ ],
+ "logo_url": "/logos/shopify.svg",
+ "avg_monthly_cost": 39,
+ "pros": [
+ "Easiest way to start selling online",
+ "Beautiful themes and fast checkout",
+ "Apps for almost any e-commerce need",
+ "Handles payments, shipping, and taxes"
+ ],
+ "cons": [
+ "Transaction fees unless using Shopify Payments",
+ "Monthly costs add up with apps",
+ "Limited customization vs self-hosted solutions"
+ ]
+ },
+ {
+ "slug": "medusa",
+ "name": "Medusa.js",
+ "category": "E-commerce",
+ "is_open_source": true,
+ "github_repo": "medusajs/medusa",
+ "stars": 24000,
+ "website": "https://medusajs.com",
+ "description": "The open-source alternative to Shopify. Building blocks for digital commerce.",
+ "pros": [
+ "Headless commerce with extreme flexibility for custom storefronts",
+ "Plugin architecture for payments, fulfillment, and CMS",
+ "Multi-region and multi-currency support built in"
+ ],
+ "cons": [
+ "Requires developer knowledge"
+ ],
+ "last_commit": "2026-02-09T16:45:00Z",
+ "language": "TypeScript",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=medusajs.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/medusa"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "docusign",
+ "name": "DocuSign",
+ "category": "Legal",
+ "is_open_source": false,
+ "pricing_model": "Paid (Envelope-based)",
+ "website": "https://www.docusign.com",
+ "description": "The world's #1 way to sign electronically on practically any device, from almost anywhere, at any time.",
+ "alternatives": [
+ "documenso"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=docusign.com",
+ "avg_monthly_cost": 25,
+ "pros": [
+ "Industry standard for e-signatures",
+ "Legally binding in most countries",
+ "Workflow automation for document routing",
+ "Strong mobile experience"
+ ],
+ "cons": [
+ "Expensive for occasional use",
+ "UI feels dated compared to competitors",
+ "Limited free tier"
+ ]
+ },
+ {
+ "slug": "documenso",
+ "name": "Documenso",
+ "category": "Legal",
+ "is_open_source": true,
+ "github_repo": "documenso/documenso",
+ "stars": 8000,
+ "website": "https://documenso.com",
+ "description": "The open-source DocuSign alternative. We aim to be the world's most trusted document signing platform.",
+ "pros": [
+ "Self-hosted digital signatures with full audit trail",
+ "Developer-friendly API and webhook integration",
+ "Beautiful, modern signing experience"
+ ],
+ "cons": [
+ "Newer ecosystem"
+ ],
+ "last_commit": "2026-02-10T09:00:00Z",
+ "language": "TypeScript",
+ "license": "AGPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=documenso.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/documenso"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "mailchimp",
+ "name": "Mailchimp",
+ "category": "Marketing",
+ "is_open_source": false,
+ "pricing_model": "Paid (Contact-based)",
+ "website": "https://mailchimp.com",
+ "description": "All-in-one marketing platform that helps you manage and talk to your clients, customers, and other interested parties.",
+ "alternatives": [
+ "listmonk",
+ "mautic"
+ ],
+ "logo_url": "/logos/mailchimp.svg",
+ "avg_monthly_cost": 13,
+ "pros": [
+ "Beginner-friendly email marketing",
+ "Good free tier for small lists",
+ "Built-in landing page builder",
+ "Detailed campaign analytics"
+ ],
+ "cons": [
+ "Pricing increases steeply with list size",
+ "Owned by Intuit (less indie-friendly)",
+ "Template editor is limiting"
+ ]
+ },
+ {
+ "slug": "listmonk",
+ "name": "Listmonk",
+ "category": "Marketing",
+ "is_open_source": true,
+ "github_repo": "knadh/listmonk",
+ "stars": 19000,
+ "website": "https://listmonk.app",
+ "description": "High performance, self-hosted newsletter and mailing list manager with a modern dashboard.",
+ "pros": [
+ "Handles millions of subscribers with blazing fast performance",
+ "Templating engine with rich media and personalization",
+ "Manages bounces, unsubscribes, and analytics automatically"
+ ],
+ "cons": [
+ "No built-in sending (needs SMTP/SES)"
+ ],
+ "last_commit": "2026-02-05T12:00:00Z",
+ "language": "Go",
+ "license": "AGPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=listmonk.app",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/listmonk"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "mautic",
+ "name": "Mautic",
+ "category": "Marketing",
+ "is_open_source": true,
+ "github_repo": "mautic/mautic",
+ "stars": 7000,
+ "website": "https://www.mautic.org",
+ "description": "World's largest open source marketing automation project.",
+ "pros": [
+ "Full marketing automation with CRM-grade contact management",
+ "Visual campaign builder with multi-channel triggers",
+ "Email, SMS, and social media campaign support"
+ ],
+ "cons": [
+ "Complex setup and maintenance"
+ ],
+ "last_commit": "2026-02-09T18:00:00Z",
+ "language": "PHP",
+ "license": "GPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mautic.org",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/mautic"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "statuspage",
+ "name": "Statuspage",
+ "category": "Monitoring",
+ "is_open_source": false,
+ "pricing_model": "Paid (Atlassian)",
+ "website": "https://www.atlassian.com/software/statuspage",
+ "description": "The best way to communicate status and downtime to your customers.",
+ "alternatives": [
+ "uptime-kuma"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=atlassian.com",
+ "avg_monthly_cost": 29,
+ "pros": [
+ "Clean, professional status pages",
+ "Integrated incident management",
+ "Email/SMS subscriber notifications",
+ "Atlassian ecosystem integration"
+ ],
+ "cons": [
+ "Expensive for what it does ($29+/mo)",
+ "Limited customization options",
+ "Overkill if you just need a simple status page"
+ ]
+ },
+ {
+ "slug": "uptime-kuma",
+ "name": "Uptime Kuma",
+ "category": "Monitoring",
+ "is_open_source": true,
+ "github_repo": "louislam/uptime-kuma",
+ "stars": 55000,
+ "website": "https://uptime.kuma.pet",
+ "description": "A fancy self-hosted monitoring tool.",
+ "pros": [
+ "Beautiful, real-time monitoring dashboard",
+ "Multi-protocol support: HTTP, TCP, DNS, Docker, and more",
+ "Notification integrations with 90+ services including Slack, Discord, and Telegram"
+ ],
+ "cons": [
+ "Self-hosted only (usually)"
+ ],
+ "last_commit": "2026-02-10T08:00:00Z",
+ "language": "JavaScript",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=uptime.kuma.pet",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/uptime-kuma"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "datadog",
+ "name": "Datadog",
+ "category": "Monitoring",
+ "is_open_source": false,
+ "pricing_model": "Paid (Usage-based)",
+ "website": "https://www.datadoghq.com",
+ "description": "Modern monitoring and security that gives you full visibility into your applications and infrastructure.",
+ "alternatives": [
+ "signoz"
+ ],
+ "logo_url": "/logos/datadog.svg",
+ "avg_monthly_cost": 23,
+ "pros": [
+ "Comprehensive observability platform",
+ "APM, logs, metrics in one place",
+ "Excellent dashboards and alerting",
+ "Supports 750+ integrations"
+ ],
+ "cons": [
+ "Notoriously expensive at scale",
+ "Complex pricing model (per host, per GB)",
+ "Can become a significant budget item"
+ ]
+ },
+ {
+ "slug": "signoz",
+ "name": "SigNoz",
+ "category": "Monitoring",
+ "is_open_source": true,
+ "github_repo": "signoz/signoz",
+ "stars": 18000,
+ "website": "https://signoz.io",
+ "description": "Open source observability platform. SigNoz helps developers monitor applications and troubleshoot problems.",
+ "pros": [
+ "Unified metrics, traces, and logs in a single platform",
+ "OpenTelemetry native — no proprietary agents required",
+ "ClickHouse-powered for fast queries at scale"
+ ],
+ "cons": [
+ "High resource usage (ClickHouse)"
+ ],
+ "last_commit": "2026-02-09T20:00:00Z",
+ "language": "Go",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=signoz.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/signoz"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "typeform",
+ "name": "Typeform",
+ "category": "Productivity",
+ "is_open_source": false,
+ "pricing_model": "Paid (Response-based)",
+ "website": "https://www.typeform.com",
+ "description": "Build beautiful, interactive forms, surveys, quizzes, and more.",
+ "alternatives": [
+ "tally"
+ ],
+ "logo_url": "/logos/typeform.svg",
+ "avg_monthly_cost": 25,
+ "pros": [
+ "Beautiful, conversational form experience",
+ "High completion rates vs traditional forms",
+ "Logic jumps and conditional flows",
+ "Great integrations (Zapier, webhooks)"
+ ],
+ "cons": [
+ "Expensive for the response limits",
+ "Limited free tier (10 responses/mo)",
+ "Not ideal for complex multi-page forms"
+ ]
+ },
+ {
+ "slug": "tally",
+ "name": "Tally",
+ "category": "Productivity",
+ "is_open_source": false,
+ "is_free_tier_generous": true,
+ "pricing_model": "Free/Paid",
+ "website": "https://tally.so",
+ "description": "The simplest way to create forms. Tally is a new type of form builder that works like a doc.",
+ "pros": [
+ "Notion-like form building experience with no-code simplicity",
+ "Unlimited forms and responses on the free tier",
+ "Conditional logic, hidden fields, and payment collection"
+ ],
+ "cons": [
+ "Not open source (though the product has an open-source-friendly community)"
+ ],
+ "tags": [
+ "Forms",
+ "Surveys",
+ "No-code"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tally.so"
+ },
+ {
+ "slug": "confluence",
+ "name": "Confluence",
+ "category": "Productivity",
+ "is_open_source": false,
+ "pricing_model": "Paid (Atlassian)",
+ "website": "https://www.atlassian.com/software/confluence",
+ "description": "Your remote-friendly team workspace where knowledge and collaboration meet.",
+ "alternatives": [
+ "outline"
+ ],
+ "logo_url": "/logos/confluence.svg",
+ "avg_monthly_cost": 10,
+ "pros": [
+ "Deep Jira integration for dev teams",
+ "Structured knowledge base with spaces",
+ "Templates for common documentation",
+ "Permissions and access control"
+ ],
+ "cons": [
+ "Slow and bloated interface",
+ "Search is frustratingly poor",
+ "Editing experience lags behind Notion"
+ ]
+ },
+ {
+ "slug": "outline",
+ "name": "Outline",
+ "category": "Productivity",
+ "is_open_source": true,
+ "github_repo": "outline/outline",
+ "stars": 24000,
+ "website": "https://www.getoutline.com",
+ "description": "Fast, collaborative, knowledge base for your team built using React and Markdown.",
+ "pros": [
+ "Sub-second search across all documents",
+ "Beautifully designed editor with Markdown shortcuts",
+ "Integrates with Slack, Figma, and 20+ tools out of the box"
+ ],
+ "cons": [
+ "Hard to self-host (complex storage requirements)"
+ ],
+ "last_commit": "2026-02-10T12:00:00Z",
+ "language": "TypeScript",
+ "license": "Other",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=getoutline.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/outline"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "hootsuite",
+ "name": "Hootsuite",
+ "category": "Marketing",
+ "is_open_source": false,
+ "pricing_model": "Paid (Seat-based)",
+ "website": "https://www.hootsuite.com",
+ "description": "Social media marketing and management dashboard.",
+ "alternatives": [
+ "mixpost"
+ ],
+ "logo_url": "/logos/hootsuite.svg",
+ "avg_monthly_cost": 49,
+ "pros": [
+ "Manage multiple social accounts in one place",
+ "Post scheduling across platforms",
+ "Team collaboration and approval workflows",
+ "Analytics and reporting dashboard"
+ ],
+ "cons": [
+ "Expensive plans ($99+/mo)",
+ "UI feels cluttered and dated",
+ "Free plan was eliminated"
+ ]
+ },
+ {
+ "slug": "mixpost",
+ "name": "Mixpost",
+ "category": "Marketing",
+ "is_open_source": true,
+ "github_repo": "inovector/mixpost",
+ "stars": 3000,
+ "website": "https://mixpost.app",
+ "description": "Self-hosted social media management software.",
+ "pros": [
+ "Own your data",
+ "No monthly subscription"
+ ],
+ "cons": [
+ "Newer, fewer social connectors"
+ ],
+ "last_commit": "2026-02-01T15:00:00Z",
+ "language": "PHP",
+ "license": "Other",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mixpost.app",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/mixpost"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "codespaces",
+ "name": "GitHub Codespaces",
+ "category": "DevOps",
+ "is_open_source": false,
+ "pricing_model": "Paid (Usage-based)",
+ "website": "https://github.com/features/codespaces",
+ "description": "Fast, cloud-hosted developer environments.",
+ "alternatives": [
+ "coder"
+ ],
+ "logo_url": "/logos/codespaces.svg",
+ "avg_monthly_cost": 15,
+ "pros": [
+ "Full VS Code in the browser",
+ "Pre-configured dev environments",
+ "Instant onboarding for new contributors",
+ "Deep GitHub integration"
+ ],
+ "cons": [
+ "Usage-based pricing adds up",
+ "Requires stable internet connection",
+ "Limited GPU/compute options"
+ ]
+ },
+ {
+ "slug": "coder",
+ "name": "Coder",
+ "category": "DevOps",
+ "is_open_source": true,
+ "github_repo": "coder/coder",
+ "stars": 20000,
+ "website": "https://coder.com",
+ "description": "Provision software development environments as code on your infrastructure.",
+ "pros": [
+ "Run dev environments on any infrastructure — cloud, on-prem, or hybrid",
+ "Self-hosted remote development with VS Code and JetBrains support",
+ "Ephemeral workspaces with Terraform-based provisioning"
+ ],
+ "cons": [
+ "Requires K8s or Terraform knowledge"
+ ],
+ "last_commit": "2026-02-09T22:00:00Z",
+ "language": "Go",
+ "license": "AGPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coder.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/coder"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "quickbooks",
+ "name": "QuickBooks",
+ "category": "Financial",
+ "is_open_source": false,
+ "pricing_model": "Paid (Monthly Subscription)",
+ "website": "https://quickbooks.intuit.com",
+ "description": "Smart, simple online accounting software for small businesses.",
+ "alternatives": [
+ "akaunting",
+ "erpnext"
+ ],
+ "logo_url": "/logos/quickbooks.svg",
+ "avg_monthly_cost": 25,
+ "pros": [
+ "Industry standard for small business accounting",
+ "Easy invoicing and expense tracking",
+ "Bank feed integration",
+ "Tax preparation features"
+ ],
+ "cons": [
+ "Subscription pricing keeps increasing",
+ "Performance issues with large files",
+ "Limited multi-currency support"
+ ]
+ },
+ {
+ "slug": "akaunting",
+ "name": "Akaunting",
+ "category": "Financial",
+ "is_open_source": true,
+ "github_repo": "akaunting/akaunting",
+ "stars": 12000,
+ "website": "https://akaunting.com",
+ "description": "Free and open source online accounting software for small businesses and freelancers.",
+ "pros": [
+ "Modular app store",
+ "Multilingual and multicurrency"
+ ],
+ "cons": [
+ "Some essential apps are paid"
+ ],
+ "last_commit": "2026-02-08T14:00:00Z",
+ "language": "PHP",
+ "license": "GPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=akaunting.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/akaunting"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "premiere",
+ "name": "Adobe Premiere Pro",
+ "category": "Creative",
+ "is_open_source": false,
+ "pricing_model": "Paid (Creative Cloud)",
+ "website": "https://www.adobe.com/products/premiere.html",
+ "description": "Industry-leading video editing software for film, TV, and the web.",
+ "alternatives": [
+ "kdenlive"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=adobe.com",
+ "avg_monthly_cost": 35,
+ "pros": [
+ "Professional-grade video editing",
+ "Excellent integration with After Effects",
+ "Industry standard in film and media",
+ "AI-powered features (scene detection, auto-reframe)"
+ ],
+ "cons": [
+ "Subscription-only ($22.99/mo)",
+ "Resource-intensive — needs powerful hardware",
+ "Steep learning curve"
+ ]
+ },
+ {
+ "slug": "kdenlive",
+ "name": "Kdenlive",
+ "category": "Creative",
+ "is_open_source": true,
+ "github_repo": "KDE/kdenlive",
+ "stars": 3500,
+ "website": "https://kdenlive.org",
+ "description": "Open source video editing software based on the MLT Framework and KDE.",
+ "pros": [
+ "Truly free forever",
+ "Powerful multi-track editing"
+ ],
+ "cons": [
+ "UI can be intimidating for beginners"
+ ],
+ "last_commit": "2026-02-10T11:00:00Z",
+ "language": "C++",
+ "license": "GPL-3.0",
+ "logo_url": "/logos/kdenlive.svg",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "dashlane",
+ "name": "Dashlane",
+ "category": "Security",
+ "is_open_source": false,
+ "pricing_model": "Paid (Subscription)",
+ "website": "https://www.dashlane.com",
+ "description": "Cloud-based password manager and digital wallet.",
+ "alternatives": [
+ "vaultwarden",
+ "bitwarden"
+ ],
+ "logo_url": "/logos/dashlane.svg",
+ "avg_monthly_cost": 8,
+ "pros": [
+ "Clean, intuitive interface",
+ "Built-in VPN on premium plans",
+ "Dark web monitoring alerts",
+ "Secure sharing for teams"
+ ],
+ "cons": [
+ "More expensive than competitors",
+ "Free tier limited to 25 passwords",
+ "Desktop app was discontinued"
+ ]
+ },
+ {
+ "slug": "vaultwarden",
+ "name": "Vaultwarden",
+ "category": "Security",
+ "is_open_source": true,
+ "github_repo": "dani-garcia/vaultwarden",
+ "stars": 32000,
+ "website": "https://github.com/dani-garcia/vaultwarden",
+ "description": "Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs.",
+ "pros": [
+ "Full Bitwarden API compatibility in a lightweight Rust binary",
+ "Runs on 50MB of RAM — perfect for Raspberry Pi or small VPS",
+ "Supports organizations, attachments, and Bitwarden Send"
+ ],
+ "cons": [
+ "Third-party implementation (not security audited officially)"
+ ],
+ "last_commit": "2026-02-09T10:00:00Z",
+ "language": "Rust",
+ "license": "AGPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=bitwarden.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/vaultwarden"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "pipedrive",
+ "name": "Pipedrive",
+ "category": "CRM",
+ "is_open_source": false,
+ "pricing_model": "Paid (Seat-based)",
+ "website": "https://www.pipedrive.com",
+ "description": "Sales CRM & pipeline management software that helps you get more organized.",
+ "alternatives": [
+ "twenty"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=pipedrive.com",
+ "avg_monthly_cost": 15,
+ "pros": [
+ "Simple, visual sales pipeline",
+ "Easy to set up and use",
+ "Good automation for follow-ups",
+ "Affordable entry-level pricing"
+ ],
+ "cons": [
+ "Limited features vs Salesforce",
+ "Reporting could be more powerful",
+ "No free tier"
+ ]
+ },
+ {
+ "slug": "twenty",
+ "name": "Twenty",
+ "category": "CRM",
+ "is_open_source": true,
+ "github_repo": "twentyhq/twenty",
+ "stars": 15000,
+ "website": "https://twenty.com",
+ "description": "A modern open-source CRM alternative to Salesforce and Pipedrive.",
+ "pros": [
+ "Clean, Notion-like interface for CRM workflows",
+ "Deeply customizable data models and views",
+ "GraphQL API for flexible integrations"
+ ],
+ "cons": [
+ "Still in early development"
+ ],
+ "last_commit": "2026-02-10T14:00:00Z",
+ "language": "TypeScript",
+ "license": "AGPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=twenty.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/twenty"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "sentry",
+ "name": "Sentry",
+ "category": "Monitoring",
+ "is_open_source": false,
+ "pricing_model": "Paid (Usage-based)",
+ "website": "https://sentry.io",
+ "description": "Developer-first error tracking and performance monitoring.",
+ "alternatives": [
+ "glitchtip"
+ ],
+ "logo_url": "/logos/sentry.svg",
+ "avg_monthly_cost": 26,
+ "pros": [
+ "Best-in-class error tracking",
+ "Stack traces with source maps",
+ "Performance monitoring built in",
+ "Supports 100+ platforms and languages"
+ ],
+ "cons": [
+ "Can be noisy without proper filtering",
+ "Pricing based on error volume",
+ "Self-hosting is complex"
+ ]
+ },
+ {
+ "slug": "glitchtip",
+ "name": "GlitchTip",
+ "category": "Monitoring",
+ "is_open_source": true,
+ "github_repo": "glitchtip/glitchtip",
+ "stars": 3000,
+ "website": "https://glitchtip.com",
+ "description": "Open source error tracking that's compatible with Sentry SDKs.",
+ "pros": [
+ "Sentry-compatible error tracking that simplifies self-hosting",
+ "Lightweight alternative requiring minimal server resources",
+ "Performance monitoring with transaction tracking"
+ ],
+ "cons": [
+ "Less polished UI than Sentry"
+ ],
+ "last_commit": "2026-02-05T09:00:00Z",
+ "language": "Python",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=glitchtip.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/glitchtip"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "calendly",
+ "name": "Calendly",
+ "category": "Productivity",
+ "is_open_source": false,
+ "pricing_model": "Paid (Seat-based)",
+ "website": "https://calendly.com",
+ "description": "The modern scheduling platform that makes 'finding time' a breeze.",
+ "alternatives": [
+ "calcom"
+ ],
+ "logo_url": "/logos/calendly.svg",
+ "avg_monthly_cost": 10,
+ "pros": [
+ "Frictionless scheduling experience",
+ "Integrates with Google/Outlook calendars",
+ "Team scheduling and round-robin",
+ "Customizable booking pages"
+ ],
+ "cons": [
+ "Free plan limited to one event type",
+ "Premium features locked behind $10+/mo",
+ "Branding on free tier"
+ ]
+ },
+ {
+ "slug": "calcom",
+ "name": "Cal.com",
+ "category": "Productivity",
+ "is_open_source": true,
+ "github_repo": "calcom/cal.com",
+ "stars": 30000,
+ "website": "https://cal.com",
+ "description": "The open-source Calendly alternative. Take control of your scheduling.",
+ "pros": [
+ "Self-hosted scheduling — no data leaves your server",
+ "Deeply extensible with a plugin architecture and API",
+ "Round-robin, collective, and managed event types"
+ ],
+ "cons": [
+ "Can be overkill for simple use cases"
+ ],
+ "last_commit": "2026-02-10T07:00:00Z",
+ "language": "TypeScript",
+ "license": "AGPL-3.0",
+ "logo_url": "/logos/calcom.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/calcom"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "intercom",
+ "name": "Intercom",
+ "category": "Support",
+ "is_open_source": false,
+ "pricing_model": "Paid (Seat-based)",
+ "website": "https://www.intercom.com",
+ "description": "The business messenger that builds real-time connections.",
+ "alternatives": [
+ "chaskiq"
+ ],
+ "logo_url": "/logos/intercom.svg",
+ "avg_monthly_cost": 39,
+ "pros": [
+ "Best-in-class live chat and messaging",
+ "AI chatbot (Fin) handles common questions",
+ "Product tours and onboarding flows",
+ "Unified inbox for support"
+ ],
+ "cons": [
+ "Very expensive ($74+/mo starting)",
+ "Pricing model is complex and confusing",
+ "Can be overkill for small teams"
+ ]
+ },
+ {
+ "slug": "chaskiq",
+ "name": "Chaskiq",
+ "category": "Support",
+ "is_open_source": true,
+ "github_repo": "chaskiq/chaskiq",
+ "stars": 4000,
+ "website": "https://chaskiq.io",
+ "description": "Open source conversational marketing platform alternative to Intercom and Drift.",
+ "pros": [
+ "Self-hosted customer messaging that replaces Intercom",
+ "Bot automation with visual workflow builder",
+ "Multi-channel support including web chat, email, and WhatsApp"
+ ],
+ "cons": [
+ "Smaller community than Chatwoot"
+ ],
+ "last_commit": "2026-01-28T12:00:00Z",
+ "language": "Ruby",
+ "license": "GPL-3.0",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=chaskiq.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/chaskiq"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "mailgun",
+ "name": "Mailgun",
+ "category": "Marketing",
+ "is_open_source": false,
+ "pricing_model": "Paid (Usage-based)",
+ "website": "https://www.mailgun.com",
+ "description": "Electronic mail delivery service for developers.",
+ "alternatives": [
+ "postal"
+ ],
+ "logo_url": "/logos/mailgun.svg",
+ "avg_monthly_cost": 15,
+ "pros": [
+ "Reliable transactional email delivery",
+ "Powerful email API and SMTP relay",
+ "Detailed delivery analytics",
+ "Good documentation"
+ ],
+ "cons": [
+ "No visual email builder",
+ "Pricing increased significantly",
+ "Support quality has declined"
+ ]
+ },
+ {
+ "slug": "postal",
+ "name": "Postal",
+ "category": "Marketing",
+ "is_open_source": true,
+ "github_repo": "postalserver/postal",
+ "stars": 15000,
+ "website": "https://postalserver.io",
+ "description": "A fully featured open source mail delivery platform for incoming & outgoing e-mail.",
+ "pros": [
+ "High-performance mail delivery server built for throughput",
+ "Detailed delivery tracking with click and open analytics",
+ "IP pool management and DKIM/SPF configuration"
+ ],
+ "cons": [
+ "Extremely complex to manage delivery (IP warm-up)"
+ ],
+ "last_commit": "2026-02-09T13:00:00Z",
+ "language": "Ruby",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=postalserver.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/postal"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "segment",
+ "name": "Segment",
+ "category": "Marketing",
+ "is_open_source": false,
+ "pricing_model": "Paid (Usage-based)",
+ "website": "https://segment.com",
+ "description": "The leading customer data platform (CDP).",
+ "alternatives": [
+ "jitsu"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=segment.com",
+ "avg_monthly_cost": 120,
+ "pros": [
+ "Single API for all analytics tools",
+ "Customer data platform (CDP) capabilities",
+ "200+ destination integrations",
+ "Clean data pipeline management"
+ ],
+ "cons": [
+ "Extremely expensive ($120+/mo to start)",
+ "Complex to set up properly",
+ "Overkill for simple tracking needs"
+ ]
+ },
+ {
+ "slug": "jitsu",
+ "name": "Jitsu",
+ "category": "Marketing",
+ "is_open_source": true,
+ "github_repo": "jitsucom/jitsu",
+ "stars": 5000,
+ "website": "https://jitsu.com",
+ "description": "High-performance data collection platform and open-source Segment alternative.",
+ "pros": [
+ "Unlimited data volume",
+ "Real-time data streaming"
+ ],
+ "cons": [
+ "Fewer destinations than Segment"
+ ],
+ "last_commit": "2026-02-10T16:00:00Z",
+ "language": "TypeScript",
+ "license": "MIT",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jitsu.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/jitsu"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "dokku",
+ "name": "Dokku",
+ "category": "DevOps",
+ "is_open_source": true,
+ "github_repo": "dokku/dokku",
+ "website": "https://dokku.com",
+ "description": "A docker-powered PaaS that helps you build and manage the lifecycle of applications",
+ "pros": [
+ "Rock-solid stability — battle-tested since 2013",
+ "Heroku-compatible buildpacks and Procfile workflow",
+ "Zero-downtime deploys with simple git push"
+ ],
+ "cons": [
+ "CLI driven"
+ ],
+ "stars": 31874,
+ "last_commit": "2026-02-09T15:40:31Z",
+ "language": "Shell",
+ "license": "MIT License",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=dokku.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/dokku"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "chatgpt",
+ "name": "ChatGPT / OpenAI",
+ "category": "AI Models",
+ "is_open_source": false,
+ "pricing_model": "Paid/Freemium",
+ "website": "https://openai.com",
+ "description": "The leading commercial AI assistant and API platform (GPT-4o, o1).",
+ "alternatives": [
+ "llama",
+ "deepseek",
+ "mistral"
+ ],
+ "tags": [
+ "AI",
+ "LLM",
+ "Chat"
+ ],
+ "hosting_type": "cloud",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openai.com",
+ "avg_monthly_cost": 20,
+ "pros": [
+ "Most capable general-purpose AI assistant",
+ "Excellent at writing, coding, and reasoning",
+ "Plugin ecosystem and GPT store",
+ "Supports image, voice, and file inputs"
+ ],
+ "cons": [
+ "$20/mo for GPT-4 access",
+ "Can hallucinate confidently",
+ "No self-hosting option",
+ "Data privacy concerns for sensitive info"
+ ]
+ },
+ {
+ "slug": "llama",
+ "name": "Meta Llama 3.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "meta-llama/llama3",
+ "website": "https://llama.meta.com",
+ "description": "Meta's flagship open-weight model with 128K context. Supports 8B, 70B, and 405B parameters.",
+ "pros": [
+ "Massive 128K token context window for long documents",
+ "Strong multilingual support across 8+ languages",
+ "SOTA 405B variant competing with GPT-4 at a fraction of the cost"
+ ],
+ "cons": [
+ "405B requires massive hardware",
+ "Llama Community License"
+ ],
+ "stars": 65000,
+ "language": "Python",
+ "license": "Llama 3.1 Community License",
+ "tags": [
+ "AI",
+ "LLM",
+ "128K Context"
+ ],
+ "hardware_req": "8GB VRAM (8B), 40GB+ VRAM (70B), 800GB+ VRAM (405B)",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 405,
+ "is_multimodal": false
+ },
+ "logo_url": "/logos/meta.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/llama"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "deepseek",
+ "name": "DeepSeek-V3 / R1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "deepseek-ai/DeepSeek-V3",
+ "website": "https://deepseek.com",
+ "description": "Powerful open-source models including V3 (671B) and R1 (Reasoning). Rivals GPT-4o and o1.",
+ "pros": [
+ "State-of-the-art reasoning (R1)",
+ "Extremely cost efficient",
+ "MIT License (V3/R1)"
+ ],
+ "cons": [
+ "Full model requires huge VRAM",
+ "Newer ecosystem"
+ ],
+ "stars": 110000,
+ "language": "Python",
+ "license": "MIT License",
+ "tags": [
+ "AI",
+ "LLM",
+ "Reasoning"
+ ],
+ "alternatives": [
+ "llama",
+ "mistral",
+ "qwen",
+ "deepseek-v3-1"
+ ],
+ "hardware_req": "8GB VRAM (Distilled), 160GB+ VRAM (Full)",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 160,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 671,
+ "parameters_active_b": 37,
+ "is_multimodal": false
+ },
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/deepseek"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "mistral",
+ "name": "Mistral Large 2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "mistralai/mistral-inference",
+ "website": "https://mistral.ai",
+ "description": "Flagship 123B model from Mistral AI. Optimized for multilingual, reasoning, and coding tasks.",
+ "pros": [
+ "State-of-the-art performance per parameter on benchmarks",
+ "128K context window with function-calling support",
+ "Efficient Mixture-of-Experts architecture for fast inference"
+ ],
+ "cons": [
+ "Mistral Research License",
+ "Requires high VRAM (80GB+)"
+ ],
+ "stars": 20000,
+ "language": "Python",
+ "license": "Mistral Research License",
+ "tags": [
+ "AI",
+ "LLM",
+ "EU"
+ ],
+ "hardware_req": "80GB+ VRAM (FP16), 40GB+ (8-bit)",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 80,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 123,
+ "is_multimodal": false
+ },
+ "logo_url": "/logos/mistral.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/mistral"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "gemma",
+ "name": "Google Gemma 2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "google/gemma-2",
+ "website": "https://ai.google.dev/gemma",
+ "description": "Google's open-weight models (9B, 27B) with class-leading performance and efficient architecture.",
+ "pros": [
+ "Distilled for performance",
+ "Excellent 27B variant",
+ "Google AI ecosystem"
+ ],
+ "cons": [
+ "8K context window",
+ "Gemma Terms of Use"
+ ],
+ "stars": 20000,
+ "language": "Python",
+ "license": "Gemma License",
+ "tags": [
+ "AI",
+ "LLM",
+ "Google"
+ ],
+ "hardware_req": "8GB VRAM (9B), 24GB+ VRAM (27B)",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 18,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 27,
+ "is_multimodal": false
+ },
+ "logo_url": "/logos/gemma.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/gemma"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "qwen",
+ "name": "Qwen 2.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "QwenLM/Qwen2.5",
+ "website": "https://qwenlm.github.io",
+ "description": "Comprehensive LLM series from Alibaba Cloud, excelling in coding, math, and multilingual support.",
+ "pros": [
+ "128K context window",
+ "Top-tier coding ability",
+ "Apache 2.0 (mostly)"
+ ],
+ "cons": [
+ "72B requires significant VRAM"
+ ],
+ "stars": 50000,
+ "language": "Python",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "Coding"
+ ],
+ "hardware_req": "8GB VRAM (7B), 40GB+ VRAM (32B), 140GB+ VRAM (72B)",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 40,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 72,
+ "is_multimodal": false
+ },
+ "logo_url": "/logos/qwen.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/qwen"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "midjourney",
+ "name": "Midjourney",
+ "category": "AI Image Generation",
+ "is_open_source": false,
+ "pricing_model": "Paid (Subscription)",
+ "website": "https://midjourney.com",
+ "description": "Leading AI image generation tool, known for artistic and photorealistic outputs.",
+ "alternatives": [
+ "stable-diffusion",
+ "flux"
+ ],
+ "tags": [
+ "AI",
+ "Image",
+ "Art"
+ ],
+ "hosting_type": "cloud",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=midjourney.com",
+ "avg_monthly_cost": 10,
+ "pros": [
+ "Best-in-class AI image generation quality",
+ "Stunning artistic and photorealistic outputs",
+ "Active community for inspiration",
+ "V6 handles text in images well"
+ ],
+ "cons": [
+ "Discord-only interface (no standalone app)",
+ "No free tier ($10/mo minimum)",
+ "Limited control over exact outputs",
+ "No API for automation"
+ ]
+ },
+ {
+ "slug": "stable-diffusion",
+ "name": "Stable Diffusion 3.5",
+ "category": "AI Image Generation",
+ "is_open_source": true,
+ "github_repo": "Stability-AI/sd3.5",
+ "website": "https://stability.ai",
+ "description": "The latest open-weights image generation model from Stability AI, offering superior prompt adherence.",
+ "pros": [
+ "Run image generation entirely on your own GPU",
+ "Extensive community with thousands of fine-tuned models",
+ "ControlNet, inpainting, and img2img for precise creative control"
+ ],
+ "cons": [
+ "Stability Community License",
+ "Requires 8GB+ VRAM"
+ ],
+ "stars": 10000,
+ "language": "Python",
+ "license": "Stability Community License",
+ "tags": [
+ "AI",
+ "Image",
+ "Prompt Adherence"
+ ],
+ "hardware_req": "8GB VRAM (Medium), 16GB+ VRAM (Large)",
+ "hosting_type": "self-hosted",
+ "logo_url": "/logos/stability.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/stable-diffusion"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "mochi-1",
+ "name": "Mochi-1",
+ "category": "AI Video Generation",
+ "is_open_source": true,
+ "github_repo": "genmoai/mochi1",
+ "website": "https://www.genmo.ai",
+ "description": "High-fidelity open-weights video generation model from Genmo, rivaling closed-source alternatives.",
+ "pros": [
+ "Realistic motion",
+ "Adobe-like quality",
+ "Apache 2.0 license"
+ ],
+ "cons": [
+ "Extreme hardware requirements",
+ "Memory intensive"
+ ],
+ "stars": 5000,
+ "language": "Python",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "Video",
+ "Motion"
+ ],
+ "hardware_req": "24GB VRAM (Minimal), 80GB VRAM (Recommended)",
+ "hosting_type": "both",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=genmo.ai",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "hunyuan-video",
+ "name": "HunyuanVideo 1.5",
+ "category": "AI Video Generation",
+ "is_open_source": true,
+ "github_repo": "Tencent/HunyuanVideo",
+ "website": "https://github.com/Tencent/HunyuanVideo",
+ "description": "Tencent's state-of-the-art open-source video generation model with 13B parameters.",
+ "pros": [
+ "Native 720p output",
+ "Long sequences support",
+ "Stable and clean motion"
+ ],
+ "cons": [
+ "High compute cost"
+ ],
+ "stars": 8000,
+ "language": "Python",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "Video",
+ "HD"
+ ],
+ "hardware_req": "14GB VRAM (v1.5/distilled), 45GB+ VRAM (Base)",
+ "hosting_type": "both",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tencent.com",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "flux",
+ "name": "FLUX",
+ "category": "AI Image Generation",
+ "is_open_source": true,
+ "github_repo": "black-forest-labs/flux",
+ "website": "https://blackforestlabs.ai",
+ "description": "Next-gen open image generation model from Black Forest Labs. State-of-the-art quality rivaling Midjourney.",
+ "pros": [
+ "Outstanding image quality",
+ "Open weights available",
+ "Rapid community adoption"
+ ],
+ "cons": [
+ "High VRAM requirement",
+ "Newer (less tooling)"
+ ],
+ "stars": 20000,
+ "language": "Python",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "Image",
+ "New"
+ ],
+ "hardware_req": "12GB+ VRAM (Schnell), 24GB+ (Dev)",
+ "hosting_type": "both",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=blackforestlabs.ai",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "github-copilot",
+ "name": "GitHub Copilot",
+ "category": "AI Coding",
+ "is_open_source": false,
+ "pricing_model": "Paid (Subscription)",
+ "website": "https://github.com/features/copilot",
+ "description": "AI pair programmer by GitHub/OpenAI. Integrates into VS Code and JetBrains.",
+ "alternatives": [
+ "continue-dev",
+ "tabby"
+ ],
+ "tags": [
+ "AI",
+ "Coding",
+ "IDE"
+ ],
+ "hosting_type": "cloud",
+ "logo_url": "/logos/github-copilot.svg",
+ "avg_monthly_cost": 10,
+ "pros": [
+ "Best AI code completion in the market",
+ "Deep IDE integration (VS Code, JetBrains)",
+ "Understands project context",
+ "Copilot Chat for code explanations"
+ ],
+ "cons": [
+ "$10/mo per user",
+ "Can suggest insecure or outdated patterns",
+ "Privacy concerns with code telemetry",
+ "Dependent on GitHub/Microsoft"
+ ]
+ },
+ {
+ "slug": "continue-dev",
+ "name": "Continue",
+ "category": "AI Coding",
+ "is_open_source": true,
+ "github_repo": "continuedev/continue",
+ "website": "https://continue.dev",
+ "description": "Open-source AI code assistant for VS Code and JetBrains. Use any model (local or API).",
+ "pros": [
+ "Highly customizable AI coding assistant — bring your own model",
+ "Works with VS Code and JetBrains natively",
+ "Context-aware with codebase indexing and retrieval"
+ ],
+ "cons": [
+ "Requires model setup"
+ ],
+ "stars": 25000,
+ "language": "TypeScript",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "Coding",
+ "IDE",
+ "Self-Hosted"
+ ],
+ "hardware_req": "Depends on chosen model",
+ "hosting_type": "both",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=continue.dev",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "tabby",
+ "name": "TabbyML",
+ "category": "AI Coding",
+ "is_open_source": true,
+ "github_repo": "TabbyML/tabby",
+ "website": "https://tabby.tabbyml.com",
+ "description": "Self-hosted AI coding assistant. An open-source, self-hosted alternative to GitHub Copilot.",
+ "pros": [
+ "Enterprise-ready self-hosted code completion",
+ "Supports multiple model backends including local GGUF",
+ "IDE extensions for VS Code, Vim, and IntelliJ"
+ ],
+ "cons": [
+ "Needs GPU for best results"
+ ],
+ "stars": 25000,
+ "language": "Rust",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "Coding",
+ "Self-Hosted"
+ ],
+ "hardware_req": "8GB+ VRAM recommended",
+ "hosting_type": "self-hosted",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tabby.tabbyml.com",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/tabby"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "ollama",
+ "name": "Ollama",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "github_repo": "ollama/ollama",
+ "website": "https://ollama.com",
+ "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models locally.",
+ "pros": [
+ "Run any open model locally with a single command",
+ "OpenAI-compatible API for drop-in integration",
+ "Automatic model management with quantization support"
+ ],
+ "cons": [
+ "Command line focused (needs UI)"
+ ],
+ "stars": 60000,
+ "language": "Go",
+ "license": "MIT License",
+ "tags": [
+ "AI",
+ "Local",
+ "Runner"
+ ],
+ "hardware_req": "8GB+ RAM",
+ "hosting_type": "self-hosted",
+ "logo_url": "/logos/ollama.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/ollama"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "open-webui",
+ "name": "Open WebUI",
+ "category": "AI Interfaces",
+ "is_open_source": true,
+ "github_repo": "open-webui/open-webui",
+ "website": "https://openwebui.com",
+ "description": "User-friendly WebUI for LLMs (Formerly Ollama WebUI). Supports Ollama and OpenAI-compatible APIs.",
+ "pros": [
+ "ChatGPT-like UI",
+ "Multi-model chat",
+ "RAG support"
+ ],
+ "cons": [
+ "Requires backend (like Ollama)"
+ ],
+ "stars": 15000,
+ "language": "Svelte",
+ "license": "MIT License",
+ "tags": [
+ "AI",
+ "UI",
+ "Chat"
+ ],
+ "hosting_type": "self-hosted",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openwebui.com",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "jan",
+ "name": "Jan",
+ "category": "AI Interfaces",
+ "is_open_source": true,
+ "github_repo": "janhq/jan",
+ "website": "https://jan.ai",
+ "description": "Jan is an open source alternative to ChatGPT that runs 100% offline on your computer.",
+ "pros": [
+ "Runs offline",
+ "Native app (no Docker)",
+ "Local model manager"
+ ],
+ "cons": [
+ "Heavy resource usage"
+ ],
+ "stars": 18000,
+ "language": "TypeScript",
+ "license": "AGPL-3.0",
+ "tags": [
+ "AI",
+ "Desktop",
+ "Offline"
+ ],
+ "hardware_req": "Apple Silicon or NVIDIA GPU",
+ "hosting_type": "self-hosted",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jan.ai",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "lm-studio",
+ "name": "LM Studio",
+ "category": "AI Runners",
+ "is_open_source": false,
+ "pricing_model": "Free (Proprietary)",
+ "website": "https://lmstudio.ai",
+ "description": "Discover, download, and run local LLMs. Easy GUI for GGUF models.",
+ "alternatives": [
+ "ollama",
+ "gpt4all"
+ ],
+ "tags": [
+ "AI",
+ "Desktop",
+ "GUI"
+ ],
+ "hardware_req": "Apple Silicon or NVIDIA/AMD GPU",
+ "hosting_type": "self-hosted",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=lmstudio.ai",
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "pros": [
+ "Run LLMs locally with a clean GUI",
+ "No cloud dependency — fully offline",
+ "Supports GGUF and other quantized formats",
+ "Built-in model discovery and download"
+ ],
+ "cons": [
+ "Requires decent hardware (8GB+ RAM)",
+ "Closed source despite local-first approach",
+ "Limited compared to CLI tools like Ollama"
+ ]
+ },
+ {
+ "slug": "gpt4all",
+ "name": "GPT4All",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "github_repo": "nomic-ai/gpt4all",
+ "website": "https://gpt4all.io",
+ "description": "Run open-source LLMs locally on your CPU and GPU. No internet required.",
+ "pros": [
+ "One-click desktop installer — no terminal needed",
+ "Built-in RAG for chatting with your local documents",
+ "Runs on CPU — no GPU required for basic models"
+ ],
+ "cons": [
+ "Slower on CPU"
+ ],
+ "stars": 65000,
+ "language": "C++",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "Desktop",
+ "CPU"
+ ],
+ "hosting_type": "self-hosted",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=gpt4all.io",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/gpt4all"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "localai",
+ "name": "LocalAI",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "github_repo": "mudler/LocalAI",
+ "website": "https://localai.io",
+ "description": "The free, Open Source OpenAI alternative. Drop-in replacement for OpenAI API.",
+ "pros": [
+ "OpenAI API compatible",
+ "Runs on consumer hardware",
+ "No GPU required"
+ ],
+ "cons": [
+ "Configuration heavy"
+ ],
+ "stars": 20000,
+ "language": "Go",
+ "license": "MIT License",
+ "tags": [
+ "AI",
+ "API",
+ "Backend"
+ ],
+ "hosting_type": "self-hosted",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=localai.io",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "flowise",
+ "name": "Flowise",
+ "category": "AI Tools",
+ "is_open_source": true,
+ "github_repo": "FlowiseAI/Flowise",
+ "website": "https://flowiseai.com",
+ "description": "Drag & drop UI to build your customized LLM flow using LangChainJS.",
+ "pros": [
+ "Low-code",
+ "Visual builder",
+ "Rich integrations"
+ ],
+ "cons": [
+ "Node.js dependency"
+ ],
+ "stars": 28000,
+ "language": "TypeScript",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "Low-Code",
+ "LangChain"
+ ],
+ "hosting_type": "self-hosted",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=flowiseai.com",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "llama-4",
+ "name": "Meta Llama 4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "meta-llama/llama4",
+ "website": "https://llama.meta.com",
+ "description": "The latest generation of Llama. 'Maverick' architecture with 256K context. The new standard for open weights.",
+ "pros": [
+ "Next-gen Maverick architecture — faster and smarter than Llama 3",
+ "256K context window — double that of most competitors",
+ "Native multimodal support for images, video, and text"
+ ],
+ "cons": [
+ "High VRAM for top tiers"
+ ],
+ "stars": 45000,
+ "language": "Python",
+ "license": "Llama Community License",
+ "tags": [
+ "AI",
+ "LLM",
+ "2026",
+ "SOTA"
+ ],
+ "hardware_req": "12GB VRAM (Medium), 48GB+ VRAM (Large)",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 12,
+ "context_window_tokens": 256000,
+ "parameters_total_b": 65,
+ "is_multimodal": true
+ },
+ "logo_url": "/logos/meta.svg",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "gemma-3",
+ "name": "Google Gemma 3",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "google/gemma-3",
+ "website": "https://ai.google.dev/gemma",
+ "description": "Gemma 3 (27B) delivers GPT-5 class performance on a single GPU. Optimized for reasoning and agents.",
+ "pros": [
+ "Incredible 27B performance",
+ "Agent-centric design",
+ "JAX/PyTorch native"
+ ],
+ "cons": [
+ "Limited to 27B size currently"
+ ],
+ "stars": 15000,
+ "language": "Python",
+ "license": "Gemma License",
+ "tags": [
+ "AI",
+ "LLM",
+ "Google",
+ "2026"
+ ],
+ "hardware_req": "24GB VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 1000000,
+ "parameters_total_b": 27,
+ "is_multimodal": true
+ },
+ "logo_url": "/logos/gemma.svg",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "qwen-3",
+ "name": "Qwen 3 (235B)",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "QwenLM/Qwen3",
+ "website": "https://qwenlm.github.io",
+ "description": "Massive 235B param model. The absolute king of coding and mathematics benchmarks in 2026.",
+ "pros": [
+ "Unmatched coding performance",
+ "Excellent math/reasoning",
+ "MoE efficiency"
+ ],
+ "cons": [
+ "Requires multi-GPU setup"
+ ],
+ "stars": 35000,
+ "language": "Python",
+ "license": "Apache License 2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "Coding",
+ "MoE"
+ ],
+ "hardware_req": "140GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 140,
+ "context_window_tokens": 1000000,
+ "parameters_total_b": 235,
+ "is_multimodal": false
+ },
+ "logo_url": "/logos/qwen.svg",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "deepseek-v3-1",
+ "name": "DeepSeek V3.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "github_repo": "deepseek-ai/DeepSeek-V3.1",
+ "website": "https://deepseek.com",
+ "description": "Refined V3 architecture with improved instruction following and reduced hallucination rates.",
+ "pros": [
+ "API pricing 10-50x cheaper than GPT-4 equivalents",
+ "Open weights with full model access — no API lock-in",
+ "Top-tier reasoning that rivals closed-source frontier models"
+ ],
+ "cons": [
+ "Complex serving stack"
+ ],
+ "stars": 120000,
+ "language": "Python",
+ "license": "MIT License",
+ "tags": [
+ "AI",
+ "LLM",
+ "Reasoning"
+ ],
+ "alternatives": [
+ "deepseek",
+ "llama",
+ "mistral",
+ "qwen"
+ ],
+ "hardware_req": "80GB VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 80,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 685,
+ "is_multimodal": false
+ },
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com",
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "llama-3-1-8b",
+ "name": "Llama 3.1 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "The latest 8B parameter model from Meta, optimized for efficiency and edge devices.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-3-1-70b",
+ "name": "Llama 3.1 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "A powerful 70B model by Meta, rivaling closed-source top-tier models.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "49GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-3-1-405b",
+ "name": "Llama 3.1 405B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "Meta's massive 405B frontier-class open weights model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "284GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 284,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 405,
+ "parameters_active_b": 405,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-3-8b",
+ "name": "Llama 3 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "Meta's highly capable 8B model, a standard for local LLM inference.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-3-70b",
+ "name": "Llama 3 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "Meta's previous generation 70B heavy-hitter.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "49GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-2-7b",
+ "name": "Llama 2 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "The classic 7B model that started the open-weight revolution.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-2-13b",
+ "name": "Llama 2 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "A balanced 13B model from the Llama 2 series.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-2-70b",
+ "name": "Llama 2 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "The largest Llama 2 model, widely used for fine-tuning.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "49GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "code-llama-7b",
+ "name": "Code Llama 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "Specialized coding model based on Llama 2.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 100000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "code-llama-13b",
+ "name": "Code Llama 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "Mid-sized specialized coding model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 100000,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "code-llama-34b",
+ "name": "Code Llama 34B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "Large coding model with excellent performance.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 100000,
+ "parameters_total_b": 34,
+ "parameters_active_b": 34,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "code-llama-70b",
+ "name": "Code Llama 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llama.meta.com",
+ "description": "The most powerful Code Llama variant.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Open Weights",
+ "AI",
+ "LLM",
+ "Meta"
+ ],
+ "hardware_req": "49GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 100000,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "mistral-7b-v0-3",
+ "name": "Mistral 7B v0.3",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "Updated 7B model from Mistral AI with extended vocabulary and function calling.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "mistral-nemo-12b",
+ "name": "Mistral Nemo 12B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "A native 12B model built in collaboration with NVIDIA, fitting in 24GB VRAM.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "8GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 12,
+ "parameters_active_b": 12,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "mixtral-8x7b",
+ "name": "Mixtral 8x7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "The first high-performance open sparse Mixture-of-Experts (MoE) model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "33GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 33,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 47,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "mixtral-8x22b",
+ "name": "Mixtral 8x22B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "A massive MoE model setting new standards for open weights.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "99GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 99,
+ "context_window_tokens": 65000,
+ "parameters_total_b": 141,
+ "parameters_active_b": 39,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "codestral-22b",
+ "name": "Codestral 22B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "Mistral's first dedicated code model, proficient in 80+ languages.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "15GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 15,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 22,
+ "parameters_active_b": 22,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "mathstral-7b",
+ "name": "Mathstral 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "Specialized model for math and reasoning tasks.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "ministral-3b",
+ "name": "Ministral 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "Mistral's efficient edge model for mobile and low-latency use cases.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "ministral-8b",
+ "name": "Ministral 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://mistral.ai",
+ "description": "A powerful edge model bridging the gap between small and medium LLMs.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Europe",
+ "Mistral AI"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "qwen-2-5-0-5b",
+ "name": "Qwen 2.5 0.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Tiny but capable model for extreme edge analytics.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 0.5,
+ "parameters_active_b": 0.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-1-5b",
+ "name": "Qwen 2.5 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Small footprint model punching above its weight.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-3b",
+ "name": "Qwen 2.5 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Balanced 3B model, great for mobile inference.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-7b",
+ "name": "Qwen 2.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "The 7B workhorse of the Qwen 2.5 family, beating Llama 3.1 8B in many benchmarks.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-14b",
+ "name": "Qwen 2.5 14B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "A sweet-spot size for dual-GPU or high VRAM consumer cards.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "10GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-32b",
+ "name": "Qwen 2.5 32B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Ideally sized for 24GB VRAM cards like the RTX 3090/4090.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "22GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-72b",
+ "name": "Qwen 2.5 72B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Top-tier open weights model, consistently ranking high on leaderboards.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "50GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 50,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 72,
+ "parameters_active_b": 72,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-coder-1-5b",
+ "name": "Qwen 2.5 Coder 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Tiny coding assistant.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-coder-7b",
+ "name": "Qwen 2.5 Coder 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "State-of-the-art 7B coding model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-5-coder-32b",
+ "name": "Qwen 2.5 Coder 32B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Powerful coding model fitting in consumer hardware.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "22GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-vl-7b",
+ "name": "Qwen 2 VL 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Vision-Language model capable of understanding images and video.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen-2-vl-72b",
+ "name": "Qwen 2 VL 72B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://qwenlm.github.io",
+ "description": "Massive Vision-Language model for complex visual reasoning.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Alibaba Cloud",
+ "Qwen",
+ "LLM",
+ "AI",
+ "Alibaba"
+ ],
+ "hardware_req": "50GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 50,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 72,
+ "parameters_active_b": 72,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "gemma-2-2b",
+ "name": "Gemma 2 2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.google.dev/gemma",
+ "description": "Efficient 2B model by Google, distilled for high performance.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Gemma",
+ "Google",
+ "LLM",
+ "Google DeepMind",
+ "AI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "gemma-2-9b",
+ "name": "Gemma 2 9B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.google.dev/gemma",
+ "description": "Google's powerful 9B open model, outperforming larger predecessors.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Gemma",
+ "Google",
+ "LLM",
+ "Google DeepMind",
+ "AI"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 9,
+ "parameters_active_b": 9,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "gemma-2-27b",
+ "name": "Gemma 2 27B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.google.dev/gemma",
+ "description": "Large-scale open model from Google designed for complex reasoning.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Gemma",
+ "Google",
+ "LLM",
+ "Google DeepMind",
+ "AI"
+ ],
+ "hardware_req": "19GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 19,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 27,
+ "parameters_active_b": 27,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "codegemma-2b",
+ "name": "CodeGemma 2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.google.dev/gemma",
+ "description": "Fast, lightweight code completion model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Gemma",
+ "Google",
+ "LLM",
+ "Google DeepMind",
+ "AI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "codegemma-7b",
+ "name": "CodeGemma 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.google.dev/gemma",
+ "description": "Instruction-tuned coding model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Gemma",
+ "Google",
+ "LLM",
+ "Google DeepMind",
+ "AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "recurrentgemma-2b",
+ "name": "RecurrentGemma 2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.google.dev/gemma",
+ "description": "Gemma architecture with recurrent neural network efficiency.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Gemma",
+ "Google",
+ "LLM",
+ "Google DeepMind",
+ "AI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "palette-2b",
+ "name": "Palette 2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.google.dev/gemma",
+ "description": "Specialized vision-language model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Gemma",
+ "Google",
+ "LLM",
+ "Google DeepMind",
+ "AI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "phi-3-5-mini",
+ "name": "Phi 3.5 Mini",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://azure.microsoft.com/en-us/products/phi",
+ "description": "Latest lightweight powerhouse from Microsoft, beating many larger models.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Phi",
+ "AI",
+ "LLM",
+ "Microsoft"
+ ],
+ "hardware_req": "3GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 3.8,
+ "parameters_active_b": 3.8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "phi-3-5-moe",
+ "name": "Phi 3.5 MoE",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://azure.microsoft.com/en-us/products/phi",
+ "description": "Mixture-of-Experts model combining 16x3.8B experts.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Phi",
+ "AI",
+ "LLM",
+ "Microsoft"
+ ],
+ "hardware_req": "29GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 29,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 42,
+ "parameters_active_b": 6.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "phi-3-5-vision",
+ "name": "Phi 3.5 Vision",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://azure.microsoft.com/en-us/products/phi",
+ "description": "Multimodal version of Phi 3.5 capable of image analysis.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Phi",
+ "AI",
+ "LLM",
+ "Microsoft"
+ ],
+ "hardware_req": "3GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 4.2,
+ "parameters_active_b": 4.2,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "phi-3-mini",
+ "name": "Phi 3 Mini",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://azure.microsoft.com/en-us/products/phi",
+ "description": "Highly capable 3.8B model trained on textbook data.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Phi",
+ "AI",
+ "LLM",
+ "Microsoft"
+ ],
+ "hardware_req": "3GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 3.8,
+ "parameters_active_b": 3.8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "phi-3-medium",
+ "name": "Phi 3 Medium",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://azure.microsoft.com/en-us/products/phi",
+ "description": "14B parameter version of the Phi-3 family.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Phi",
+ "AI",
+ "LLM",
+ "Microsoft"
+ ],
+ "hardware_req": "10GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "orca-2-13b",
+ "name": "Orca 2 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://azure.microsoft.com/en-us/products/phi",
+ "description": "Microsoft's research model exploring reasoning capabilities.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Phi",
+ "AI",
+ "LLM",
+ "Microsoft"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "yi-1-5-6b",
+ "name": "Yi 1.5 6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://01.ai",
+ "description": "Strong 6B model from 01.AI.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "01.AI",
+ "Yi"
+ ],
+ "hardware_req": "4GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "yi-1-5-9b",
+ "name": "Yi 1.5 9B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://01.ai",
+ "description": "9B parameter model optimized for coding and math.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "01.AI",
+ "Yi"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 9,
+ "parameters_active_b": 9,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "yi-1-5-34b",
+ "name": "Yi 1.5 34B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://01.ai",
+ "description": "Highly rated 34B model, popular in the community.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "01.AI",
+ "Yi"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 34,
+ "parameters_active_b": 34,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "yi-large",
+ "name": "Yi Large",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://01.ai",
+ "description": "Proprietary-class open weights model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "01.AI",
+ "Yi"
+ ],
+ "hardware_req": "70GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 70,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 100,
+ "parameters_active_b": 100,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "command-r",
+ "name": "Command R",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://cohere.com",
+ "description": "Optimized for RAG (Retrieval Augmented Generation) and tool use.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Cohere For AI",
+ "Cohere",
+ "LLM",
+ "RAG",
+ "AI"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 35,
+ "parameters_active_b": 35,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "command-r-plus",
+ "name": "Command R+",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://cohere.com",
+ "description": "Massive RAG-optimized model with advanced reasoning.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Cohere For AI",
+ "Cohere",
+ "LLM",
+ "RAG",
+ "AI"
+ ],
+ "hardware_req": "73GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 73,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 104,
+ "parameters_active_b": 104,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "dolphin-2-9-llama-3-8b",
+ "name": "Dolphin 2.9 Llama 3 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://erichartford.com",
+ "description": "Uncensored fine-tune of Llama 3 8B.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Roleplay",
+ "Uncensored",
+ "LLM",
+ "Cognitive Computations",
+ "AI"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "dolphin-2-9-2-qwen-2-72b",
+ "name": "Dolphin 2.9.2 Qwen 2 72B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://erichartford.com",
+ "description": "Powerful uncensored chat model based on Qwen 2.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Roleplay",
+ "Uncensored",
+ "LLM",
+ "Cognitive Computations",
+ "AI"
+ ],
+ "hardware_req": "50GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 50,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 72,
+ "parameters_active_b": 72,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "dolphin-mixtral-8x7b",
+ "name": "Dolphin Mixtral 8x7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://erichartford.com",
+ "description": "One of the most popular uncensored MoE models.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Roleplay",
+ "Uncensored",
+ "LLM",
+ "Cognitive Computations",
+ "AI"
+ ],
+ "hardware_req": "33GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 33,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 47,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "hermes-3-llama-3-1-8b",
+ "name": "Hermes 3 Llama 3.1 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://nousresearch.com",
+ "description": "Unlock the full potential of Llama 3.1 with advanced agentic capabilities.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Fine-tune",
+ "AI",
+ "LLM",
+ "Nous Research"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "hermes-3-llama-3-1-70b",
+ "name": "Hermes 3 Llama 3.1 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://nousresearch.com",
+ "description": "70B version of the Hermes 3 agentic fine-tune.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Fine-tune",
+ "AI",
+ "LLM",
+ "Nous Research"
+ ],
+ "hardware_req": "49GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "nous-hermes-2-mixtral-8x7b",
+ "name": "Nous Hermes 2 Mixtral 8x7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://nousresearch.com",
+ "description": "High-quality instruction tuned Mixtral.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Fine-tune",
+ "AI",
+ "LLM",
+ "Nous Research"
+ ],
+ "hardware_req": "33GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 33,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 47,
+ "parameters_active_b": 47,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "solar-10-7b",
+ "name": "Solar 10.7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://upstage.ai",
+ "description": "Innovative 10.7B model created using depth up-scaling of Llama 2.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Upstage",
+ "Solar",
+ "LLM",
+ "Depth Upscaling",
+ "AI"
+ ],
+ "hardware_req": "7GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 7,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 10.7,
+ "parameters_active_b": 10.7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "solar-pro",
+ "name": "Solar Pro",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://upstage.ai",
+ "description": "Advanced scale-up of the Solar architecture.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Upstage",
+ "Solar",
+ "LLM",
+ "Depth Upscaling",
+ "AI"
+ ],
+ "hardware_req": "15GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 15,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 22,
+ "parameters_active_b": 22,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "deepseek-coder-v2-16b",
+ "name": "DeepSeek Coder V2 16B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://deepseek.com",
+ "description": "Powerful coding-specific MoE model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Coding",
+ "AI",
+ "LLM",
+ "DeepSeek"
+ ],
+ "hardware_req": "11GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 11,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 16,
+ "parameters_active_b": 16,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "deepseek-coder-v2-236b",
+ "name": "DeepSeek Coder V2 236B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://deepseek.com",
+ "description": "Massive coding model rivaling GPT-4 across benchmarks.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Coding",
+ "AI",
+ "LLM",
+ "DeepSeek"
+ ],
+ "hardware_req": "165GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 165,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 236,
+ "parameters_active_b": 236,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "deepseek-llm-7b",
+ "name": "DeepSeek LLM 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://deepseek.com",
+ "description": "General purpose 7B chat model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Coding",
+ "AI",
+ "LLM",
+ "DeepSeek"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "deepseek-llm-67b",
+ "name": "DeepSeek LLM 67B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://deepseek.com",
+ "description": "Large general purpose model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Coding",
+ "AI",
+ "LLM",
+ "DeepSeek"
+ ],
+ "hardware_req": "47GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 47,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 67,
+ "parameters_active_b": 67,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "stable-lm-2-1-6b",
+ "name": "Stable LM 2 1.6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://stability.ai",
+ "description": "Very small, efficient model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Stability AI",
+ "AI",
+ "LLM"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.6,
+ "parameters_active_b": 1.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "stable-lm-2-12b",
+ "name": "Stable LM 2 12B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://stability.ai",
+ "description": "Balanced 12B model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Stability AI",
+ "AI",
+ "LLM"
+ ],
+ "hardware_req": "8GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 12,
+ "parameters_active_b": 12,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "stable-code-3b",
+ "name": "Stable Code 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://stability.ai",
+ "description": "Specialized 3B coding model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Stability AI",
+ "AI",
+ "LLM"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 16384,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "starling-lm-7b-alpha",
+ "name": "Starling LM 7B Alpha",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://nexusflow.ai",
+ "description": "RLHF fine-tune known for high quality responses.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Nexusflow",
+ "RLHF"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "starling-lm-7b-beta",
+ "name": "Starling LM 7B Beta",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://nexusflow.ai",
+ "description": "Improved beta version of the Starling RLHF model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Nexusflow",
+ "RLHF"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "openchat-3-5",
+ "name": "OpenChat 3.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://openchat.team",
+ "description": "Fine-tuned Mistral 7B using C-RLFT strategy.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "C-RLFT",
+ "AI",
+ "LLM",
+ "OpenChat"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "openchat-3-6",
+ "name": "OpenChat 3.6",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://openchat.team",
+ "description": "Updated version based on Llama 3 8B.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "C-RLFT",
+ "AI",
+ "LLM",
+ "OpenChat"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "tinyllama-1-1b",
+ "name": "TinyLlama 1.1B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/jzhang38/TinyLlama",
+ "description": "The most popular ~1B model, trained on 3T tokens.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Small",
+ "AI",
+ "LLM",
+ "TinyLlama"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 1.1,
+ "parameters_active_b": 1.1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "falcon-2-11b",
+ "name": "Falcon 2 11B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://falconllm.tii.ae",
+ "description": "TII's efficient 11B model with strong reasoning capabilities.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Falcon",
+ "LLM",
+ "AI",
+ "TII"
+ ],
+ "hardware_req": "8GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 11,
+ "parameters_active_b": 11,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/falcon.svg"
+ },
+ {
+ "slug": "falcon-180b",
+ "name": "Falcon 180B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://falconllm.tii.ae",
+ "description": "Massive open model, one of the largest available.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Falcon",
+ "LLM",
+ "AI",
+ "TII"
+ ],
+ "hardware_req": "126GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 126,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 180,
+ "parameters_active_b": 180,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/falcon.svg"
+ },
+ {
+ "slug": "falcon-40b",
+ "name": "Falcon 40B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://falconllm.tii.ae",
+ "description": "The original high-performance open model form TII.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Falcon",
+ "LLM",
+ "AI",
+ "TII"
+ ],
+ "hardware_req": "28GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 28,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 40,
+ "parameters_active_b": 40,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/falcon.svg"
+ },
+ {
+ "slug": "falcon-7b",
+ "name": "Falcon 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://falconllm.tii.ae",
+ "description": "Smaller variant of the Falcon family.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Falcon",
+ "LLM",
+ "AI",
+ "TII"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/falcon.svg"
+ },
+ {
+ "slug": "glm-4-9b",
+ "name": "GLM 4 9B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/THUDM/GLM-4",
+ "description": "Powerful multilingual model from Zhipu AI.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "GLM",
+ "Zhipu AI",
+ "LLM",
+ "AI"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 128000,
+ "parameters_total_b": 9,
+ "parameters_active_b": 9,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "chatglm3-6b",
+ "name": "ChatGLM3 6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/THUDM/GLM-4",
+ "description": "Optimized Chinese-English conversational model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "GLM",
+ "Zhipu AI",
+ "LLM",
+ "AI"
+ ],
+ "hardware_req": "4GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "granite-3-0-8b-instruct",
+ "name": "Granite 3.0 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.ibm.com/granite",
+ "description": "IBM's enterprise-grade open model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "IBM",
+ "LLM",
+ "AI",
+ "Enterprise"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "granite-3-0-2b-instruct",
+ "name": "Granite 3.0 2B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.ibm.com/granite",
+ "description": "Efficient enterprise model for lower resource environments.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "IBM",
+ "LLM",
+ "AI",
+ "Enterprise"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "granite-code-3b",
+ "name": "Granite Code 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.ibm.com/granite",
+ "description": "IBM specialized code model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "IBM",
+ "LLM",
+ "AI",
+ "Enterprise"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "granite-code-8b",
+ "name": "Granite Code 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.ibm.com/granite",
+ "description": "Larger coding model from IBM.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "IBM",
+ "LLM",
+ "AI",
+ "Enterprise"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "flux-1-schnell",
+ "name": "Flux.1 Schnell",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://blackforestlabs.ai",
+ "description": "Fastest state-of-the-art open image generation model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Black Forest Labs",
+ "Image Generation",
+ "AI",
+ "Diffusion"
+ ],
+ "hardware_req": "8GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 77,
+ "parameters_total_b": 12,
+ "parameters_active_b": 12,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/flux.svg"
+ },
+ {
+ "slug": "flux-1-dev",
+ "name": "Flux.1 Dev",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://blackforestlabs.ai",
+ "description": "Developer version of the powerful Flux image model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Black Forest Labs",
+ "Image Generation",
+ "AI",
+ "Diffusion"
+ ],
+ "hardware_req": "8GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 77,
+ "parameters_total_b": 12,
+ "parameters_active_b": 12,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/flux.svg"
+ },
+ {
+ "slug": "sdxl-1-0",
+ "name": "SDXL 1.0",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://stability.ai",
+ "description": "The benchmark for open source image generation.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Image Generation",
+ "AI",
+ "Diffusion",
+ "Stability AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 77,
+ "parameters_total_b": 6.6,
+ "parameters_active_b": 6.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "sd-3-medium",
+ "name": "SD 3 Medium",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://stability.ai",
+ "description": "Stability AI's latest medium-sized image model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Image Generation",
+ "AI",
+ "Diffusion",
+ "Stability AI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 77,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "stable-cascade",
+ "name": "Stable Cascade",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://stability.ai",
+ "description": "Efficient cascade architecture for high detail images.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Image Generation",
+ "AI",
+ "Diffusion",
+ "Stability AI"
+ ],
+ "hardware_req": "3GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 77,
+ "parameters_total_b": 3.6,
+ "parameters_active_b": 3.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "internlm-2-5-7b",
+ "name": "InternLM 2.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://internlm.intern-ai.org.cn",
+ "description": "High performance 7B model with strong reasoning.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Shanghai AI Lab",
+ "LLM",
+ "AI",
+ "InternLM"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "internlm-2-5-20b",
+ "name": "InternLM 2.5 20B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://internlm.intern-ai.org.cn",
+ "description": "Balanced 20B model filling the gap between 7B and 70B.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Shanghai AI Lab",
+ "LLM",
+ "AI",
+ "InternLM"
+ ],
+ "hardware_req": "14GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 14,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 20,
+ "parameters_active_b": 20,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "baichuan-2-7b",
+ "name": "Baichuan 2 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.baichuan-ai.com",
+ "description": "Top tier Chinese-English bilingual model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Baichuan",
+ "LLM",
+ "AI",
+ "Baichuan Inc."
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "baichuan-2-13b",
+ "name": "Baichuan 2 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.baichuan-ai.com",
+ "description": "Larger variant of the popular Baichuan series.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Baichuan",
+ "LLM",
+ "AI",
+ "Baichuan Inc."
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "minicpm-2-4b",
+ "name": "MiniCPM 2.4B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/OpenBMB/MiniCPM",
+ "description": "High efficiency edge model optimization.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "OpenBMB",
+ "Mobile",
+ "LLM",
+ "Edge",
+ "AI"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2.4,
+ "parameters_active_b": 2.4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "minicpm-v-2-6",
+ "name": "MiniCPM V 2.6",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/OpenBMB/MiniCPM",
+ "description": "Powerful multimodal model for mobile devices.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "OpenBMB",
+ "Mobile",
+ "LLM",
+ "Edge",
+ "AI"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "exaone-3-0-7-8b",
+ "name": "Exaone 3.0 7.8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.lgresearch.ai",
+ "description": "LG's competitive open model entry.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LG",
+ "LG AI Research",
+ "LLM",
+ "AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7.8,
+ "parameters_active_b": 7.8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "jamba-v0-1",
+ "name": "Jamba v0.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.ai21.com/jamba",
+ "description": "First production-grade Mamba-Transformer hybrid model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Hybrid",
+ "LLM",
+ "AI",
+ "Mamba",
+ "AI21 Labs"
+ ],
+ "hardware_req": "36GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 36,
+ "context_window_tokens": 256000,
+ "parameters_total_b": 52,
+ "parameters_active_b": 52,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "grok-1",
+ "name": "Grok 1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://x.ai",
+ "description": "Massive 314B parameter open weights model from xAI.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Grok",
+ "LLM",
+ "AI",
+ "xAI"
+ ],
+ "hardware_req": "220GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 220,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 314,
+ "parameters_active_b": 314,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/grok.svg"
+ },
+ {
+ "slug": "deepseek-vl-7b",
+ "name": "DeepSeek VL 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://deepseek.com",
+ "description": "Vision language model from DeepSeek.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "DeepSeek",
+ "Vision",
+ "AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "deepseek-vl-1-3b",
+ "name": "DeepSeek VL 1.3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://deepseek.com",
+ "description": "Small vision language model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "DeepSeek",
+ "Vision",
+ "AI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.3,
+ "parameters_active_b": 1.3,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "whisper-large-v3",
+ "name": "Whisper Large v3",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/openai/whisper",
+ "description": "State-of-the-art automatic speech recognition model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "ASR",
+ "Audio",
+ "OpenAI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 0,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "whisper-medium",
+ "name": "Whisper Key",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/openai/whisper",
+ "description": "Balanced speech recognition model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "ASR",
+ "Audio",
+ "OpenAI"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 0,
+ "parameters_total_b": 0.7,
+ "parameters_active_b": 0.7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "seamless-m4t-large",
+ "name": "Seamless M4T Large",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://ai.meta.com/research/seamless-communication/",
+ "description": "Massive multilingual translation and transcription model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "Meta",
+ "Audio",
+ "Translation"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 0,
+ "parameters_total_b": 2.3,
+ "parameters_active_b": 2.3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "starcoder-2-15b",
+ "name": "StarCoder 2 15B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/bigcode",
+ "description": "The successor to the original StarCoder, trained on The Stack v2.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "ServiceNow",
+ "BigCode",
+ "AI",
+ "Coding"
+ ],
+ "hardware_req": "10GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 16384,
+ "parameters_total_b": 15,
+ "parameters_active_b": 15,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "starcoder-2-7b",
+ "name": "StarCoder 2 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/bigcode",
+ "description": "Mid-sized coding model from the BigCode project.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "ServiceNow",
+ "BigCode",
+ "AI",
+ "Coding"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 16384,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "starcoder-2-3b",
+ "name": "StarCoder 2 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/bigcode",
+ "description": "Efficient coding assistant.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "ServiceNow",
+ "BigCode",
+ "AI",
+ "Coding"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 16384,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "llava-1-6-34b",
+ "name": "LLaVA 1.6 34B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llava-vl.github.io",
+ "description": "High performance large multimodal model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multimodal",
+ "Vision",
+ "LLaVA Team",
+ "AI"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 34,
+ "parameters_active_b": 34,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "llava-1-6-13b",
+ "name": "LLaVA 1.6 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llava-vl.github.io",
+ "description": "Improved visual reasoning capabilities.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multimodal",
+ "Vision",
+ "LLaVA Team",
+ "AI"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "llava-1-6-7b",
+ "name": "LLaVA 1.6 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llava-vl.github.io",
+ "description": "Efficient multimodal model base on Vicuna.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multimodal",
+ "Vision",
+ "LLaVA Team",
+ "AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "bakllava",
+ "name": "BakLLaVA",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://llava-vl.github.io",
+ "description": "Mistral-based LLaVA variant.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multimodal",
+ "Vision",
+ "LLaVA Team",
+ "AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "bloom-176b",
+ "name": "BLOOM 176B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://bigscience.huggingface.co",
+ "description": "The world's largest open-multilingual language model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multilingual",
+ "Open Science",
+ "BigScience",
+ "AI"
+ ],
+ "hardware_req": "123GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 123,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 176,
+ "parameters_active_b": 176,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "bloomz-176b",
+ "name": "BLOOMZ 176B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://bigscience.huggingface.co",
+ "description": "Instruction tuned version of BLOOM.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multilingual",
+ "Open Science",
+ "BigScience",
+ "AI"
+ ],
+ "hardware_req": "123GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 123,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 176,
+ "parameters_active_b": 176,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "bloom-7b",
+ "name": "BLOOM 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://bigscience.huggingface.co",
+ "description": "Smaller variant of the BLOOM family.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multilingual",
+ "Open Science",
+ "BigScience",
+ "AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "pythia-12b",
+ "name": "Pythia 12B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/EleutherAI/pythia",
+ "description": "Designed to interpret and analyze LLM training dynamics.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Research",
+ "EleutherAI",
+ "AI"
+ ],
+ "hardware_req": "8GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 12,
+ "parameters_active_b": 12,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "pythia-6-9b",
+ "name": "Pythia 6.9B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/EleutherAI/pythia",
+ "description": "Standard research model size.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Research",
+ "EleutherAI",
+ "AI"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 6.9,
+ "parameters_active_b": 6.9,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "opt-175b",
+ "name": "OPT 175B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/facebookresearch/metaseq",
+ "description": "Meta's Open Pre-trained Transformer, matching GPT-3 performance.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Legacy",
+ "Meta"
+ ],
+ "hardware_req": "122GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 122,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 175,
+ "parameters_active_b": 175,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "opt-66b",
+ "name": "OPT 66B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/facebookresearch/metaseq",
+ "description": "Large scale OPT model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Legacy",
+ "Meta"
+ ],
+ "hardware_req": "46GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 46,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 66,
+ "parameters_active_b": 66,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "opt-30b",
+ "name": "OPT 30B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/facebookresearch/metaseq",
+ "description": "Mid-range OPT model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Legacy",
+ "Meta"
+ ],
+ "hardware_req": "21GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "h2o-danube-2-1-8b",
+ "name": "H2O Danube 2 1.8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://h2o.ai",
+ "description": "Highly efficient mobile-class model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "H2O",
+ "AI",
+ "H2O.ai"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 1.8,
+ "parameters_active_b": 1.8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "fuyu-8b",
+ "name": "Fuyu 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://adept.ai",
+ "description": "Simple architecture multimodal model for digital agents.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Multimodal",
+ "Adept",
+ "Agent",
+ "AI"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "nexusraven-v2-13b",
+ "name": "NexusRaven V2 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://nexusflow.ai",
+ "description": "Specialized in function calling and tool use.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Nexusflow",
+ "Raven"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "xverse-65b",
+ "name": "Xverse 65B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/xverse-ai",
+ "description": "Large multilingual model trained from scratch.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Shenzhen Yuanxiang",
+ "Multilingual"
+ ],
+ "hardware_req": "46GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 46,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 65,
+ "parameters_active_b": 65,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "xverse-13b",
+ "name": "Xverse 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/xverse-ai",
+ "description": "Efficient multilingual model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Shenzhen Yuanxiang",
+ "Multilingual"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "aquila2-34b",
+ "name": "Aquila2 34B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/FlagAI-Open/FlagAI",
+ "description": "Strong performance on reasoning benchmarks.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "BAAI",
+ "AI"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 34,
+ "parameters_active_b": 34,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "telechat-12b",
+ "name": "TeleChat 12B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/Tele-AI/Telechat",
+ "description": "Telecommunications oriented LLM.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Telecom",
+ "China Telecom",
+ "AI"
+ ],
+ "hardware_req": "8GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 8,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 12,
+ "parameters_active_b": 12,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "orion-14b",
+ "name": "Orion 14B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/OrionStarAI/Orion",
+ "description": "Chat and conversational model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Orion",
+ "AI",
+ "OrionStar"
+ ],
+ "hardware_req": "10GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "seallm-7b-v2-5",
+ "name": "SeaLLM 7B v2.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/SeaLLMs",
+ "description": "State-of-the-art multilingual LLM for Southeast Asian languages.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Southeast Asia",
+ "AI",
+ "Alibaba (sea-lion)"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "openbiollm-8b",
+ "name": "OpenBioLLM 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/saama",
+ "description": "Advanced medical LLM outperforming GPT-4 on biomedical benchmarks.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Saama AI",
+ "AI",
+ "LLM",
+ "Medical",
+ "Biology"
+ ],
+ "hardware_req": "6GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "openbiollm-70b",
+ "name": "OpenBioLLM 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/saama",
+ "description": "Massive scale biomedical research model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Saama AI",
+ "AI",
+ "LLM",
+ "Medical",
+ "Biology"
+ ],
+ "hardware_req": "49GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "meditron-70b",
+ "name": "Meditron 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/epfl-llm",
+ "description": "Open-access LLM adapted to the medical domain.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "EPFL",
+ "AI",
+ "Medical"
+ ],
+ "hardware_req": "49GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "meditron-7b",
+ "name": "Meditron 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/epfl-llm",
+ "description": "Efficient medical assistant model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "EPFL",
+ "AI",
+ "Medical"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "gorilla-openfunctions-v2",
+ "name": "Gorilla OpenFunctions v2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://gorilla.cs.berkeley.edu",
+ "description": "The best open source model for API function calling.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "AI",
+ "LLM",
+ "Agents",
+ "Berkeley",
+ "API"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "wizardlm-2-8x22b",
+ "name": "WizardLM 2 8x22B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/nlpxucan/WizardLM",
+ "description": "Top-tier reasoning model from Microsoft using Evol-Instruct.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Microsoft",
+ "AI",
+ "Evol-Instruct"
+ ],
+ "hardware_req": "99GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 99,
+ "context_window_tokens": 65536,
+ "parameters_total_b": 141,
+ "parameters_active_b": 141,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "wizardlm-2-7b",
+ "name": "WizardLM 2 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/nlpxucan/WizardLM",
+ "description": "Fastest and most capable 7B model for complex instructions.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Microsoft",
+ "AI",
+ "Evol-Instruct"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 32000,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "vicuna-13b-v1-5",
+ "name": "Vicuna 13B v1.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://lmsys.org",
+ "description": "The classic open chat model based on Llama 2.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "LMSYS",
+ "Chatbot"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 16384,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "vicuna-7b-v1-5",
+ "name": "Vicuna 7B v1.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://lmsys.org",
+ "description": "Highly efficient chat model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "LMSYS",
+ "Chatbot"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 16384,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "zephyr-7b-beta",
+ "name": "Zephyr 7B Beta",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/HuggingFaceH4",
+ "description": "Pioneered DPO (Direct Preference Optimization) for better alignment without RLHF.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Hugging Face H4",
+ "DPO"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 8192,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "zephyr-141b-a39b",
+ "name": "Zephyr 141B A39B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/HuggingFaceH4",
+ "description": "Experimental DPO fine-tune of Mixtral 8x22B.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "Hugging Face H4",
+ "DPO"
+ ],
+ "hardware_req": "99GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 99,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 141,
+ "parameters_active_b": 141,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "openelm-3b",
+ "name": "OpenELM 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/apple/corenet",
+ "description": "Apple's efficiently layered open model for devices.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "On-Device",
+ "Apple"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "openelm-1-1b",
+ "name": "OpenELM 1.1B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/apple/corenet",
+ "description": "Tiny Apple model for extreme edge cases.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "On-Device",
+ "Apple"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 1.1,
+ "parameters_active_b": 1.1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "mamba-2-8b",
+ "name": "Mamba 2.8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/state-spaces/mamba",
+ "description": "Linear-time sequence modeling with state space architecture.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Cartesia",
+ "AI",
+ "SSM",
+ "LLM",
+ "Non-Transformer"
+ ],
+ "hardware_req": "2GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 0,
+ "parameters_total_b": 2.8,
+ "parameters_active_b": 2.8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "mamba-1-4b",
+ "name": "Mamba 1.4B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/state-spaces/mamba",
+ "description": "Efficient non-transformer architecture.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Cartesia",
+ "AI",
+ "SSM",
+ "LLM",
+ "Non-Transformer"
+ ],
+ "hardware_req": "1GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 0,
+ "parameters_total_b": 1.4,
+ "parameters_active_b": 1.4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "rwkv-6-14b",
+ "name": "RWKV 6 14B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.rwkv.com",
+ "description": "RNN with Transformer-level performance and infinite context potential.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "RNN",
+ "BlinkDL"
+ ],
+ "hardware_req": "10GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 0,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "rwkv-6-7b",
+ "name": "RWKV 6 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.rwkv.com",
+ "description": "Efficient RNN language model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "AI",
+ "RNN",
+ "BlinkDL"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 0,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "cerebras-gpt-13b",
+ "name": "Cerebras GPT 13B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://www.cerebras.net",
+ "description": "Trained on the massive CS-2 wafer-scale engine.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "LLM",
+ "Cerebras",
+ "AI",
+ "Wafer-Scale"
+ ],
+ "hardware_req": "9GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 2048,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "qwen-audio-chat",
+ "name": "Qwen-Audio-Chat",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://github.com/QwenLM/Qwen-Audio",
+ "description": "Universal audio understanding model.",
+ "pros": [
+ "Open Source",
+ "High Performance",
+ "Run Locally"
+ ],
+ "cons": [
+ "Requires GPU inference",
+ "Management complexity"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "Open Weights",
+ "tags": [
+ "Audio",
+ "AI",
+ "Multimodal",
+ "Alibaba Cloud"
+ ],
+ "hardware_req": "5GB+ VRAM",
+ "hosting_type": "both",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 0,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-7b-instruct",
+ "name": "Qwen2.5 7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-7B-Instruct. 1073 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1073,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-7B",
+ "base_model:finetune:Qwen/Qwen2.5-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-7b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-0.6b",
+ "name": "Qwen3 0.6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-0.6B",
+ "description": "Open source model Qwen/Qwen3-0.6B. 1083 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1083,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-0.6B-Base",
+ "base_model:finetune:Qwen/Qwen3-0.6B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0.6,
+ "parameters_active_b": 0.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-0.6b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "gpt2",
+ "name": "Gpt2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2",
+ "description": "Open source model openai-community/gpt2. 3114 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3114,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "tflite",
+ "rust",
+ "onnx",
+ "safetensors",
+ "gpt2",
+ "exbert",
+ "en",
+ "doi:10.57967/hf/0039",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt2"
+ },
+ {
+ "slug": "qwen2.5-1.5b-instruct",
+ "name": "Qwen2.5 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct. 617 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 617,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-1.5b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-3b-instruct",
+ "name": "Qwen2.5 3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-3B-Instruct. 404 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 404,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-3B",
+ "base_model:finetune:Qwen/Qwen2.5-3B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-3b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "llama-3.1-8b-instruct",
+ "name": "Llama 3.1 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.1-8B-Instruct. 5467 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 5467,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-8B",
+ "base_model:finetune:meta-llama/Llama-3.1-8B",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.1-8b-instruct",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "gpt-oss-20b",
+ "name": "Gpt Oss 20B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai/gpt-oss-20b",
+ "description": "Open source model openai/gpt-oss-20b. 4378 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4378,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "conversational",
+ "arxiv:2508.10925",
+ "endpoints_compatible",
+ "8-bit",
+ "mxfp4",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 14,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 20,
+ "parameters_active_b": 3.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt-oss-20b"
+ },
+ {
+ "slug": "qwen2.5-0.5b-instruct",
+ "name": "Qwen2.5 0.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-0.5B-Instruct. 463 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 463,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-0.5B",
+ "base_model:finetune:Qwen/Qwen2.5-0.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0.5,
+ "parameters_active_b": 0.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-0.5b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-4b",
+ "name": "Qwen3 4B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B",
+ "description": "Open source model Qwen/Qwen3-4B. 552 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 552,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-4B-Base",
+ "base_model:finetune:Qwen/Qwen3-4B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-4b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-8b",
+ "name": "Qwen3 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-8B",
+ "description": "Open source model Qwen/Qwen3-8B. 940 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 940,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-8B-Base",
+ "base_model:finetune:Qwen/Qwen3-8B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-8b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-32b-instruct",
+ "name": "Qwen2.5 32B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-32B-Instruct. 328 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 328,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-32B",
+ "base_model:finetune:Qwen/Qwen2.5-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-32b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "opt-125m",
+ "name": "Opt 125M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/facebook/opt-125m",
+ "description": "Open source model facebook/opt-125m. 233 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 233,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "opt",
+ "en",
+ "arxiv:2205.01068",
+ "arxiv:2005.14165",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "opt-125m"
+ },
+ {
+ "slug": "qwen3-1.7b",
+ "name": "Qwen3 1.7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-1.7B",
+ "description": "Open source model Qwen/Qwen3-1.7B. 422 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 422,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-1.7B-Base",
+ "base_model:finetune:Qwen/Qwen3-1.7B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.7,
+ "parameters_active_b": 1.7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-1.7b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "tiny-qwen2forcausallm-2.5",
+ "name": "Tiny Qwen2Forcausallm 2.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
+ "description": "Open source model trl-internal-testing/tiny-Qwen2ForCausalLM-2.5. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "trl",
+ "conversational",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "tiny-qwen2forcausallm-2.5",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "dolphin-2.9.1-yi-1.5-34b",
+ "name": "Dolphin 2.9.1 Yi 1.5 34B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/dphn/dolphin-2.9.1-yi-1.5-34b",
+ "description": "Open source model dphn/dolphin-2.9.1-yi-1.5-34b. 54 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 54,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "generated_from_trainer",
+ "axolotl",
+ "conversational",
+ "dataset:cognitivecomputations/Dolphin-2.9",
+ "dataset:teknium/OpenHermes-2.5",
+ "dataset:m-a-p/CodeFeedback-Filtered-Instruction",
+ "dataset:cognitivecomputations/dolphin-coder",
+ "dataset:cognitivecomputations/samantha-data",
+ "dataset:microsoft/orca-math-word-problems-200k",
+ "dataset:Locutusque/function-calling-chatml",
+ "dataset:internlm/Agent-FLAN",
+ "base_model:01-ai/Yi-1.5-34B",
+ "base_model:finetune:01-ai/Yi-1.5-34B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 34,
+ "parameters_active_b": 34,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "dolphin-2.9.1-yi-1.5-34b"
+ },
+ {
+ "slug": "qwen3-embedding-0.6b",
+ "name": "Qwen3 Embedding 0.6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Embedding-0.6B",
+ "description": "Open source model Qwen/Qwen3-Embedding-0.6B. 879 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 879,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen3",
+ "transformers",
+ "sentence-similarity",
+ "feature-extraction",
+ "text-embeddings-inference",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-0.6B-Base",
+ "base_model:finetune:Qwen/Qwen3-0.6B-Base",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0.6,
+ "parameters_active_b": 0.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-embedding-0.6b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "gpt-oss-120b",
+ "name": "Gpt Oss 120B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai/gpt-oss-120b",
+ "description": "Open source model openai/gpt-oss-120b. 4503 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4503,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "conversational",
+ "arxiv:2508.10925",
+ "endpoints_compatible",
+ "8-bit",
+ "mxfp4",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 84,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 120,
+ "parameters_active_b": 5.1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt-oss-120b"
+ },
+ {
+ "slug": "qwen3-4b-instruct-2507",
+ "name": "Qwen3 4B Instruct 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507",
+ "description": "Open source model Qwen/Qwen3-4B-Instruct-2507. 730 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 730,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-4b-instruct-2507",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "moondream2",
+ "name": "Moondream2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/vikhyatk/moondream2",
+ "description": "Open source model vikhyatk/moondream2. 1373 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1373,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "moondream1",
+ "image-text-to-text",
+ "custom_code",
+ "doi:10.57967/hf/6762",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "moondream2"
+ },
+ {
+ "slug": "llama-3.2-1b-instruct",
+ "name": "Llama 3.2 1B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.2-1B-Instruct. 1292 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1292,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.2-1b-instruct",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen2-1.5b-instruct",
+ "name": "Qwen2 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2-1.5B-Instruct. 158 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 158,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2-1.5b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-0.5b-instruct",
+ "name": "Qwen2.5 Coder 0.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-0.5B-Instruct. 64 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 64,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-0.5B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-0.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0.5,
+ "parameters_active_b": 0.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-0.5b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "kimi-k2.5",
+ "name": "Kimi K2.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mlx-community/Kimi-K2.5",
+ "description": "Open source model mlx-community/Kimi-K2.5. 28 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 28,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "kimi_k25",
+ "conversational",
+ "custom_code",
+ "base_model:moonshotai/Kimi-K2.5",
+ "base_model:quantized:moonshotai/Kimi-K2.5",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "kimi-k2.5"
+ },
+ {
+ "slug": "mistral-7b-instruct-v0.2",
+ "name": "Mistral 7B Instruct V0.2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
+ "description": "Open source model mistralai/Mistral-7B-Instruct-v0.2. 3075 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3075,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "mistral",
+ "finetuned",
+ "mistral-common",
+ "conversational",
+ "arxiv:2310.06825",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "mistral-7b-instruct-v0.2",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "qwen3-30b-a3b-instruct-2507",
+ "name": "Qwen3 30B A3B Instruct 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507. 766 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 766,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2402.17463",
+ "arxiv:2407.02490",
+ "arxiv:2501.15383",
+ "arxiv:2404.06654",
+ "arxiv:2505.09388",
+ "eval-results",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-30b-a3b-instruct-2507",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "llm-jp-3-3.7b-instruct",
+ "name": "Llm Jp 3 3.7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct",
+ "description": "Open source model llm-jp/llm-jp-3-3.7b-instruct. 13 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "en",
+ "ja",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llm-jp-3-3.7b-instruct"
+ },
+ {
+ "slug": "llama-3.2-3b-instruct",
+ "name": "Llama 3.2 3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.2-3B-Instruct. 1986 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1986,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.2-3b-instruct",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "distilgpt2",
+ "name": "Distilgpt2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/distilbert/distilgpt2",
+ "description": "Open source model distilbert/distilgpt2. 609 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 609,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "tflite",
+ "rust",
+ "coreml",
+ "safetensors",
+ "gpt2",
+ "exbert",
+ "en",
+ "dataset:openwebtext",
+ "arxiv:1910.01108",
+ "arxiv:2201.08542",
+ "arxiv:2203.12574",
+ "arxiv:1910.09700",
+ "arxiv:1503.02531",
+ "model-index",
+ "co2_eq_emissions",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "distilgpt2"
+ },
+ {
+ "slug": "qwen3-embedding-8b",
+ "name": "Qwen3 Embedding 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Embedding-8B",
+ "description": "Open source model Qwen/Qwen3-Embedding-8B. 584 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 584,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen3",
+ "transformers",
+ "sentence-similarity",
+ "feature-extraction",
+ "text-embeddings-inference",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-8B-Base",
+ "base_model:finetune:Qwen/Qwen3-8B-Base",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-embedding-8b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "meta-llama-3-8b",
+ "name": "Meta Llama 3 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B",
+ "description": "Open source model meta-llama/Meta-Llama-3-8B. 6458 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 6458,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "meta-llama-3-8b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "tinyllama-1.1b-chat-v1.0",
+ "name": "Tinyllama 1.1B Chat V1.0",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+ "description": "Open source model TinyLlama/TinyLlama-1.1B-Chat-v1.0. 1526 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1526,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "en",
+ "dataset:cerebras/SlimPajama-627B",
+ "dataset:bigcode/starcoderdata",
+ "dataset:HuggingFaceH4/ultrachat_200k",
+ "dataset:HuggingFaceH4/ultrafeedback_binarized",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "tinyllama-1.1b-chat-v1.0"
+ },
+ {
+ "slug": "glm-4.7-flash",
+ "name": "Glm 4.7 Flash",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/zai-org/GLM-4.7-Flash",
+ "description": "Open source model zai-org/GLM-4.7-Flash. 1538 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1538,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "conversational",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "eval-results",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-4.7-flash"
+ },
+ {
+ "slug": "llama-3.2-1b",
+ "name": "Llama 3.2 1B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-1B",
+ "description": "Open source model meta-llama/Llama-3.2-1B. 2295 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2295,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.2-1b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen3-32b",
+ "name": "Qwen3 32B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-32B",
+ "description": "Open source model Qwen/Qwen3-32B. 656 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 656,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-32b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "llama-3.2-1b-instruct-fp8-dynamic",
+ "name": "Llama 3.2 1B Instruct Fp8 Dynamic",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic",
+ "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "llama",
+ "fp8",
+ "vllm",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.2-1B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.2-1b-instruct-fp8-dynamic",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-1.5b-instruct",
+ "name": "Qwen2.5 Coder 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-1.5B-Instruct. 106 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 106,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-1.5b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "meta-llama-3-8b-instruct",
+ "name": "Meta Llama 3 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct",
+ "description": "Open source model meta-llama/Meta-Llama-3-8B-Instruct. 4380 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4380,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "meta-llama-3-8b-instruct",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "gemma-3-1b-it",
+ "name": "Gemma 3 1B It",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google/gemma-3-1b-it",
+ "description": "Open source model google/gemma-3-1b-it. 842 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 842,
+ "language": "Python",
+ "license": "gemma",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gemma3_text",
+ "conversational",
+ "arxiv:1905.07830",
+ "arxiv:1905.10044",
+ "arxiv:1911.11641",
+ "arxiv:1904.09728",
+ "arxiv:1705.03551",
+ "arxiv:1911.01547",
+ "arxiv:1907.10641",
+ "arxiv:1903.00161",
+ "arxiv:2009.03300",
+ "arxiv:2304.06364",
+ "arxiv:2103.03874",
+ "arxiv:2110.14168",
+ "arxiv:2311.12022",
+ "arxiv:2108.07732",
+ "arxiv:2107.03374",
+ "arxiv:2210.03057",
+ "arxiv:2106.03193",
+ "arxiv:1910.11856",
+ "arxiv:2502.12404",
+ "arxiv:2502.21228",
+ "arxiv:2404.16816",
+ "arxiv:2104.12756",
+ "arxiv:2311.16502",
+ "arxiv:2203.10244",
+ "arxiv:2404.12390",
+ "arxiv:1810.12440",
+ "arxiv:1908.02660",
+ "arxiv:2312.11805",
+ "base_model:google/gemma-3-1b-pt",
+ "base_model:finetune:google/gemma-3-1b-pt",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gemma-3-1b-it",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "phi-2",
+ "name": "Phi 2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/phi-2",
+ "description": "Open source model microsoft/phi-2. 3425 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3425,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi",
+ "nlp",
+ "code",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "phi-2",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-7b-instruct",
+ "name": "Qwen2.5 Coder 7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct. 646 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 646,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-7B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-7b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-7b",
+ "name": "Qwen2.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-7B",
+ "description": "Open source model Qwen/Qwen2.5-7B. 264 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 264,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-7b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-1.5b",
+ "name": "Deepseek R1 Distill Qwen 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B. 1446 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1446,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-distill-qwen-1.5b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-v3",
+ "name": "Deepseek V3",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3",
+ "description": "Open source model deepseek-ai/DeepSeek-V3. 4024 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4024,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2412.19437",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-v3"
+ },
+ {
+ "slug": "gpt2-large",
+ "name": "Gpt2 Large",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2-large",
+ "description": "Open source model openai-community/gpt2-large. 344 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 344,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "onnx",
+ "safetensors",
+ "gpt2",
+ "en",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt2-large"
+ },
+ {
+ "slug": "glm-4.7-flash-mlx-8bit",
+ "name": "Glm 4.7 Flash Mlx 8Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-8bit",
+ "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-8bit. 9 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 9,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "mlx",
+ "conversational",
+ "en",
+ "zh",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "8-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-4.7-flash-mlx-8bit"
+ },
+ {
+ "slug": "glm-4.7-flash-mlx-6bit",
+ "name": "Glm 4.7 Flash Mlx 6Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-6bit",
+ "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-6bit. 7 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 7,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "mlx",
+ "conversational",
+ "en",
+ "zh",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "6-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-4.7-flash-mlx-6bit"
+ },
+ {
+ "slug": "qwen3-0.6b-fp8",
+ "name": "Qwen3 0.6B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-0.6B-FP8",
+ "description": "Open source model Qwen/Qwen3-0.6B-FP8. 56 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 56,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-0.6B",
+ "base_model:quantized:Qwen/Qwen3-0.6B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-0.6b-fp8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "llama-3.1-8b",
+ "name": "Llama 3.1 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-8B",
+ "description": "Open source model meta-llama/Llama-3.1-8B. 2065 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2065,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.1-8b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "pythia-160m",
+ "name": "Pythia 160M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/EleutherAI/pythia-160m",
+ "description": "Open source model EleutherAI/pythia-160m. 38 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 38,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "gpt_neox",
+ "causal-lm",
+ "pythia",
+ "en",
+ "dataset:EleutherAI/pile",
+ "arxiv:2304.01373",
+ "arxiv:2101.00027",
+ "arxiv:2201.07311",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "pythia-160m"
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-32b",
+ "name": "Deepseek R1 Distill Qwen 32B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B. 1517 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1517,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-distill-qwen-32b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "hunyuanocr",
+ "name": "Hunyuanocr",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/HunyuanOCR",
+ "description": "Open source model tencent/HunyuanOCR. 553 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 553,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "hunyuan_vl",
+ "ocr",
+ "hunyuan",
+ "vision-language",
+ "image-to-text",
+ "1B",
+ "end-to-end",
+ "image-text-to-text",
+ "conversational",
+ "multilingual",
+ "arxiv:2511.19575",
+ "base_model:tencent/HunyuanOCR",
+ "base_model:finetune:tencent/HunyuanOCR",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "hunyuanocr"
+ },
+ {
+ "slug": "qwen3-30b-a3b",
+ "name": "Qwen3 30B A3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B",
+ "description": "Open source model Qwen/Qwen3-30B-A3B. 855 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 855,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-30B-A3B-Base",
+ "base_model:finetune:Qwen/Qwen3-30B-A3B-Base",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-30b-a3b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-0.5b",
+ "name": "Qwen2.5 0.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B",
+ "description": "Open source model Qwen/Qwen2.5-0.5B. 372 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 372,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-0.5b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-32b-instruct-awq",
+ "name": "Qwen2.5 32B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-32B-Instruct-AWQ. 94 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 94,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-32B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-32B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-32b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "nvidia-nemotron-3-nano-30b-a3b-fp8",
+ "name": "Nvidia Nemotron 3 Nano 30B A3B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8",
+ "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8. 284 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 284,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "nemotron_h",
+ "feature-extraction",
+ "nvidia",
+ "pytorch",
+ "conversational",
+ "custom_code",
+ "en",
+ "es",
+ "fr",
+ "de",
+ "ja",
+ "it",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v1",
+ "dataset:nvidia/Nemotron-CC-v2",
+ "dataset:nvidia/Nemotron-Pretraining-SFT-v1",
+ "dataset:nvidia/Nemotron-CC-Math-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v2",
+ "dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
+ "dataset:nvidia/Nemotron-CC-v2.1",
+ "dataset:nvidia/Nemotron-CC-Code-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
+ "dataset:nvidia/Nemotron-Competitive-Programming-v1",
+ "dataset:nvidia/Nemotron-Math-v2",
+ "dataset:nvidia/Nemotron-Agentic-v1",
+ "dataset:nvidia/Nemotron-Math-Proofs-v1",
+ "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
+ "dataset:nvidia/Nemotron-Science-v1",
+ "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
+ "arxiv:2512.20848",
+ "arxiv:2512.20856",
+ "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "eval-results",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "nvidia-nemotron-3-nano-30b-a3b-fp8"
+ },
+ {
+ "slug": "qwen2.5-14b-instruct",
+ "name": "Qwen2.5 14B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-14B-Instruct. 312 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 312,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-14B",
+ "base_model:finetune:Qwen/Qwen2.5-14B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-14b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "nvidia-nemotron-3-nano-30b-a3b-bf16",
+ "name": "NVIDIA Nemotron 3 Nano 30B A3B BF16",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16. 634 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 634,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "nemotron_h",
+ "feature-extraction",
+ "nvidia",
+ "pytorch",
+ "conversational",
+ "custom_code",
+ "en",
+ "es",
+ "fr",
+ "de",
+ "ja",
+ "it",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v1",
+ "dataset:nvidia/Nemotron-CC-v2",
+ "dataset:nvidia/Nemotron-Pretraining-SFT-v1",
+ "dataset:nvidia/Nemotron-CC-Math-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v2",
+ "dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
+ "dataset:nvidia/Nemotron-CC-v2.1",
+ "dataset:nvidia/Nemotron-CC-Code-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
+ "dataset:nvidia/Nemotron-Competitive-Programming-v1",
+ "dataset:nvidia/Nemotron-Math-v2",
+ "dataset:nvidia/Nemotron-Agentic-v1",
+ "dataset:nvidia/Nemotron-Math-Proofs-v1",
+ "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
+ "dataset:nvidia/Nemotron-Science-v1",
+ "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
+ "arxiv:2512.20848",
+ "arxiv:2512.20856",
+ "eval-results",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "nvidia-nemotron-3-nano-30b-a3b-bf16"
+ },
+ {
+ "slug": "openelm-1_1b-instruct",
+ "name": "OpenELM 1.1B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/apple/OpenELM-1_1B-Instruct",
+ "description": "Open source model apple/OpenELM-1_1B-Instruct. 72 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 72,
+ "language": "Python",
+ "license": "apple-amlr",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "openelm",
+ "custom_code",
+ "arxiv:2404.14619",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "openelm-1_1b-instruct"
+ },
+ {
+ "slug": "tiny-random-llamaforcausallm",
+ "name": "Tiny Random LlamaForCausalLM",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/hmellor/tiny-random-LlamaForCausalLM",
+ "description": "Open source model hmellor/tiny-random-LlamaForCausalLM. 0 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "tiny-random-llamaforcausallm",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen3-next-80b-a3b-instruct",
+ "name": "Qwen3 Next 80B A3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct",
+ "description": "Open source model Qwen/Qwen3-Next-80B-A3B-Instruct. 937 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 937,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_next",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2404.06654",
+ "arxiv:2505.09388",
+ "arxiv:2501.15383",
+ "eval-results",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 56,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 80,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-next-80b-a3b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "h2ovl-mississippi-800m",
+ "name": "H2OVL Mississippi 800M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/h2oai/h2ovl-mississippi-800m",
+ "description": "Open source model h2oai/h2ovl-mississippi-800m. 39 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 39,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "h2ovl_chat",
+ "feature-extraction",
+ "gpt",
+ "llm",
+ "multimodal large language model",
+ "ocr",
+ "conversational",
+ "custom_code",
+ "en",
+ "arxiv:2410.13611",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "h2ovl-mississippi-800m"
+ },
+ {
+ "slug": "bloomz-560m",
+ "name": "BLOOMZ 560M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/bigscience/bloomz-560m",
+ "description": "Open source model bigscience/bloomz-560m. 137 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 137,
+ "language": "Python",
+ "license": "bigscience-bloom-rail-1.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tensorboard",
+ "safetensors",
+ "bloom",
+ "ak",
+ "ar",
+ "as",
+ "bm",
+ "bn",
+ "ca",
+ "code",
+ "en",
+ "es",
+ "eu",
+ "fon",
+ "fr",
+ "gu",
+ "hi",
+ "id",
+ "ig",
+ "ki",
+ "kn",
+ "lg",
+ "ln",
+ "ml",
+ "mr",
+ "ne",
+ "nso",
+ "ny",
+ "or",
+ "pa",
+ "pt",
+ "rn",
+ "rw",
+ "sn",
+ "st",
+ "sw",
+ "ta",
+ "te",
+ "tn",
+ "ts",
+ "tum",
+ "tw",
+ "ur",
+ "vi",
+ "wo",
+ "xh",
+ "yo",
+ "zh",
+ "zu",
+ "dataset:bigscience/xP3",
+ "arxiv:2211.01786",
+ "model-index",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "bloomz-560m"
+ },
+ {
+ "slug": "qwen2.5-1.5b-quantized.w8a8",
+ "name": "Qwen2.5 1.5B Quantized.W8A8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Qwen2.5-1.5B-quantized.w8a8",
+ "description": "Open source model RedHatAI/Qwen2.5-1.5B-quantized.w8a8. 2 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "neuralmagic",
+ "llmcompressor",
+ "conversational",
+ "en",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:quantized:Qwen/Qwen2.5-1.5B",
+ "8-bit",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-1.5b-quantized.w8a8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "h2ovl-mississippi-2b",
+ "name": "H2OVL Mississippi 2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/h2oai/h2ovl-mississippi-2b",
+ "description": "Open source model h2oai/h2ovl-mississippi-2b. 40 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 40,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "h2ovl_chat",
+ "feature-extraction",
+ "gpt",
+ "llm",
+ "multimodal large language model",
+ "ocr",
+ "conversational",
+ "custom_code",
+ "en",
+ "arxiv:2410.13611",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "h2ovl-mississippi-2b"
+ },
+ {
+ "slug": "llava-v1.5-7b",
+ "name": "LLaVA v1.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/liuhaotian/llava-v1.5-7b",
+ "description": "Open source model liuhaotian/llava-v1.5-7b. 537 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 537,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "llava",
+ "image-text-to-text",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llava-v1.5-7b"
+ },
+ {
+ "slug": "t5-3b",
+ "name": "T5 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google-t5/t5-3b",
+ "description": "Open source model google-t5/t5-3b. 51 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 51,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "safetensors",
+ "t5",
+ "summarization",
+ "translation",
+ "en",
+ "fr",
+ "ro",
+ "de",
+ "multilingual",
+ "dataset:c4",
+ "arxiv:1805.12471",
+ "arxiv:1708.00055",
+ "arxiv:1704.05426",
+ "arxiv:1606.05250",
+ "arxiv:1808.09121",
+ "arxiv:1810.12885",
+ "arxiv:1905.10044",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "t5-3b"
+ },
+ {
+ "slug": "qwen2.5-14b-instruct-awq",
+ "name": "Qwen2.5 14B Instruct AWQ",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-14B-Instruct-AWQ. 27 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 27,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-14B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-14B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-14b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "llama-3.2-3b",
+ "name": "Llama 3.2 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-3B",
+ "description": "Open source model meta-llama/Llama-3.2-3B. 697 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 697,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.2-3b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "phi-3-mini-4k-instruct-gptq-4bit",
+ "name": "Phi 3 Mini 4K Instruct GPTQ 4-bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/kaitchup/Phi-3-mini-4k-instruct-gptq-4bit",
+ "description": "Open source model kaitchup/Phi-3-mini-4k-instruct-gptq-4bit. 2 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "conversational",
+ "custom_code",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "gptq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "phi-3-mini-4k-instruct-gptq-4bit",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "qwen2.5-72b-instruct-awq",
+ "name": "Qwen2.5 72B Instruct AWQ",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-72B-Instruct-AWQ. 74 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 74,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-72B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-72B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 50,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 72,
+ "parameters_active_b": 72,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-72b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "smollm2-135m",
+ "name": "SmolLM2 135M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M",
+ "description": "Open source model HuggingFaceTB/SmolLM2-135M. 166 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 166,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "en",
+ "arxiv:2502.02737",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "smollm2-135m"
+ },
+ {
+ "slug": "llama-3.3-70b-instruct",
+ "name": "Llama 3.3 70B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.3-70B-Instruct. 2658 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2658,
+ "language": "Python",
+ "license": "llama3.3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "de",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-70B",
+ "base_model:finetune:meta-llama/Llama-3.1-70B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.3-70b-instruct",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen3-30b-a3b-instruct-2507-fp8",
+ "name": "Qwen3 30B A3B Instruct 2507 FP8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507-FP8. 112 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 112,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-30b-a3b-instruct-2507-fp8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-32b-instruct",
+ "name": "Qwen2.5 Coder 32B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct. 1995 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1995,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-32B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-32b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-235b-a22b-instruct-2507-fp8",
+ "name": "Qwen3 235B A22B Instruct 2507 FP8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
+ "description": "Open source model Qwen/Qwen3-235B-A22B-Instruct-2507-FP8. 145 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 145,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-235B-A22B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-235B-A22B-Instruct-2507",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 164,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 235,
+ "parameters_active_b": 22,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-235b-a22b-instruct-2507-fp8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-7b",
+ "name": "DeepSeek R1 Distill Qwen 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-7B. 787 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 787,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-distill-qwen-7b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "phi-3-mini-4k-instruct",
+ "name": "Phi 3 Mini 4K Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct",
+ "description": "Open source model microsoft/Phi-3-mini-4k-instruct. 1386 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1386,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "nlp",
+ "code",
+ "conversational",
+ "custom_code",
+ "en",
+ "fr",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "phi-3-mini-4k-instruct",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "qwen3-14b",
+ "name": "Qwen3 14B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-14B",
+ "description": "Open source model Qwen/Qwen3-14B. 366 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 366,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-14B-Base",
+ "base_model:finetune:Qwen/Qwen3-14B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-14b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-1.5b",
+ "name": "Qwen2.5 Coder 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B",
+ "description": "Open source model Qwen/Qwen2.5-Coder-1.5B. 81 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 81,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "qwen",
+ "qwen-coder",
+ "codeqwen",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-1.5b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "llama-3.1-70b-instruct",
+ "name": "Llama 3.1 70B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.1-70B-Instruct. 890 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 890,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-70B",
+ "base_model:finetune:meta-llama/Llama-3.1-70B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.1-70b-instruct",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "hunyuanimage-3.0",
+ "name": "HunyuanImage 3.0",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/HunyuanImage-3.0",
+ "description": "Open source model tencent/HunyuanImage-3.0. 640 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 640,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "hunyuan_image_3_moe",
+ "text-to-image",
+ "custom_code",
+ "arxiv:2509.23951",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 56,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 80,
+ "parameters_active_b": 13,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "hunyuanimage-3.0"
+ },
+ {
+ "slug": "qwen2.5-coder-7b-instruct-awq",
+ "name": "Qwen2.5 Coder 7B Instruct AWQ",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-AWQ. 19 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 19,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-7b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-coder-30b-a3b-instruct",
+ "name": "Qwen3 Coder 30B A3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct",
+ "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct. 945 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 945,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-coder-30b-a3b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-r1-0528",
+ "name": "DeepSeek R1 0528",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-0528",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-0528. 2400 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2400,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2501.12948",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-0528"
+ },
+ {
+ "slug": "tiny-random-llama-3",
+ "name": "Tiny Random Llama 3",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/llamafactory/tiny-random-Llama-3",
+ "description": "Open source model llamafactory/tiny-random-Llama-3. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "tiny-random-llama-3",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-32b-instruct-awq",
+ "name": "Qwen2.5 Coder 32B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct-AWQ. 33 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 33,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-32B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-Coder-32B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-32b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "mistral-7b-instruct-v0.1",
+ "name": "Mistral 7B Instruct V0.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1",
+ "description": "Open source model mistralai/Mistral-7B-Instruct-v0.1. 1826 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1826,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "mistral",
+ "finetuned",
+ "mistral-common",
+ "conversational",
+ "arxiv:2310.06825",
+ "base_model:mistralai/Mistral-7B-v0.1",
+ "base_model:finetune:mistralai/Mistral-7B-v0.1",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "mistral-7b-instruct-v0.1",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "gpt-oss-20b-mxfp4-q8",
+ "name": "Gpt Oss 20B Mxfp4 Q8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mlx-community/gpt-oss-20b-MXFP4-Q8",
+ "description": "Open source model mlx-community/gpt-oss-20b-MXFP4-Q8. 31 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 31,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "conversational",
+ "base_model:openai/gpt-oss-20b",
+ "base_model:quantized:openai/gpt-oss-20b",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 14,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 20,
+ "parameters_active_b": 20,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt-oss-20b-mxfp4-q8"
+ },
+ {
+ "slug": "qwen3-embedding-4b",
+ "name": "Qwen3 Embedding 4B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Embedding-4B",
+ "description": "Open source model Qwen/Qwen3-Embedding-4B. 224 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 224,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen3",
+ "transformers",
+ "sentence-similarity",
+ "feature-extraction",
+ "text-embeddings-inference",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-4B-Base",
+ "base_model:finetune:Qwen/Qwen3-4B-Base",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-embedding-4b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-1.5b-instruct-awq",
+ "name": "Qwen2.5 1.5B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct-AWQ. 6 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 6,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-1.5B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-1.5B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-1.5b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-fp8",
+ "name": "Meta Llama 3.1 8B Instruct Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8",
+ "description": "Open source model RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8. 44 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 44,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "fp8",
+ "vllm",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "meta-llama-3.1-8b-instruct-fp8",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "phi-4",
+ "name": "Phi 4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/phi-4",
+ "description": "Open source model microsoft/phi-4. 2220 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2220,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "phi",
+ "nlp",
+ "math",
+ "code",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2412.08905",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "phi-4",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "deepseek-r1",
+ "name": "Deepseek R1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1",
+ "description": "Open source model deepseek-ai/DeepSeek-R1. 13011 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13011,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2501.12948",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1"
+ },
+ {
+ "slug": "llama-3.2-1b-instruct-fp8",
+ "name": "Llama 3.2 1B Instruct Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8",
+ "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "llama",
+ "llama-3",
+ "neuralmagic",
+ "llmcompressor",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.2-1B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.2-1b-instruct-fp8",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-3.1-405b",
+ "name": "Llama 3.1 405B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-405B",
+ "description": "Open source model meta-llama/Llama-3.1-405B. 961 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 961,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 284,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 405,
+ "parameters_active_b": 405,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.1-405b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen3-4b-thinking-2507",
+ "name": "Qwen3 4B Thinking 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507",
+ "description": "Open source model Qwen/Qwen3-4B-Thinking-2507. 548 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 548,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-4b-thinking-2507",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "gpt2-medium",
+ "name": "Gpt2 Medium",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2-medium",
+ "description": "Open source model openai-community/gpt2-medium. 193 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 193,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "onnx",
+ "safetensors",
+ "gpt2",
+ "en",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt2-medium"
+ },
+ {
+ "slug": "tiny-gpt2",
+ "name": "Tiny Gpt2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/sshleifer/tiny-gpt2",
+ "description": "Open source model sshleifer/tiny-gpt2. 34 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 34,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "gpt2",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "tiny-gpt2"
+ },
+ {
+ "slug": "hermes-3-llama-3.1-8b",
+ "name": "Hermes 3 Llama 3.1 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B",
+ "description": "Open source model NousResearch/Hermes-3-Llama-3.1-8B. 385 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 385,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "Llama-3",
+ "instruct",
+ "finetune",
+ "chatml",
+ "gpt4",
+ "synthetic data",
+ "distillation",
+ "function calling",
+ "json mode",
+ "axolotl",
+ "roleplaying",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2408.11857",
+ "base_model:meta-llama/Llama-3.1-8B",
+ "base_model:finetune:meta-llama/Llama-3.1-8B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "hermes-3-llama-3.1-8b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "phi-3.5-vision-instruct",
+ "name": "Phi 3.5 Vision Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-3.5-vision-instruct",
+ "description": "Open source model microsoft/Phi-3.5-vision-instruct. 726 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 726,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3_v",
+ "nlp",
+ "code",
+ "vision",
+ "image-text-to-text",
+ "conversational",
+ "custom_code",
+ "multilingual",
+ "arxiv:2404.14219",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "phi-3.5-vision-instruct",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "minimax-m2",
+ "name": "Minimax M2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/MiniMaxAI/MiniMax-M2",
+ "description": "Open source model MiniMaxAI/MiniMax-M2. 1485 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1485,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "minimax_m2",
+ "conversational",
+ "custom_code",
+ "arxiv:2504.07164",
+ "arxiv:2509.06501",
+ "arxiv:2509.13160",
+ "eval-results",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "minimax-m2"
+ },
+ {
+ "slug": "deepseek-r1-distill-llama-8b",
+ "name": "Deepseek R1 Distill Llama 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-8B. 843 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 843,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-distill-llama-8b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen3-14b-awq",
+ "name": "Qwen3 14B Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-14B-AWQ",
+ "description": "Open source model Qwen/Qwen3-14B-AWQ. 57 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 57,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-14B",
+ "base_model:quantized:Qwen/Qwen3-14B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-14b-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-235b-a22b",
+ "name": "Qwen3 235B A22B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B",
+ "description": "Open source model Qwen/Qwen3-235B-A22B. 1075 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1075,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 164,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 235,
+ "parameters_active_b": 22,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-235b-a22b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-awq-int4",
+ "name": "Meta Llama 3.1 8B Instruct Awq Int4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
+ "description": "Open source model hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4. 87 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 87,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "llama-3.1",
+ "meta",
+ "autoawq",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "meta-llama-3.1-8b-instruct-awq-int4",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "lfm2.5-1.2b-instruct-mlx-8bit",
+ "name": "Lfm2.5 1.2B Instruct Mlx 8Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit",
+ "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit. 1 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "lfm2.5",
+ "edge",
+ "mlx",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "base_model:LiquidAI/LFM2.5-1.2B-Instruct",
+ "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
+ "endpoints_compatible",
+ "8-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "lfm2.5-1.2b-instruct-mlx-8bit"
+ },
+ {
+ "slug": "glm-4.7-flash-gguf",
+ "name": "Glm 4.7 Flash Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF",
+ "description": "Open source model unsloth/GLM-4.7-Flash-GGUF. 482 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 482,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "gguf",
+ "unsloth",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us",
+ "imatrix",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-4.7-flash-gguf"
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-14b",
+ "name": "Deepseek R1 Distill Qwen 14B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-14B. 603 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 603,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-distill-qwen-14b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "lfm2.5-1.2b-instruct-mlx-6bit",
+ "name": "Lfm2.5 1.2B Instruct Mlx 6Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit",
+ "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit. 4 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "lfm2.5",
+ "edge",
+ "mlx",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "base_model:LiquidAI/LFM2.5-1.2B-Instruct",
+ "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
+ "endpoints_compatible",
+ "6-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "lfm2.5-1.2b-instruct-mlx-6bit"
+ },
+ {
+ "slug": "lfm2.5-1.2b-instruct-mlx-4bit",
+ "name": "Lfm2.5 1.2B Instruct Mlx 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit",
+ "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit. 1 like on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "lfm2.5",
+ "edge",
+ "mlx",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "base_model:LiquidAI/LFM2.5-1.2B-Instruct",
+ "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
+ "endpoints_compatible",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "lfm2.5-1.2b-instruct-mlx-4bit"
+ },
+ {
+ "slug": "vicuna-7b-v1.5",
+ "name": "Vicuna 7B V1.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmsys/vicuna-7b-v1.5",
+ "description": "Open source model lmsys/vicuna-7b-v1.5. 387 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 387,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "llama",
+ "arxiv:2307.09288",
+ "arxiv:2306.05685",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "vicuna-7b-v1.5"
+ },
+ {
+ "slug": "llama-3.2-1b-instruct-q8_0-gguf",
+ "name": "Llama 3.2 1B Instruct Q8_0 Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF",
+ "description": "Open source model hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF. 43 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 43,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "gguf",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama",
+ "llama-3",
+ "llama-cpp",
+ "gguf-my-repo",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.2-1B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
+ "endpoints_compatible",
+ "region:us",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.2-1b-instruct-q8_0-gguf",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "llama-3.3-70b-instruct-awq",
+ "name": "Llama 3.3 70B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/kosbu/Llama-3.3-70B-Instruct-AWQ",
+ "description": "Open source model kosbu/Llama-3.3-70B-Instruct-AWQ. 10 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 10,
+ "language": "Python",
+ "license": "llama3.3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-3",
+ "awq",
+ "conversational",
+ "en",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "de",
+ "base_model:meta-llama/Llama-3.3-70B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.3-70B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-3.3-70b-instruct-awq",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen3-32b-fp8",
+ "name": "Qwen3 32B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-32B-FP8",
+ "description": "Open source model Qwen/Qwen3-32B-FP8. 80 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 80,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-32B",
+ "base_model:quantized:Qwen/Qwen3-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-32b-fp8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "gpt2-xl",
+ "name": "Gpt2 Xl",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2-xl",
+ "description": "Open source model openai-community/gpt2-xl. 373 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 373,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "safetensors",
+ "gpt2",
+ "en",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt2-xl"
+ },
+ {
+ "slug": "qwen3-4b-instruct-2507-fp8",
+ "name": "Qwen3 4B Instruct 2507 Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507-FP8",
+ "description": "Open source model Qwen/Qwen3-4B-Instruct-2507-FP8. 65 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 65,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-4B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-4b-instruct-2507-fp8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "xlnet-base-cased",
+ "name": "Xlnet Base Cased",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/xlnet/xlnet-base-cased",
+ "description": "Open source model xlnet/xlnet-base-cased. 80 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 80,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "rust",
+ "xlnet",
+ "en",
+ "dataset:bookcorpus",
+ "dataset:wikipedia",
+ "arxiv:1906.08237",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "xlnet-base-cased"
+ },
+ {
+ "slug": "llama-2-7b-hf",
+ "name": "Llama 2 7B Hf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-2-7b-hf",
+ "description": "Open source model meta-llama/Llama-2-7b-hf. 2268 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2268,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-2",
+ "en",
+ "arxiv:2307.09288",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-2-7b-hf",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen2.5-math-7b-instruct",
+ "name": "Qwen2.5 Math 7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Math-7B-Instruct. 89 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 89,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2409.12122",
+ "base_model:Qwen/Qwen2.5-Math-7B",
+ "base_model:finetune:Qwen/Qwen2.5-Math-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-math-7b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-reranker-0.6b",
+ "name": "Qwen3 Reranker 0.6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Reranker-0.6B",
+ "description": "Open source model Qwen/Qwen3-Reranker-0.6B. 305 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 305,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "text-ranking",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-0.6B-Base",
+ "base_model:finetune:Qwen/Qwen3-0.6B-Base",
+ "text-embeddings-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0.6,
+ "parameters_active_b": 0.6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-reranker-0.6b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-1.5b",
+ "name": "Qwen2.5 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B",
+ "description": "Open source model Qwen/Qwen2.5-1.5B. 165 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 165,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-1.5b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-30b-a3b-thinking-2507",
+ "name": "Qwen3 30B A3B Thinking 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Thinking-2507",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-Thinking-2507. 359 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 359,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2402.17463",
+ "arxiv:2407.02490",
+ "arxiv:2501.15383",
+ "arxiv:2404.06654",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-30b-a3b-thinking-2507",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "smollm2-135m-instruct",
+ "name": "Smollm2 135M Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct",
+ "description": "Open source model HuggingFaceTB/SmolLM2-135M-Instruct. 292 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 292,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "tensorboard",
+ "onnx",
+ "safetensors",
+ "llama",
+ "transformers.js",
+ "conversational",
+ "en",
+ "arxiv:2502.02737",
+ "base_model:HuggingFaceTB/SmolLM2-135M",
+ "base_model:quantized:HuggingFaceTB/SmolLM2-135M",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "smollm2-135m-instruct"
+ },
+ {
+ "slug": "qwen2.5-math-1.5b",
+ "name": "Qwen2.5 Math 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Math-1.5B",
+ "description": "Open source model Qwen/Qwen2.5-Math-1.5B. 100 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 100,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2409.12122",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-math-1.5b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "glm-4.5-air-awq-4bit",
+ "name": "Glm 4.5 Air Awq 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/cyankiwi/GLM-4.5-Air-AWQ-4bit",
+ "description": "Open source model cyankiwi/GLM-4.5-Air-AWQ-4bit. 27 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 27,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe",
+ "conversational",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "base_model:zai-org/GLM-4.5-Air",
+ "base_model:quantized:zai-org/GLM-4.5-Air",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-4.5-air-awq-4bit"
+ },
+ {
+ "slug": "llama-2-7b-chat-hf",
+ "name": "Llama 2 7B Chat Hf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
+ "description": "Open source model meta-llama/Llama-2-7b-chat-hf. 4705 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4705,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-2",
+ "conversational",
+ "en",
+ "arxiv:2307.09288",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-2-7b-chat-hf",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-7b-instruct-gptq-int4",
+ "name": "Qwen2.5 Coder 7B Instruct Gptq Int4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4. 12 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 12,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "gptq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-7b-instruct-gptq-int4",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-vl-30b-a3b-instruct-awq",
+ "name": "Qwen3 Vl 30B A3B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ",
+ "description": "Open source model QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ. 38 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 38,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_vl_moe",
+ "image-text-to-text",
+ "AWQ",
+ "vLLM",
+ "conversational",
+ "arxiv:2505.09388",
+ "arxiv:2502.13923",
+ "arxiv:2409.12191",
+ "arxiv:2308.12966",
+ "base_model:Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "base_model:quantized:Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-vl-30b-a3b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-8b-base",
+ "name": "Qwen3 8B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-8B-Base",
+ "description": "Open source model Qwen/Qwen3-8B-Base. 82 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 82,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-8b-base",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-14b-instruct",
+ "name": "Qwen2.5 Coder 14B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-14B-Instruct. 140 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 140,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-14B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-14B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-14b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "stories15m_moe",
+ "name": "Stories15M_Moe",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/ggml-org/stories15M_MOE",
+ "description": "Open source model ggml-org/stories15M_MOE. 5 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 5,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gguf",
+ "mixtral",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "stories15m_moe"
+ },
+ {
+ "slug": "opt-1.3b",
+ "name": "Opt 1.3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/facebook/opt-1.3b",
+ "description": "Open source model facebook/opt-1.3b. 182 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 182,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "opt",
+ "en",
+ "arxiv:2205.01068",
+ "arxiv:2005.14165",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.3,
+ "parameters_active_b": 1.3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "opt-1.3b"
+ },
+ {
+ "slug": "minimax-m2-awq",
+ "name": "Minimax M2 Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/QuantTrio/MiniMax-M2-AWQ",
+ "description": "Open source model QuantTrio/MiniMax-M2-AWQ. 8 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 8,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mixtral",
+ "vLLM",
+ "AWQ",
+ "conversational",
+ "arxiv:2504.07164",
+ "arxiv:2509.06501",
+ "arxiv:2509.13160",
+ "base_model:MiniMaxAI/MiniMax-M2",
+ "base_model:quantized:MiniMaxAI/MiniMax-M2",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "minimax-m2-awq"
+ },
+ {
+ "slug": "glm-4.7-flash-nvfp4",
+ "name": "Glm 4.7 Flash Nvfp4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/GadflyII/GLM-4.7-Flash-NVFP4",
+ "description": "Open source model GadflyII/GLM-4.7-Flash-NVFP4. 62 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 62,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "moe",
+ "nvfp4",
+ "quantized",
+ "vllm",
+ "glm",
+ "30b",
+ "conversational",
+ "en",
+ "zh",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-4.7-flash-nvfp4"
+ },
+ {
+ "slug": "hy-mt1.5-7b",
+ "name": "Hy Mt1.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/HY-MT1.5-7B",
+ "description": "Open source model tencent/HY-MT1.5-7B. 133 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 133,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "hunyuan_v1_dense",
+ "translation",
+ "zh",
+ "en",
+ "fr",
+ "pt",
+ "es",
+ "ja",
+ "tr",
+ "ru",
+ "ar",
+ "ko",
+ "th",
+ "it",
+ "de",
+ "vi",
+ "ms",
+ "id",
+ "tl",
+ "hi",
+ "pl",
+ "cs",
+ "nl",
+ "km",
+ "my",
+ "fa",
+ "gu",
+ "ur",
+ "te",
+ "mr",
+ "he",
+ "bn",
+ "ta",
+ "uk",
+ "bo",
+ "kk",
+ "mn",
+ "ug",
+ "arxiv:2512.24092",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "hy-mt1.5-7b"
+ },
+ {
+ "slug": "gemma-2-27b-it",
+ "name": "Gemma 2 27B It",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google/gemma-2-27b-it",
+ "description": "Open source model google/gemma-2-27b-it. 559 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 559,
+ "language": "Python",
+ "license": "gemma",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gemma2",
+ "conversational",
+ "arxiv:2009.03300",
+ "arxiv:1905.07830",
+ "arxiv:1911.11641",
+ "arxiv:1904.09728",
+ "arxiv:1905.10044",
+ "arxiv:1907.10641",
+ "arxiv:1811.00937",
+ "arxiv:1809.02789",
+ "arxiv:1911.01547",
+ "arxiv:1705.03551",
+ "arxiv:2107.03374",
+ "arxiv:2108.07732",
+ "arxiv:2110.14168",
+ "arxiv:2009.11462",
+ "arxiv:2101.11718",
+ "arxiv:2110.08193",
+ "arxiv:1804.09301",
+ "arxiv:2109.07958",
+ "arxiv:1804.06876",
+ "arxiv:2103.03874",
+ "arxiv:2304.06364",
+ "arxiv:2206.04615",
+ "arxiv:2203.09509",
+ "base_model:google/gemma-2-27b",
+ "base_model:finetune:google/gemma-2-27b",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 19,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 27,
+ "parameters_active_b": 27,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gemma-2-27b-it",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "qwen3-coder-next-gguf",
+ "name": "Qwen3 Coder Next Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Qwen3-Coder-Next-GGUF",
+ "description": "Open source model unsloth/Qwen3-Coder-Next-GGUF. 347 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 347,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "gguf",
+ "qwen3_next",
+ "unsloth",
+ "qwen",
+ "qwen3",
+ "base_model:Qwen/Qwen3-Coder-Next",
+ "base_model:quantized:Qwen/Qwen3-Coder-Next",
+ "endpoints_compatible",
+ "region:us",
+ "imatrix",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-coder-next-gguf",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "gte-qwen2-1.5b-instruct",
+ "name": "Gte Qwen2 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Alibaba-NLP/gte-Qwen2-1.5B-instruct",
+ "description": "Open source model Alibaba-NLP/gte-Qwen2-1.5B-instruct. 229 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 229,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen2",
+ "mteb",
+ "transformers",
+ "Qwen2",
+ "sentence-similarity",
+ "custom_code",
+ "arxiv:2308.03281",
+ "model-index",
+ "text-embeddings-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gte-qwen2-1.5b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "lfm2-1.2b",
+ "name": "Lfm2 1.2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/LiquidAI/LFM2-1.2B",
+ "description": "Open source model LiquidAI/LFM2-1.2B. 349 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 349,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "edge",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "arxiv:2511.23404",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.2,
+ "parameters_active_b": 1.2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "lfm2-1.2b"
+ },
+ {
+ "slug": "saiga_llama3_8b",
+ "name": "Saiga_Llama3_8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/IlyaGusev/saiga_llama3_8b",
+ "description": "Open source model IlyaGusev/saiga_llama3_8b. 137 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 137,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "ru",
+ "dataset:IlyaGusev/saiga_scored",
+ "doi:10.57967/hf/2368",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "saiga_llama3_8b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen3-1.7b-base",
+ "name": "Qwen3 1.7B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-1.7B-Base",
+ "description": "Open source model Qwen/Qwen3-1.7B-Base. 62 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 62,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.7,
+ "parameters_active_b": 1.7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-1.7b-base",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "mistral-7b-v0.3-bnb-4bit",
+ "name": "Mistral 7B V0.3 Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/mistral-7b-v0.3-bnb-4bit",
+ "description": "Open source model unsloth/mistral-7b-v0.3-bnb-4bit. 22 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 22,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mistral",
+ "unsloth",
+ "mistral-7b",
+ "en",
+ "base_model:mistralai/Mistral-7B-v0.3",
+ "base_model:quantized:mistralai/Mistral-7B-v0.3",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "mistral-7b-v0.3-bnb-4bit",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "gemma-2-2b-it",
+ "name": "Gemma 2 2B It",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google/gemma-2-2b-it",
+ "description": "Open source model google/gemma-2-2b-it. 1285 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1285,
+ "language": "Python",
+ "license": "gemma",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gemma2",
+ "conversational",
+ "arxiv:2009.03300",
+ "arxiv:1905.07830",
+ "arxiv:1911.11641",
+ "arxiv:1904.09728",
+ "arxiv:1905.10044",
+ "arxiv:1907.10641",
+ "arxiv:1811.00937",
+ "arxiv:1809.02789",
+ "arxiv:1911.01547",
+ "arxiv:1705.03551",
+ "arxiv:2107.03374",
+ "arxiv:2108.07732",
+ "arxiv:2110.14168",
+ "arxiv:2009.11462",
+ "arxiv:2101.11718",
+ "arxiv:2110.08193",
+ "arxiv:1804.09301",
+ "arxiv:2109.07958",
+ "arxiv:1804.06876",
+ "arxiv:2103.03874",
+ "arxiv:2304.06364",
+ "arxiv:1903.00161",
+ "arxiv:2206.04615",
+ "arxiv:2203.09509",
+ "arxiv:2403.13793",
+ "base_model:google/gemma-2-2b",
+ "base_model:finetune:google/gemma-2-2b",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gemma-2-2b-it",
+ "logo_url": "/logos/gemma.svg"
+ },
+ {
+ "slug": "phi-4-multimodal-instruct",
+ "name": "Phi 4 Multimodal Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-4-multimodal-instruct",
+ "description": "Open source model microsoft/Phi-4-multimodal-instruct. 1573 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1573,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi4mm",
+ "nlp",
+ "code",
+ "audio",
+ "automatic-speech-recognition",
+ "speech-summarization",
+ "speech-translation",
+ "visual-question-answering",
+ "phi-4-multimodal",
+ "phi",
+ "phi-4-mini",
+ "custom_code",
+ "multilingual",
+ "ar",
+ "zh",
+ "cs",
+ "da",
+ "nl",
+ "en",
+ "fi",
+ "fr",
+ "de",
+ "he",
+ "hu",
+ "it",
+ "ja",
+ "ko",
+ "no",
+ "pl",
+ "pt",
+ "ru",
+ "es",
+ "sv",
+ "th",
+ "tr",
+ "uk",
+ "arxiv:2503.01743",
+ "arxiv:2407.13833",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "phi-4-multimodal-instruct",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "pythia-70m-deduped",
+ "name": "Pythia 70M Deduped",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/EleutherAI/pythia-70m-deduped",
+ "description": "Open source model EleutherAI/pythia-70m-deduped. 27 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 27,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "gpt_neox",
+ "causal-lm",
+ "pythia",
+ "en",
+ "dataset:EleutherAI/the_pile_deduplicated",
+ "arxiv:2304.01373",
+ "arxiv:2101.00027",
+ "arxiv:2201.07311",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "pythia-70m-deduped"
+ },
+ {
+ "slug": "dialogpt-medium",
+ "name": "DialoGPT Medium",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/DialoGPT-medium",
+ "description": "Open source model microsoft/DialoGPT-medium. 433 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 433,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "gpt2",
+ "conversational",
+ "arxiv:1911.00536",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "dialogpt-medium"
+ },
+ {
+ "slug": "gpt-oss-20b-bf16",
+ "name": "Gpt Oss 20B Bf16",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/gpt-oss-20b-BF16",
+ "description": "Open source model unsloth/gpt-oss-20b-BF16. 29 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 29,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "unsloth",
+ "conversational",
+ "base_model:openai/gpt-oss-20b",
+ "base_model:finetune:openai/gpt-oss-20b",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 14,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 20,
+ "parameters_active_b": 20,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "gpt-oss-20b-bf16"
+ },
+ {
+ "slug": "qwen2.5-72b-instruct",
+ "name": "Qwen2.5 72B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-72B-Instruct. 910 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 910,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-72B",
+ "base_model:finetune:Qwen/Qwen2.5-72B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 50,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 72,
+ "parameters_active_b": 72,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-72b-instruct",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-32b-awq",
+ "name": "Qwen3 32B Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-32B-AWQ",
+ "description": "Open source model Qwen/Qwen3-32B-AWQ. 125 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 125,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-32B",
+ "base_model:quantized:Qwen/Qwen3-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-32b-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "mimo-v2-flash",
+ "name": "MiMo V2 Flash",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/XiaomiMiMo/MiMo-V2-Flash",
+ "description": "Open source model XiaomiMiMo/MiMo-V2-Flash. 628 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 628,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mimo_v2_flash",
+ "conversational",
+ "custom_code",
+ "eval-results",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "mimo-v2-flash"
+ },
+ {
+ "slug": "qwen3-coder-30b-a3b-instruct-fp8",
+ "name": "Qwen3 Coder 30B A3B Instruct Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8",
+ "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8. 158 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 158,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-coder-30b-a3b-instruct-fp8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen3-8b-fp8",
+ "name": "Qwen3 8B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-8B-FP8",
+ "description": "Open source model Qwen/Qwen3-8B-FP8. 56 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 56,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-8B",
+ "base_model:quantized:Qwen/Qwen3-8B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-8b-fp8",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-v3.2",
+ "name": "Deepseek V3.2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3.2",
+ "description": "Open source model deepseek-ai/DeepSeek-V3.2. 1251 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1251,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v32",
+ "conversational",
+ "base_model:deepseek-ai/DeepSeek-V3.2-Exp-Base",
+ "base_model:finetune:deepseek-ai/DeepSeek-V3.2-Exp-Base",
+ "eval-results",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-v3.2"
+ },
+ {
+ "slug": "qwen3-coder-next",
+ "name": "Qwen3 Coder Next",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Coder-Next",
+ "description": "Open source model Qwen/Qwen3-Coder-Next. 912 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 912,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_next",
+ "conversational",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-coder-next",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2-0.5b",
+ "name": "Qwen2 0.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2-0.5B",
+ "description": "Open source model Qwen/Qwen2-0.5B. 164 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 164,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "pretrained",
+ "conversational",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0.5,
+ "parameters_active_b": 0.5,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2-0.5b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "mistral-7b-v0.1",
+ "name": "Mistral 7B V0.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mistralai/Mistral-7B-v0.1",
+ "description": "Open source model mistralai/Mistral-7B-v0.1. 4042 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4042,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "mistral",
+ "pretrained",
+ "mistral-common",
+ "en",
+ "arxiv:2310.06825",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "mistral-7b-v0.1",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "kimi-k2-thinking",
+ "name": "Kimi K2 Thinking",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/moonshotai/Kimi-K2-Thinking",
+ "description": "Open source model moonshotai/Kimi-K2-Thinking. 1670 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1670,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "kimi_k2",
+ "conversational",
+ "custom_code",
+ "eval-results",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "kimi-k2-thinking"
+ },
+ {
+ "slug": "deepseek-r1-0528-qwen3-8b-mlx-4bit",
+ "name": "Deepseek R1 0528 Qwen3 8B Mlx 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit",
+ "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit. 7 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 7,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-0528-qwen3-8b-mlx-4bit",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-7b-instruct-awq",
+ "name": "Qwen2.5 7B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-7B-Instruct-AWQ. 36 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 36,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-7B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-7B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-7b-instruct-awq",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "points-reader",
+ "name": "POINTS Reader",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/POINTS-Reader",
+ "description": "Open source model tencent/POINTS-Reader. 100 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 100,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "image-text-to-text",
+ "conversational",
+ "custom_code",
+ "arxiv:2509.01215",
+ "arxiv:2412.08443",
+ "arxiv:2409.04828",
+ "arxiv:2405.11850",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "points-reader"
+ },
+ {
+ "slug": "qwen3-4b-base",
+ "name": "Qwen3 4B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Base",
+ "description": "Open source model Qwen/Qwen3-4B-Base. 80 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 80,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-4b-base",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "step-3.5-flash",
+ "name": "Step 3.5 Flash",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/stepfun-ai/Step-3.5-Flash",
+ "description": "Open source model stepfun-ai/Step-3.5-Flash. 621 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 621,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "step3p5",
+ "conversational",
+ "custom_code",
+ "arxiv:2602.10604",
+ "arxiv:2601.05593",
+ "arxiv:2507.19427",
+ "eval-results",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "step-3.5-flash"
+ },
+ {
+ "slug": "kogpt2-base-v2",
+ "name": "Kogpt2 Base V2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/skt/kogpt2-base-v2",
+ "description": "Open source model skt/kogpt2-base-v2. 60 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 60,
+ "language": "Python",
+ "license": "cc-by-nc-sa-4.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "jax",
+ "gpt2",
+ "ko",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "kogpt2-base-v2"
+ },
+ {
+ "slug": "parler-tts-mini-multilingual-v1.1",
+ "name": "Parler Tts Mini Multilingual V1.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/parler-tts/parler-tts-mini-multilingual-v1.1",
+ "description": "Open source model parler-tts/parler-tts-mini-multilingual-v1.1. 54 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 54,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "parler_tts",
+ "text-to-speech",
+ "annotation",
+ "en",
+ "fr",
+ "es",
+ "pt",
+ "pl",
+ "de",
+ "nl",
+ "it",
+ "dataset:facebook/multilingual_librispeech",
+ "dataset:parler-tts/libritts_r_filtered",
+ "dataset:parler-tts/libritts-r-filtered-speaker-descriptions",
+ "dataset:parler-tts/mls_eng",
+ "dataset:parler-tts/mls-eng-speaker-descriptions",
+ "dataset:ylacombe/mls-annotated",
+ "dataset:ylacombe/cml-tts-filtered-annotated",
+ "dataset:PHBJT/cml-tts-filtered",
+ "arxiv:2402.01912",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "parler-tts-mini-multilingual-v1.1"
+ },
+ {
+ "slug": "qwen3-reranker-8b",
+ "name": "Qwen3 Reranker 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Reranker-8B",
+ "description": "Open source model Qwen/Qwen3-Reranker-8B. 213 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 213,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "text-ranking",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-8B-Base",
+ "base_model:finetune:Qwen/Qwen3-8B-Base",
+ "text-embeddings-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-reranker-8b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-r1-0528-qwen3-8b-mlx-8bit",
+ "name": "Deepseek R1 0528 Qwen3 8B Mlx 8Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit",
+ "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit. 13 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "8-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-0528-qwen3-8b-mlx-8bit",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "powermoe-3b",
+ "name": "Powermoe 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/ibm-research/PowerMoE-3b",
+ "description": "Open source model ibm-research/PowerMoE-3b. 14 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 14,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "granitemoe",
+ "arxiv:2408.13359",
+ "model-index",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "powermoe-3b"
+ },
+ {
+ "slug": "llada-8b-instruct",
+ "name": "Llada 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/GSAI-ML/LLaDA-8B-Instruct",
+ "description": "Open source model GSAI-ML/LLaDA-8B-Instruct. 342 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 342,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llada",
+ "conversational",
+ "custom_code",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llada-8b-instruct"
+ },
+ {
+ "slug": "apertus-8b-instruct-2509",
+ "name": "Apertus 8B Instruct 2509",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/swiss-ai/Apertus-8B-Instruct-2509",
+ "description": "Open source model swiss-ai/Apertus-8B-Instruct-2509. 435 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 435,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "apertus",
+ "multilingual",
+ "compliant",
+ "swiss-ai",
+ "conversational",
+ "arxiv:2509.14233",
+ "base_model:swiss-ai/Apertus-8B-2509",
+ "base_model:finetune:swiss-ai/Apertus-8B-2509",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "apertus-8b-instruct-2509"
+ },
+ {
+ "slug": "qwen3-30b-a3b-gptq-int4",
+ "name": "Qwen3 30B A3B Gptq Int4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-GPTQ-Int4",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-GPTQ-Int4. 45 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 45,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-30B-A3B",
+ "base_model:quantized:Qwen/Qwen3-30B-A3B",
+ "endpoints_compatible",
+ "4-bit",
+ "gptq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-30b-a3b-gptq-int4",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "tinyllama-1.1b-chat-v0.3-gptq",
+ "name": "Tinyllama 1.1B Chat V0.3 Gptq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ",
+ "description": "Open source model TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ. 9 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 9,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "en",
+ "dataset:cerebras/SlimPajama-627B",
+ "dataset:bigcode/starcoderdata",
+ "dataset:OpenAssistant/oasst_top1_2023-08-25",
+ "base_model:TinyLlama/TinyLlama-1.1B-Chat-v0.3",
+ "base_model:quantized:TinyLlama/TinyLlama-1.1B-Chat-v0.3",
+ "text-generation-inference",
+ "4-bit",
+ "gptq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "tinyllama-1.1b-chat-v0.3-gptq"
+ },
+ {
+ "slug": "prot_t5_xl_bfd",
+ "name": "Prot_T5_Xl_Bfd",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Rostlab/prot_t5_xl_bfd",
+ "description": "Open source model Rostlab/prot_t5_xl_bfd. 10 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 10,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "t5",
+ "protein language model",
+ "dataset:BFD",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "prot_t5_xl_bfd"
+ },
+ {
+ "slug": "qwen3-4b-instruct-2507-unsloth-bnb-4bit",
+ "name": "Qwen3 4B Instruct 2507 Unsloth Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit",
+ "description": "Open source model unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit. 13 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "unsloth",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-4B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-4b-instruct-2507-unsloth-bnb-4bit",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "phi-3.5-mini-instruct",
+ "name": "Phi 3.5 Mini Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-3.5-mini-instruct",
+ "description": "Open source model microsoft/Phi-3.5-mini-instruct. 963 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 963,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "nlp",
+ "code",
+ "conversational",
+ "custom_code",
+ "multilingual",
+ "arxiv:2404.14219",
+ "arxiv:2407.13833",
+ "arxiv:2403.06412",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "phi-3.5-mini-instruct",
+ "logo_url": "/logos/phi.svg"
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-bnb-4bit",
+ "name": "Meta Llama 3.1 8B Instruct Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
+ "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit. 95 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 95,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "llama-3",
+ "meta",
+ "facebook",
+ "unsloth",
+ "conversational",
+ "en",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "meta-llama-3.1-8b-instruct-bnb-4bit",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "glm-4.7-flash-awq-4bit",
+ "name": "Glm 4.7 Flash Awq 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/cyankiwi/GLM-4.7-Flash-AWQ-4bit",
+ "description": "Open source model cyankiwi/GLM-4.7-Flash-AWQ-4bit. 43 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 43,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "conversational",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-4.7-flash-awq-4bit"
+ },
+ {
+ "slug": "dots.ocr",
+ "name": "Dots.Ocr",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/rednote-hilab/dots.ocr",
+ "description": "Open source model rednote-hilab/dots.ocr. 1243 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1243,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "dots_ocr",
+ "safetensors",
+ "image-to-text",
+ "ocr",
+ "document-parse",
+ "layout",
+ "table",
+ "formula",
+ "transformers",
+ "custom_code",
+ "image-text-to-text",
+ "conversational",
+ "en",
+ "zh",
+ "multilingual",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "dots.ocr"
+ },
+ {
+ "slug": "mistral-7b-bnb-4bit",
+ "name": "Mistral 7B Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/mistral-7b-bnb-4bit",
+ "description": "Open source model unsloth/mistral-7b-bnb-4bit. 30 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 30,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mistral",
+ "unsloth",
+ "mistral-7b",
+ "bnb",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "mistral-7b-bnb-4bit",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "glm-5-fp8",
+ "name": "Glm 5 Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/zai-org/GLM-5-FP8",
+ "description": "Open source model zai-org/GLM-5-FP8. 108 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 108,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm_moe_dsa",
+ "conversational",
+ "en",
+ "zh",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "glm-5-fp8"
+ },
+ {
+ "slug": "qwen-7b",
+ "name": "Qwen 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen-7B",
+ "description": "Open source model Qwen/Qwen-7B. 395 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 395,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen",
+ "custom_code",
+ "zh",
+ "en",
+ "arxiv:2309.16609",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen-7b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwq-32b-awq",
+ "name": "Qwq 32B Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/QwQ-32B-AWQ",
+ "description": "Open source model Qwen/QwQ-32B-AWQ. 133 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 133,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2412.15115",
+ "base_model:Qwen/QwQ-32B",
+ "base_model:quantized:Qwen/QwQ-32B",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwq-32b-awq"
+ },
+ {
+ "slug": "deepseek-r1-distill-llama-70b",
+ "name": "Deepseek R1 Distill Llama 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-70B. 741 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 741,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-r1-distill-llama-70b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "qwen2.5-coder-7b",
+ "name": "Qwen2.5 Coder 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B. 134 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 134,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "qwen",
+ "qwen-coder",
+ "codeqwen",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-7B",
+ "base_model:finetune:Qwen/Qwen2.5-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-coder-7b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "qwen2.5-3b",
+ "name": "Qwen2.5 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-3B",
+ "description": "Open source model Qwen/Qwen2.5-3B. 169 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 169,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen2.5-3b",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-v2-lite-chat",
+ "name": "Deepseek V2 Lite Chat",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat",
+ "description": "Open source model deepseek-ai/DeepSeek-V2-Lite-Chat. 133 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 133,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v2",
+ "conversational",
+ "custom_code",
+ "arxiv:2405.04434",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-v2-lite-chat"
+ },
+ {
+ "slug": "tiny-qwen3forcausallm",
+ "name": "Tiny Qwen3Forcausallm",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen3ForCausalLM",
+ "description": "Open source model trl-internal-testing/tiny-Qwen3ForCausalLM. 1 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "trl",
+ "conversational",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "tiny-qwen3forcausallm",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "deepseek-coder-v2-lite-instruct",
+ "name": "Deepseek Coder V2 Lite Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
+ "description": "Open source model deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct. 539 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 539,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v2",
+ "conversational",
+ "custom_code",
+ "arxiv:2401.06066",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-coder-v2-lite-instruct"
+ },
+ {
+ "slug": "qwen3-0.6b-base",
+ "name": "Qwen3 0.6B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-0.6B-Base",
+ "description": "Open source model Qwen/Qwen3-0.6B-Base. 146 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 146,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen3-0.6b-base",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "diffractgpt_mistral_chemical_formula",
+ "name": "Diffractgpt_Mistral_Chemical_Formula",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/knc6/diffractgpt_mistral_chemical_formula",
+ "description": "Open source model knc6/diffractgpt_mistral_chemical_formula. 1 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "peft",
+ "safetensors",
+ "chemistry",
+ "text-generation-inference",
+ "atomgpt",
+ "diffraction",
+ "en",
+ "base_model:unsloth/mistral-7b-bnb-4bit",
+ "base_model:adapter:unsloth/mistral-7b-bnb-4bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "diffractgpt_mistral_chemical_formula",
+ "logo_url": "/logos/mistral.svg"
+ },
+ {
+ "slug": "qwen-7b-chat",
+ "name": "Qwen 7B Chat",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen-7B-Chat",
+ "description": "Open source model Qwen/Qwen-7B-Chat. 787 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 787,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen",
+ "custom_code",
+ "zh",
+ "en",
+ "arxiv:2309.16609",
+ "arxiv:2305.08322",
+ "arxiv:2009.03300",
+ "arxiv:2305.05280",
+ "arxiv:2210.03629",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "qwen-7b-chat",
+ "logo_url": "/logos/qwen.svg"
+ },
+ {
+ "slug": "nvidia-nemotron-3-nano-30b-a3b-nvfp4",
+ "name": "Nvidia Nemotron 3 Nano 30B A3B Nvfp4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4",
+ "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4. 100 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 100,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "nemotron_h",
+ "feature-extraction",
+ "nvidia",
+ "pytorch",
+ "conversational",
+ "custom_code",
+ "en",
+ "es",
+ "fr",
+ "de",
+ "ja",
+ "it",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v1",
+ "dataset:nvidia/Nemotron-CC-v2",
+ "dataset:nvidia/Nemotron-Pretraining-SFT-v1",
+ "dataset:nvidia/Nemotron-CC-Math-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v2",
+ "dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
+ "dataset:nvidia/Nemotron-CC-v2.1",
+ "dataset:nvidia/Nemotron-CC-Code-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
+ "dataset:nvidia/Nemotron-Competitive-Programming-v1",
+ "dataset:nvidia/Nemotron-Math-v2",
+ "dataset:nvidia/Nemotron-Agentic-v1",
+ "dataset:nvidia/Nemotron-Math-Proofs-v1",
+ "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
+ "dataset:nvidia/Nemotron-Science-v1",
+ "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
+ "arxiv:2512.20848",
+ "arxiv:2512.20856",
+ "arxiv:2601.20088",
+ "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "nvidia-nemotron-3-nano-30b-a3b-nvfp4"
+ },
+ {
+ "slug": "falcon-h1-tiny-90m-instruct",
+ "name": "Falcon H1 Tiny 90M Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tiiuae/Falcon-H1-Tiny-90M-Instruct",
+ "description": "Open source model tiiuae/Falcon-H1-Tiny-90M-Instruct. 31 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 31,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "falcon_h1",
+ "falcon-h1",
+ "edge",
+ "conversational",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "falcon-h1-tiny-90m-instruct",
+ "logo_url": "/logos/falcon.svg"
+ },
+ {
+ "slug": "hermes-3-llama-3.2-3b",
+ "name": "Hermes 3 Llama 3.2 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.2-3B",
+ "description": "Open source model NousResearch/Hermes-3-Llama-3.2-3B. 174 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 174,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "Llama-3",
+ "instruct",
+ "finetune",
+ "chatml",
+ "gpt4",
+ "synthetic data",
+ "distillation",
+ "function calling",
+ "json mode",
+ "axolotl",
+ "roleplaying",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2408.11857",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "hermes-3-llama-3.2-3b",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct",
+ "name": "Meta Llama 3.1 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct",
+ "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct. 94 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 94,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "llama-3",
+ "meta",
+ "facebook",
+ "unsloth",
+ "conversational",
+ "en",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:finetune:meta-llama/Llama-3.1-8B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "meta-llama-3.1-8b-instruct",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-gguf",
+ "name": "Meta Llama 3.1 8B Instruct Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF",
+ "description": "Open source model bartowski/Meta-Llama-3.1-8B-Instruct-GGUF. 321 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 321,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "gguf",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
+ "endpoints_compatible",
+ "region:us",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "meta-llama-3.1-8b-instruct-gguf",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "deepseek-v3-0324",
+ "name": "Deepseek V3 0324",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3-0324",
+ "description": "Open source model deepseek-ai/DeepSeek-V3-0324. 3087 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3087,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2412.19437",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "deepseek-v3-0324"
+ },
+ {
+ "slug": "elm",
+ "name": "Elm",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Joaoffg/ELM",
+ "description": "Open source model Joaoffg/ELM. 2 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "academic",
+ "university",
+ "en",
+ "nl",
+ "arxiv:2408.06931",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "elm"
+ },
+ {
+ "slug": "llama-2-13b-chat-hf",
+ "name": "Llama 2 13B Chat Hf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf",
+ "description": "Open source model meta-llama/Llama-2-13b-chat-hf. 1109 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1109,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-2",
+ "conversational",
+ "en",
+ "arxiv:2307.09288",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "llama-2-13b-chat-hf",
+ "logo_url": "/logos/meta.svg"
+ },
+ {
+ "slug": "svara-tts-v1",
+ "name": "Svara Tts V1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/kenpath/svara-tts-v1",
+ "description": "Open source model kenpath/svara-tts-v1. 18 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 18,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "text-to-speech",
+ "speech-synthesis",
+ "multilingual",
+ "indic",
+ "orpheus",
+ "lora",
+ "low-latency",
+ "gguf",
+ "zero-shot",
+ "emotions",
+ "discrete-audio-tokens",
+ "hi",
+ "bn",
+ "mr",
+ "te",
+ "kn",
+ "bho",
+ "mag",
+ "hne",
+ "mai",
+ "as",
+ "brx",
+ "doi",
+ "gu",
+ "ml",
+ "pa",
+ "ta",
+ "ne",
+ "sa",
+ "en",
+ "dataset:SYSPIN",
+ "dataset:RASA",
+ "dataset:IndicTTS",
+ "dataset:SPICOR",
+ "base_model:canopylabs/3b-hi-ft-research_release",
+ "base_model:adapter:canopylabs/3b-hi-ft-research_release",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361",
+ "id": "svara-tts-v1"
+ }
+]
diff --git a/data/tools_expanded.json b/data/tools_expanded.json
new file mode 100644
index 0000000..fee37b4
--- /dev/null
+++ b/data/tools_expanded.json
@@ -0,0 +1,9193 @@
+[
+ {
+ "slug": "qwen2.5-7b-instruct",
+ "name": "Qwen2.5 7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-7B-Instruct. 1073 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1073,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-7B",
+ "base_model:finetune:Qwen/Qwen2.5-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-0.6b",
+ "name": "Qwen3 0.6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-0.6B",
+ "description": "Open source model Qwen/Qwen3-0.6B. 1083 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1083,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-0.6B-Base",
+ "base_model:finetune:Qwen/Qwen3-0.6B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 0.6,
+      "parameters_active_b": 0.6,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt2",
+ "name": "Gpt2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2",
+ "description": "Open source model openai-community/gpt2. 3114 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3114,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "tflite",
+ "rust",
+ "onnx",
+ "safetensors",
+ "gpt2",
+ "exbert",
+ "en",
+ "doi:10.57967/hf/0039",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-1.5b-instruct",
+ "name": "Qwen2.5 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct. 617 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 617,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 1.5,
+      "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-3b-instruct",
+ "name": "Qwen2.5 3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-3B-Instruct. 404 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 404,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-3B",
+ "base_model:finetune:Qwen/Qwen2.5-3B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.1-8b-instruct",
+ "name": "Llama 3.1 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.1-8B-Instruct. 5467 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 5467,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-8B",
+ "base_model:finetune:meta-llama/Llama-3.1-8B",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt-oss-20b",
+ "name": "Gpt Oss 20B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai/gpt-oss-20b",
+ "description": "Open source model openai/gpt-oss-20b. 4378 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4378,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "conversational",
+ "arxiv:2508.10925",
+ "endpoints_compatible",
+ "8-bit",
+ "mxfp4",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 14,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 20,
+ "parameters_active_b": 20,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-0.5b-instruct",
+ "name": "Qwen2.5 0.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-0.5B-Instruct. 463 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 463,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-0.5B",
+ "base_model:finetune:Qwen/Qwen2.5-0.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 0.5,
+      "parameters_active_b": 0.5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-4b",
+ "name": "Qwen3 4B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B",
+ "description": "Open source model Qwen/Qwen3-4B. 552 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 552,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-4B-Base",
+ "base_model:finetune:Qwen/Qwen3-4B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-8b",
+ "name": "Qwen3 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-8B",
+ "description": "Open source model Qwen/Qwen3-8B. 940 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 940,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-8B-Base",
+ "base_model:finetune:Qwen/Qwen3-8B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-32b-instruct",
+ "name": "Qwen2.5 32B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-32B-Instruct. 328 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 328,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-32B",
+ "base_model:finetune:Qwen/Qwen2.5-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "opt-125m",
+ "name": "Opt 125M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/facebook/opt-125m",
+ "description": "Open source model facebook/opt-125m. 233 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 233,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "opt",
+ "en",
+ "arxiv:2205.01068",
+ "arxiv:2005.14165",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-1.7b",
+ "name": "Qwen3 1.7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-1.7B",
+ "description": "Open source model Qwen/Qwen3-1.7B. 422 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 422,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-1.7B-Base",
+ "base_model:finetune:Qwen/Qwen3-1.7B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 1.7,
+      "parameters_active_b": 1.7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "tiny-qwen2forcausallm-2.5",
+ "name": "Tiny Qwen2Forcausallm 2.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
+ "description": "Open source model trl-internal-testing/tiny-Qwen2ForCausalLM-2.5. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "trl",
+ "conversational",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "dolphin-2.9.1-yi-1.5-34b",
+ "name": "Dolphin 2.9.1 Yi 1.5 34B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/dphn/dolphin-2.9.1-yi-1.5-34b",
+ "description": "Open source model dphn/dolphin-2.9.1-yi-1.5-34b. 54 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 54,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "generated_from_trainer",
+ "axolotl",
+ "conversational",
+ "dataset:cognitivecomputations/Dolphin-2.9",
+ "dataset:teknium/OpenHermes-2.5",
+ "dataset:m-a-p/CodeFeedback-Filtered-Instruction",
+ "dataset:cognitivecomputations/dolphin-coder",
+ "dataset:cognitivecomputations/samantha-data",
+ "dataset:microsoft/orca-math-word-problems-200k",
+ "dataset:Locutusque/function-calling-chatml",
+ "dataset:internlm/Agent-FLAN",
+ "base_model:01-ai/Yi-1.5-34B",
+ "base_model:finetune:01-ai/Yi-1.5-34B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 24,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 34,
+ "parameters_active_b": 34,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-embedding-0.6b",
+ "name": "Qwen3 Embedding 0.6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Embedding-0.6B",
+ "description": "Open source model Qwen/Qwen3-Embedding-0.6B. 879 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 879,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen3",
+ "transformers",
+ "sentence-similarity",
+ "feature-extraction",
+ "text-embeddings-inference",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-0.6B-Base",
+ "base_model:finetune:Qwen/Qwen3-0.6B-Base",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 0.6,
+      "parameters_active_b": 0.6,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt-oss-120b",
+ "name": "Gpt Oss 120B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai/gpt-oss-120b",
+ "description": "Open source model openai/gpt-oss-120b. 4503 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4503,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "conversational",
+ "arxiv:2508.10925",
+ "endpoints_compatible",
+ "8-bit",
+ "mxfp4",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 84,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 120,
+ "parameters_active_b": 120,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-4b-instruct-2507",
+ "name": "Qwen3 4B Instruct 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507",
+ "description": "Open source model Qwen/Qwen3-4B-Instruct-2507. 730 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 730,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "moondream2",
+ "name": "Moondream2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/vikhyatk/moondream2",
+ "description": "Open source model vikhyatk/moondream2. 1373 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1373,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "moondream1",
+ "image-text-to-text",
+ "custom_code",
+ "doi:10.57967/hf/6762",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.2-1b-instruct",
+ "name": "Llama 3.2 1B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.2-1B-Instruct. 1292 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1292,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2-1.5b-instruct",
+ "name": "Qwen2 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2-1.5B-Instruct. 158 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 158,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 1.5,
+      "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-0.5b-instruct",
+ "name": "Qwen2.5 Coder 0.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-0.5B-Instruct. 64 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 64,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-0.5B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-0.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 0.5,
+      "parameters_active_b": 0.5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "kimi-k2.5",
+ "name": "Kimi K2.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mlx-community/Kimi-K2.5",
+ "description": "Open source model mlx-community/Kimi-K2.5. 28 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 28,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "kimi_k25",
+ "conversational",
+ "custom_code",
+ "base_model:moonshotai/Kimi-K2.5",
+ "base_model:quantized:moonshotai/Kimi-K2.5",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "mistral-7b-instruct-v0.2",
+ "name": "Mistral 7B Instruct V0.2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
+ "description": "Open source model mistralai/Mistral-7B-Instruct-v0.2. 3075 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3075,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "mistral",
+ "finetuned",
+ "mistral-common",
+ "conversational",
+ "arxiv:2310.06825",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-30b-a3b-instruct-2507",
+ "name": "Qwen3 30B A3B Instruct 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507. 766 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 766,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2402.17463",
+ "arxiv:2407.02490",
+ "arxiv:2501.15383",
+ "arxiv:2404.06654",
+ "arxiv:2505.09388",
+ "eval-results",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+      "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llm-jp-3-3.7b-instruct",
+ "name": "Llm Jp 3 3.7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct",
+ "description": "Open source model llm-jp/llm-jp-3-3.7b-instruct. 13 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "en",
+ "ja",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+      "parameters_total_b": 3.7,
+      "parameters_active_b": 3.7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.2-3b-instruct",
+ "name": "Llama 3.2 3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.2-3B-Instruct. 1986 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1986,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "distilgpt2",
+ "name": "Distilgpt2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/distilbert/distilgpt2",
+ "description": "Open source model distilbert/distilgpt2. 609 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 609,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "tflite",
+ "rust",
+ "coreml",
+ "safetensors",
+ "gpt2",
+ "exbert",
+ "en",
+ "dataset:openwebtext",
+ "arxiv:1910.01108",
+ "arxiv:2201.08542",
+ "arxiv:2203.12574",
+ "arxiv:1910.09700",
+ "arxiv:1503.02531",
+ "model-index",
+ "co2_eq_emissions",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-embedding-8b",
+ "name": "Qwen3 Embedding 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Embedding-8B",
+ "description": "Open source model Qwen/Qwen3-Embedding-8B. 584 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 584,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen3",
+ "transformers",
+ "sentence-similarity",
+ "feature-extraction",
+ "text-embeddings-inference",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-8B-Base",
+ "base_model:finetune:Qwen/Qwen3-8B-Base",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "meta-llama-3-8b",
+ "name": "Meta Llama 3 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B",
+ "description": "Open source model meta-llama/Meta-Llama-3-8B. 6458 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 6458,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "tinyllama-1.1b-chat-v1.0",
+ "name": "Tinyllama 1.1B Chat V1.0",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+ "description": "Open source model TinyLlama/TinyLlama-1.1B-Chat-v1.0. 1526 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1526,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "en",
+ "dataset:cerebras/SlimPajama-627B",
+ "dataset:bigcode/starcoderdata",
+ "dataset:HuggingFaceH4/ultrachat_200k",
+ "dataset:HuggingFaceH4/ultrafeedback_binarized",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-4.7-flash",
+ "name": "Glm 4.7 Flash",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/zai-org/GLM-4.7-Flash",
+ "description": "Open source model zai-org/GLM-4.7-Flash. 1538 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1538,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "conversational",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "eval-results",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.2-1b",
+ "name": "Llama 3.2 1B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-1B",
+ "description": "Open source model meta-llama/Llama-3.2-1B. 2295 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2295,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-32b",
+ "name": "Qwen3 32B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-32B",
+ "description": "Open source model Qwen/Qwen3-32B. 656 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 656,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.2-1b-instruct-fp8-dynamic",
+ "name": "Llama 3.2 1B Instruct Fp8 Dynamic",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic",
+ "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "llama",
+ "fp8",
+ "vllm",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.2-1B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-1.5b-instruct",
+ "name": "Qwen2.5 Coder 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-1.5B-Instruct. 106 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 106,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "meta-llama-3-8b-instruct",
+ "name": "Meta Llama 3 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct",
+ "description": "Open source model meta-llama/Meta-Llama-3-8B-Instruct. 4380 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4380,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gemma-3-1b-it",
+ "name": "Gemma 3 1B It",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google/gemma-3-1b-it",
+ "description": "Open source model google/gemma-3-1b-it. 842 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 842,
+ "language": "Python",
+ "license": "gemma",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gemma3_text",
+ "conversational",
+ "arxiv:1905.07830",
+ "arxiv:1905.10044",
+ "arxiv:1911.11641",
+ "arxiv:1904.09728",
+ "arxiv:1705.03551",
+ "arxiv:1911.01547",
+ "arxiv:1907.10641",
+ "arxiv:1903.00161",
+ "arxiv:2009.03300",
+ "arxiv:2304.06364",
+ "arxiv:2103.03874",
+ "arxiv:2110.14168",
+ "arxiv:2311.12022",
+ "arxiv:2108.07732",
+ "arxiv:2107.03374",
+ "arxiv:2210.03057",
+ "arxiv:2106.03193",
+ "arxiv:1910.11856",
+ "arxiv:2502.12404",
+ "arxiv:2502.21228",
+ "arxiv:2404.16816",
+ "arxiv:2104.12756",
+ "arxiv:2311.16502",
+ "arxiv:2203.10244",
+ "arxiv:2404.12390",
+ "arxiv:1810.12440",
+ "arxiv:1908.02660",
+ "arxiv:2312.11805",
+ "base_model:google/gemma-3-1b-pt",
+ "base_model:finetune:google/gemma-3-1b-pt",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "phi-2",
+ "name": "Phi 2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/phi-2",
+ "description": "Open source model microsoft/phi-2. 3425 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3425,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi",
+ "nlp",
+ "code",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-7b-instruct",
+ "name": "Qwen2.5 Coder 7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct. 646 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 646,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-7B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-7b",
+ "name": "Qwen2.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-7B",
+ "description": "Open source model Qwen/Qwen2.5-7B. 264 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 264,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-1.5b",
+ "name": "Deepseek R1 Distill Qwen 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B. 1446 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1446,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-v3",
+ "name": "Deepseek V3",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3",
+ "description": "Open source model deepseek-ai/DeepSeek-V3. 4024 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4024,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2412.19437",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt2-large",
+ "name": "Gpt2 Large",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2-large",
+ "description": "Open source model openai-community/gpt2-large. 344 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 344,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "onnx",
+ "safetensors",
+ "gpt2",
+ "en",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-4.7-flash-mlx-8bit",
+ "name": "Glm 4.7 Flash Mlx 8Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-8bit",
+ "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-8bit. 9 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 9,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "mlx",
+ "conversational",
+ "en",
+ "zh",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "8-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-4.7-flash-mlx-6bit",
+ "name": "Glm 4.7 Flash Mlx 6Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-6bit",
+ "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-6bit. 7 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 7,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "mlx",
+ "conversational",
+ "en",
+ "zh",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "6-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-0.6b-fp8",
+ "name": "Qwen3 0.6B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-0.6B-FP8",
+ "description": "Open source model Qwen/Qwen3-0.6B-FP8. 56 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 56,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-0.6B",
+ "base_model:quantized:Qwen/Qwen3-0.6B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.1-8b",
+ "name": "Llama 3.1 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-8B",
+ "description": "Open source model meta-llama/Llama-3.1-8B. 2065 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2065,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "pythia-160m",
+ "name": "Pythia 160M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/EleutherAI/pythia-160m",
+ "description": "Open source model EleutherAI/pythia-160m. 38 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 38,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "gpt_neox",
+ "causal-lm",
+ "pythia",
+ "en",
+ "dataset:EleutherAI/pile",
+ "arxiv:2304.01373",
+ "arxiv:2101.00027",
+ "arxiv:2201.07311",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-32b",
+ "name": "Deepseek R1 Distill Qwen 32B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B. 1517 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1517,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "hunyuanocr",
+ "name": "Hunyuanocr",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/HunyuanOCR",
+ "description": "Open source model tencent/HunyuanOCR. 553 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 553,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "hunyuan_vl",
+ "ocr",
+ "hunyuan",
+ "vision-language",
+ "image-to-text",
+ "1B",
+ "end-to-end",
+ "image-text-to-text",
+ "conversational",
+ "multilingual",
+ "arxiv:2511.19575",
+ "base_model:tencent/HunyuanOCR",
+ "base_model:finetune:tencent/HunyuanOCR",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-30b-a3b",
+ "name": "Qwen3 30B A3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B",
+ "description": "Open source model Qwen/Qwen3-30B-A3B. 855 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 855,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-30B-A3B-Base",
+ "base_model:finetune:Qwen/Qwen3-30B-A3B-Base",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-0.5b",
+ "name": "Qwen2.5 0.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B",
+ "description": "Open source model Qwen/Qwen2.5-0.5B. 372 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 372,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-32b-instruct-awq",
+ "name": "Qwen2.5 32B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-32B-Instruct-AWQ. 94 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 94,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-32B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-32B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "nvidia-nemotron-3-nano-30b-a3b-fp8",
+ "name": "Nvidia Nemotron 3 Nano 30B A3B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8",
+ "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8. 284 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 284,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "nemotron_h",
+ "feature-extraction",
+ "nvidia",
+ "pytorch",
+ "conversational",
+ "custom_code",
+ "en",
+ "es",
+ "fr",
+ "de",
+ "ja",
+ "it",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v1",
+ "dataset:nvidia/Nemotron-CC-v2",
+ "dataset:nvidia/Nemotron-Pretraining-SFT-v1",
+ "dataset:nvidia/Nemotron-CC-Math-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v2",
+ "dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
+ "dataset:nvidia/Nemotron-CC-v2.1",
+ "dataset:nvidia/Nemotron-CC-Code-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
+ "dataset:nvidia/Nemotron-Competitive-Programming-v1",
+ "dataset:nvidia/Nemotron-Math-v2",
+ "dataset:nvidia/Nemotron-Agentic-v1",
+ "dataset:nvidia/Nemotron-Math-Proofs-v1",
+ "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
+ "dataset:nvidia/Nemotron-Science-v1",
+ "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
+ "arxiv:2512.20848",
+ "arxiv:2512.20856",
+ "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "eval-results",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-14b-instruct",
+ "name": "Qwen2.5 14B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-14B-Instruct. 312 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 312,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-14B",
+ "base_model:finetune:Qwen/Qwen2.5-14B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "nvidia-nemotron-3-nano-30b-a3b-bf16",
+ "name": "Nvidia Nemotron 3 Nano 30B A3B Bf16",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16. 634 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 634,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "nemotron_h",
+ "feature-extraction",
+ "nvidia",
+ "pytorch",
+ "conversational",
+ "custom_code",
+ "en",
+ "es",
+ "fr",
+ "de",
+ "ja",
+ "it",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v1",
+ "dataset:nvidia/Nemotron-CC-v2",
+ "dataset:nvidia/Nemotron-Pretraining-SFT-v1",
+ "dataset:nvidia/Nemotron-CC-Math-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v2",
+ "dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
+ "dataset:nvidia/Nemotron-CC-v2.1",
+ "dataset:nvidia/Nemotron-CC-Code-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
+ "dataset:nvidia/Nemotron-Competitive-Programming-v1",
+ "dataset:nvidia/Nemotron-Math-v2",
+ "dataset:nvidia/Nemotron-Agentic-v1",
+ "dataset:nvidia/Nemotron-Math-Proofs-v1",
+ "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
+ "dataset:nvidia/Nemotron-Science-v1",
+ "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
+ "arxiv:2512.20848",
+ "arxiv:2512.20856",
+ "eval-results",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "openelm-1_1b-instruct",
+ "name": "Openelm 1_1B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/apple/OpenELM-1_1B-Instruct",
+ "description": "Open source model apple/OpenELM-1_1B-Instruct. 72 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 72,
+ "language": "Python",
+ "license": "apple-amlr",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "openelm",
+ "custom_code",
+ "arxiv:2404.14619",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "tiny-random-llamaforcausallm",
+ "name": "Tiny Random Llamaforcausallm",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/hmellor/tiny-random-LlamaForCausalLM",
+ "description": "Open source model hmellor/tiny-random-LlamaForCausalLM. 0 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 0,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-next-80b-a3b-instruct",
+ "name": "Qwen3 Next 80B A3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct",
+ "description": "Open source model Qwen/Qwen3-Next-80B-A3B-Instruct. 937 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 937,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_next",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2404.06654",
+ "arxiv:2505.09388",
+ "arxiv:2501.15383",
+ "eval-results",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 56,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 80,
+ "parameters_active_b": 80,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "h2ovl-mississippi-800m",
+ "name": "H2Ovl Mississippi 800M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/h2oai/h2ovl-mississippi-800m",
+ "description": "Open source model h2oai/h2ovl-mississippi-800m. 39 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 39,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "h2ovl_chat",
+ "feature-extraction",
+ "gpt",
+ "llm",
+ "multimodal large language model",
+ "ocr",
+ "conversational",
+ "custom_code",
+ "en",
+ "arxiv:2410.13611",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "bloomz-560m",
+ "name": "Bloomz 560M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/bigscience/bloomz-560m",
+ "description": "Open source model bigscience/bloomz-560m. 137 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 137,
+ "language": "Python",
+ "license": "bigscience-bloom-rail-1.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tensorboard",
+ "safetensors",
+ "bloom",
+ "ak",
+ "ar",
+ "as",
+ "bm",
+ "bn",
+ "ca",
+ "code",
+ "en",
+ "es",
+ "eu",
+ "fon",
+ "fr",
+ "gu",
+ "hi",
+ "id",
+ "ig",
+ "ki",
+ "kn",
+ "lg",
+ "ln",
+ "ml",
+ "mr",
+ "ne",
+ "nso",
+ "ny",
+ "or",
+ "pa",
+ "pt",
+ "rn",
+ "rw",
+ "sn",
+ "st",
+ "sw",
+ "ta",
+ "te",
+ "tn",
+ "ts",
+ "tum",
+ "tw",
+ "ur",
+ "vi",
+ "wo",
+ "xh",
+ "yo",
+ "zh",
+ "zu",
+ "dataset:bigscience/xP3",
+ "arxiv:2211.01786",
+ "model-index",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-1.5b-quantized.w8a8",
+ "name": "Qwen2.5 1.5B Quantized.W8A8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Qwen2.5-1.5B-quantized.w8a8",
+ "description": "Open source model RedHatAI/Qwen2.5-1.5B-quantized.w8a8. 2 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "neuralmagic",
+ "llmcompressor",
+ "conversational",
+ "en",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:quantized:Qwen/Qwen2.5-1.5B",
+ "8-bit",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "h2ovl-mississippi-2b",
+ "name": "H2Ovl Mississippi 2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/h2oai/h2ovl-mississippi-2b",
+ "description": "Open source model h2oai/h2ovl-mississippi-2b. 40 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 40,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "h2ovl_chat",
+ "feature-extraction",
+ "gpt",
+ "llm",
+ "multimodal large language model",
+ "ocr",
+ "conversational",
+ "custom_code",
+ "en",
+ "arxiv:2410.13611",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llava-v1.5-7b",
+ "name": "Llava V1.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/liuhaotian/llava-v1.5-7b",
+ "description": "Open source model liuhaotian/llava-v1.5-7b. 537 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 537,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "llava",
+ "image-text-to-text",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "t5-3b",
+ "name": "T5 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google-t5/t5-3b",
+ "description": "Open source model google-t5/t5-3b. 51 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 51,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "safetensors",
+ "t5",
+ "summarization",
+ "translation",
+ "en",
+ "fr",
+ "ro",
+ "de",
+ "multilingual",
+ "dataset:c4",
+ "arxiv:1805.12471",
+ "arxiv:1708.00055",
+ "arxiv:1704.05426",
+ "arxiv:1606.05250",
+ "arxiv:1808.09121",
+ "arxiv:1810.12885",
+ "arxiv:1905.10044",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-14b-instruct-awq",
+ "name": "Qwen2.5 14B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-14B-Instruct-AWQ. 27 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 27,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-14B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-14B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.2-3b",
+ "name": "Llama 3.2 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.2-3B",
+ "description": "Open source model meta-llama/Llama-3.2-3B. 697 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 697,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "arxiv:2405.16406",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "phi-3-mini-4k-instruct-gptq-4bit",
+ "name": "Phi 3 Mini 4K Instruct Gptq 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/kaitchup/Phi-3-mini-4k-instruct-gptq-4bit",
+ "description": "Open source model kaitchup/Phi-3-mini-4k-instruct-gptq-4bit. 2 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "conversational",
+ "custom_code",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "gptq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-72b-instruct-awq",
+ "name": "Qwen2.5 72B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-72B-Instruct-AWQ. 74 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 74,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-72B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-72B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 50,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 72,
+ "parameters_active_b": 72,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "smollm2-135m",
+ "name": "Smollm2 135M",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M",
+ "description": "Open source model HuggingFaceTB/SmolLM2-135M. 166 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 166,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "en",
+ "arxiv:2502.02737",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.3-70b-instruct",
+ "name": "Llama 3.3 70B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.3-70B-Instruct. 2658 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2658,
+ "language": "Python",
+ "license": "llama3.3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "de",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-70B",
+ "base_model:finetune:meta-llama/Llama-3.1-70B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-30b-a3b-instruct-2507-fp8",
+ "name": "Qwen3 30B A3B Instruct 2507 Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507-FP8. 112 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 112,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-32b-instruct",
+ "name": "Qwen2.5 Coder 32B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct. 1995 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1995,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-32B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-235b-a22b-instruct-2507-fp8",
+ "name": "Qwen3 235B A22B Instruct 2507 Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
+ "description": "Open source model Qwen/Qwen3-235B-A22B-Instruct-2507-FP8. 145 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 145,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-235B-A22B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-235B-A22B-Instruct-2507",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 164,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 235,
+ "parameters_active_b": 235,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-7b",
+ "name": "Deepseek R1 Distill Qwen 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-7B. 787 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 787,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "phi-3-mini-4k-instruct",
+ "name": "Phi 3 Mini 4K Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct",
+ "description": "Open source model microsoft/Phi-3-mini-4k-instruct. 1386 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1386,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "nlp",
+ "code",
+ "conversational",
+ "custom_code",
+ "en",
+ "fr",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-14b",
+ "name": "Qwen3 14B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-14B",
+ "description": "Open source model Qwen/Qwen3-14B. 366 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 366,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-14B-Base",
+ "base_model:finetune:Qwen/Qwen3-14B-Base",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-1.5b",
+ "name": "Qwen2.5 Coder 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B",
+ "description": "Open source model Qwen/Qwen2.5-Coder-1.5B. 81 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 81,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "qwen",
+ "qwen-coder",
+ "codeqwen",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.1-70b-instruct",
+ "name": "Llama 3.1 70B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct",
+ "description": "Open source model meta-llama/Llama-3.1-70B-Instruct. 890 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 890,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-70B",
+ "base_model:finetune:meta-llama/Llama-3.1-70B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "hunyuanimage-3.0",
+ "name": "Hunyuanimage 3.0",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/HunyuanImage-3.0",
+ "description": "Open source model tencent/HunyuanImage-3.0. 640 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 640,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "hunyuan_image_3_moe",
+ "text-to-image",
+ "custom_code",
+ "arxiv:2509.23951",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-7b-instruct-awq",
+ "name": "Qwen2.5 Coder 7B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-AWQ. 19 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 19,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-coder-30b-a3b-instruct",
+ "name": "Qwen3 Coder 30B A3B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct",
+ "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct. 945 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 945,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-0528",
+ "name": "Deepseek R1 0528",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-0528",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-0528. 2400 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2400,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2501.12948",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "tiny-random-llama-3",
+ "name": "Tiny Random Llama 3",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/llamafactory/tiny-random-Llama-3",
+ "description": "Open source model llamafactory/tiny-random-Llama-3. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-32b-instruct-awq",
+ "name": "Qwen2.5 Coder 32B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct-AWQ. 33 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 33,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-32B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-Coder-32B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "mistral-7b-instruct-v0.1",
+ "name": "Mistral 7B Instruct V0.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1",
+ "description": "Open source model mistralai/Mistral-7B-Instruct-v0.1. 1826 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1826,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "mistral",
+ "finetuned",
+ "mistral-common",
+ "conversational",
+ "arxiv:2310.06825",
+ "base_model:mistralai/Mistral-7B-v0.1",
+ "base_model:finetune:mistralai/Mistral-7B-v0.1",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt-oss-20b-mxfp4-q8",
+ "name": "Gpt Oss 20B Mxfp4 Q8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mlx-community/gpt-oss-20b-MXFP4-Q8",
+ "description": "Open source model mlx-community/gpt-oss-20b-MXFP4-Q8. 31 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 31,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "conversational",
+ "base_model:openai/gpt-oss-20b",
+ "base_model:quantized:openai/gpt-oss-20b",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 14,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 20,
+ "parameters_active_b": 20,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-embedding-4b",
+ "name": "Qwen3 Embedding 4B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Embedding-4B",
+ "description": "Open source model Qwen/Qwen3-Embedding-4B. 224 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 224,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen3",
+ "transformers",
+ "sentence-similarity",
+ "feature-extraction",
+ "text-embeddings-inference",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-4B-Base",
+ "base_model:finetune:Qwen/Qwen3-4B-Base",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-1.5b-instruct-awq",
+ "name": "Qwen2.5 1.5B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct-AWQ. 6 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 6,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-1.5B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-1.5B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-fp8",
+ "name": "Meta Llama 3.1 8B Instruct Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8",
+ "description": "Open source model RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8. 44 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 44,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "fp8",
+ "vllm",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "phi-4",
+ "name": "Phi 4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/phi-4",
+ "description": "Open source model microsoft/phi-4. 2220 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2220,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "phi",
+ "nlp",
+ "math",
+ "code",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2412.08905",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1",
+ "name": "Deepseek R1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1",
+ "description": "Open source model deepseek-ai/DeepSeek-R1. 13011 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13011,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2501.12948",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.2-1b-instruct-fp8",
+ "name": "Llama 3.2 1B Instruct Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8",
+ "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8. 3 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3,
+ "language": "Python",
+ "license": "llama3.2",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "llama",
+ "llama-3",
+ "neuralmagic",
+ "llmcompressor",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.2-1B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.1-405b",
+ "name": "Llama 3.1 405B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-3.1-405B",
+ "description": "Open source model meta-llama/Llama-3.1-405B. 961 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 961,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "arxiv:2204.05149",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 284,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 405,
+ "parameters_active_b": 405,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-4b-thinking-2507",
+ "name": "Qwen3 4B Thinking 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507",
+ "description": "Open source model Qwen/Qwen3-4B-Thinking-2507. 548 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 548,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt2-medium",
+ "name": "Gpt2 Medium",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2-medium",
+ "description": "Open source model openai-community/gpt2-medium. 193 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 193,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "onnx",
+ "safetensors",
+ "gpt2",
+ "en",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "tiny-gpt2",
+ "name": "Tiny Gpt2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/sshleifer/tiny-gpt2",
+ "description": "Open source model sshleifer/tiny-gpt2. 34 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 34,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "gpt2",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "hermes-3-llama-3.1-8b",
+ "name": "Hermes 3 Llama 3.1 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B",
+ "description": "Open source model NousResearch/Hermes-3-Llama-3.1-8B. 385 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 385,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "Llama-3",
+ "instruct",
+ "finetune",
+ "chatml",
+ "gpt4",
+ "synthetic data",
+ "distillation",
+ "function calling",
+ "json mode",
+ "axolotl",
+ "roleplaying",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2408.11857",
+ "base_model:meta-llama/Llama-3.1-8B",
+ "base_model:finetune:meta-llama/Llama-3.1-8B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "phi-3.5-vision-instruct",
+ "name": "Phi 3.5 Vision Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-3.5-vision-instruct",
+ "description": "Open source model microsoft/Phi-3.5-vision-instruct. 726 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 726,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3_v",
+ "nlp",
+ "code",
+ "vision",
+ "image-text-to-text",
+ "conversational",
+ "custom_code",
+ "multilingual",
+ "arxiv:2404.14219",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "minimax-m2",
+ "name": "Minimax M2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/MiniMaxAI/MiniMax-M2",
+ "description": "Open source model MiniMaxAI/MiniMax-M2. 1485 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1485,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "minimax_m2",
+ "conversational",
+ "custom_code",
+ "arxiv:2504.07164",
+ "arxiv:2509.06501",
+ "arxiv:2509.13160",
+ "eval-results",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-distill-llama-8b",
+ "name": "Deepseek R1 Distill Llama 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-8B. 843 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 843,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-14b-awq",
+ "name": "Qwen3 14B Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-14B-AWQ",
+ "description": "Open source model Qwen/Qwen3-14B-AWQ. 57 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 57,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-14B",
+ "base_model:quantized:Qwen/Qwen3-14B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-235b-a22b",
+ "name": "Qwen3 235B A22B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B",
+ "description": "Open source model Qwen/Qwen3-235B-A22B. 1075 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1075,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 164,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 235,
+ "parameters_active_b": 235,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-awq-int4",
+ "name": "Meta Llama 3.1 8B Instruct Awq Int4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
+ "description": "Open source model hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4. 87 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 87,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "llama-3.1",
+ "meta",
+ "autoawq",
+ "conversational",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "lfm2.5-1.2b-instruct-mlx-8bit",
+ "name": "Lfm2.5 1.2B Instruct Mlx 8Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit",
+ "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit. 1 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "lfm2.5",
+ "edge",
+ "mlx",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "base_model:LiquidAI/LFM2.5-1.2B-Instruct",
+ "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
+ "endpoints_compatible",
+ "8-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-4.7-flash-gguf",
+ "name": "Glm 4.7 Flash Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF",
+ "description": "Open source model unsloth/GLM-4.7-Flash-GGUF. 482 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 482,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "gguf",
+ "unsloth",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us",
+ "imatrix",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-distill-qwen-14b",
+ "name": "Deepseek R1 Distill Qwen 14B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-14B. 603 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 603,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "lfm2.5-1.2b-instruct-mlx-6bit",
+ "name": "Lfm2.5 1.2B Instruct Mlx 6Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit",
+ "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit. 4 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "lfm2.5",
+ "edge",
+ "mlx",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "base_model:LiquidAI/LFM2.5-1.2B-Instruct",
+ "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
+ "endpoints_compatible",
+ "6-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "lfm2.5-1.2b-instruct-mlx-4bit",
+ "name": "Lfm2.5 1.2B Instruct Mlx 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit",
+ "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit. 1 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "lfm2.5",
+ "edge",
+ "mlx",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "base_model:LiquidAI/LFM2.5-1.2B-Instruct",
+ "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
+ "endpoints_compatible",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "vicuna-7b-v1.5",
+ "name": "Vicuna 7B V1.5",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmsys/vicuna-7b-v1.5",
+ "description": "Open source model lmsys/vicuna-7b-v1.5. 387 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 387,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "llama",
+ "arxiv:2307.09288",
+ "arxiv:2306.05685",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.2-1b-instruct-q8_0-gguf",
+ "name": "Llama 3.2 1B Instruct Q8_0 Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF",
+ "description": "Open source model hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF. 43 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 43,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "gguf",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama",
+ "llama-3",
+ "llama-cpp",
+ "gguf-my-repo",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.2-1B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
+ "endpoints_compatible",
+ "region:us",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-3.3-70b-instruct-awq",
+ "name": "Llama 3.3 70B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/kosbu/Llama-3.3-70B-Instruct-AWQ",
+ "description": "Open source model kosbu/Llama-3.3-70B-Instruct-AWQ. 10 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 10,
+ "language": "Python",
+ "license": "llama3.3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-3",
+ "awq",
+ "conversational",
+ "en",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "de",
+ "base_model:meta-llama/Llama-3.3-70B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.3-70B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-32b-fp8",
+ "name": "Qwen3 32B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-32B-FP8",
+ "description": "Open source model Qwen/Qwen3-32B-FP8. 80 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 80,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-32B",
+ "base_model:quantized:Qwen/Qwen3-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt2-xl",
+ "name": "Gpt2 Xl",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/openai-community/gpt2-xl",
+ "description": "Open source model openai-community/gpt2-xl. 373 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 373,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "safetensors",
+ "gpt2",
+ "en",
+ "arxiv:1910.09700",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-4b-instruct-2507-fp8",
+ "name": "Qwen3 4B Instruct 2507 Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507-FP8",
+ "description": "Open source model Qwen/Qwen3-4B-Instruct-2507-FP8. 65 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 65,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-4B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "xlnet-base-cased",
+ "name": "Xlnet Base Cased",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/xlnet/xlnet-base-cased",
+ "description": "Open source model xlnet/xlnet-base-cased. 80 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 80,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "rust",
+ "xlnet",
+ "en",
+ "dataset:bookcorpus",
+ "dataset:wikipedia",
+ "arxiv:1906.08237",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-2-7b-hf",
+ "name": "Llama 2 7B Hf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-2-7b-hf",
+ "description": "Open source model meta-llama/Llama-2-7b-hf. 2268 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2268,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-2",
+ "en",
+ "arxiv:2307.09288",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-math-7b-instruct",
+ "name": "Qwen2.5 Math 7B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Math-7B-Instruct. 89 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 89,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2409.12122",
+ "base_model:Qwen/Qwen2.5-Math-7B",
+ "base_model:finetune:Qwen/Qwen2.5-Math-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-reranker-0.6b",
+ "name": "Qwen3 Reranker 0.6B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Reranker-0.6B",
+ "description": "Open source model Qwen/Qwen3-Reranker-0.6B. 305 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 305,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "text-ranking",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-0.6B-Base",
+ "base_model:finetune:Qwen/Qwen3-0.6B-Base",
+ "text-embeddings-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-1.5b",
+ "name": "Qwen2.5 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B",
+ "description": "Open source model Qwen/Qwen2.5-1.5B. 165 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 165,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-30b-a3b-thinking-2507",
+ "name": "Qwen3 30B A3B Thinking 2507",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Thinking-2507",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-Thinking-2507. 359 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 359,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2402.17463",
+ "arxiv:2407.02490",
+ "arxiv:2501.15383",
+ "arxiv:2404.06654",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "smollm2-135m-instruct",
+ "name": "Smollm2 135M Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct",
+ "description": "Open source model HuggingFaceTB/SmolLM2-135M-Instruct. 292 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 292,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "tensorboard",
+ "onnx",
+ "safetensors",
+ "llama",
+ "transformers.js",
+ "conversational",
+ "en",
+ "arxiv:2502.02737",
+ "base_model:HuggingFaceTB/SmolLM2-135M",
+ "base_model:quantized:HuggingFaceTB/SmolLM2-135M",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-math-1.5b",
+ "name": "Qwen2.5 Math 1.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Math-1.5B",
+ "description": "Open source model Qwen/Qwen2.5-Math-1.5B. 100 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 100,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2409.12122",
+ "base_model:Qwen/Qwen2.5-1.5B",
+ "base_model:finetune:Qwen/Qwen2.5-1.5B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 5,
+ "parameters_active_b": 5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-4.5-air-awq-4bit",
+ "name": "Glm 4.5 Air Awq 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/cyankiwi/GLM-4.5-Air-AWQ-4bit",
+ "description": "Open source model cyankiwi/GLM-4.5-Air-AWQ-4bit. 27 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 27,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe",
+ "conversational",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "base_model:zai-org/GLM-4.5-Air",
+ "base_model:quantized:zai-org/GLM-4.5-Air",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-2-7b-chat-hf",
+ "name": "Llama 2 7B Chat Hf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
+ "description": "Open source model meta-llama/Llama-2-7b-chat-hf. 4705 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4705,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-2",
+ "conversational",
+ "en",
+ "arxiv:2307.09288",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-7b-instruct-gptq-int4",
+ "name": "Qwen2.5 Coder 7B Instruct Gptq Int4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4. 12 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 12,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "gptq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-vl-30b-a3b-instruct-awq",
+ "name": "Qwen3 Vl 30B A3B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ",
+ "description": "Open source model QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ. 38 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 38,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_vl_moe",
+ "image-text-to-text",
+ "AWQ",
+ "vLLM",
+ "conversational",
+ "arxiv:2505.09388",
+ "arxiv:2502.13923",
+ "arxiv:2409.12191",
+ "arxiv:2308.12966",
+ "base_model:Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "base_model:quantized:Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": true
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-8b-base",
+ "name": "Qwen3 8B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-8B-Base",
+ "description": "Open source model Qwen/Qwen3-8B-Base. 82 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 82,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-14b-instruct",
+ "name": "Qwen2.5 Coder 14B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-Coder-14B-Instruct. 140 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 140,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "codeqwen",
+ "chat",
+ "qwen",
+ "qwen-coder",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-Coder-14B",
+ "base_model:finetune:Qwen/Qwen2.5-Coder-14B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 10,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 14,
+ "parameters_active_b": 14,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "stories15m_moe",
+ "name": "Stories15M_Moe",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/ggml-org/stories15M_MOE",
+ "description": "Open source model ggml-org/stories15M_MOE. 5 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 5,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gguf",
+ "mixtral",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "opt-1.3b",
+ "name": "Opt 1.3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/facebook/opt-1.3b",
+ "description": "Open source model facebook/opt-1.3b. 182 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 182,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "opt",
+ "en",
+ "arxiv:2205.01068",
+ "arxiv:2005.14165",
+ "text-generation-inference",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.3,
+ "parameters_active_b": 1.3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "minimax-m2-awq",
+ "name": "Minimax M2 Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/QuantTrio/MiniMax-M2-AWQ",
+ "description": "Open source model QuantTrio/MiniMax-M2-AWQ. 8 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 8,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mixtral",
+ "vLLM",
+ "AWQ",
+ "conversational",
+ "arxiv:2504.07164",
+ "arxiv:2509.06501",
+ "arxiv:2509.13160",
+ "base_model:MiniMaxAI/MiniMax-M2",
+ "base_model:quantized:MiniMaxAI/MiniMax-M2",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-4.7-flash-nvfp4",
+ "name": "Glm 4.7 Flash Nvfp4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/GadflyII/GLM-4.7-Flash-NVFP4",
+ "description": "Open source model GadflyII/GLM-4.7-Flash-NVFP4. 62 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 62,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "moe",
+ "nvfp4",
+ "quantized",
+ "vllm",
+ "glm",
+ "30b",
+ "conversational",
+ "en",
+ "zh",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "hy-mt1.5-7b",
+ "name": "Hy Mt1.5 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/HY-MT1.5-7B",
+ "description": "Open source model tencent/HY-MT1.5-7B. 133 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 133,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "hunyuan_v1_dense",
+ "translation",
+ "zh",
+ "en",
+ "fr",
+ "pt",
+ "es",
+ "ja",
+ "tr",
+ "ru",
+ "ar",
+ "ko",
+ "th",
+ "it",
+ "de",
+ "vi",
+ "ms",
+ "id",
+ "tl",
+ "hi",
+ "pl",
+ "cs",
+ "nl",
+ "km",
+ "my",
+ "fa",
+ "gu",
+ "ur",
+ "te",
+ "mr",
+ "he",
+ "bn",
+ "ta",
+ "uk",
+ "bo",
+ "kk",
+ "mn",
+ "ug",
+ "arxiv:2512.24092",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gemma-2-27b-it",
+ "name": "Gemma 2 27B It",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google/gemma-2-27b-it",
+ "description": "Open source model google/gemma-2-27b-it. 559 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 559,
+ "language": "Python",
+ "license": "gemma",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gemma2",
+ "conversational",
+ "arxiv:2009.03300",
+ "arxiv:1905.07830",
+ "arxiv:1911.11641",
+ "arxiv:1904.09728",
+ "arxiv:1905.10044",
+ "arxiv:1907.10641",
+ "arxiv:1811.00937",
+ "arxiv:1809.02789",
+ "arxiv:1911.01547",
+ "arxiv:1705.03551",
+ "arxiv:2107.03374",
+ "arxiv:2108.07732",
+ "arxiv:2110.14168",
+ "arxiv:2009.11462",
+ "arxiv:2101.11718",
+ "arxiv:2110.08193",
+ "arxiv:1804.09301",
+ "arxiv:2109.07958",
+ "arxiv:1804.06876",
+ "arxiv:2103.03874",
+ "arxiv:2304.06364",
+ "arxiv:2206.04615",
+ "arxiv:2203.09509",
+ "base_model:google/gemma-2-27b",
+ "base_model:finetune:google/gemma-2-27b",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 19,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 27,
+ "parameters_active_b": 27,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-coder-next-gguf",
+ "name": "Qwen3 Coder Next Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Qwen3-Coder-Next-GGUF",
+ "description": "Open source model unsloth/Qwen3-Coder-Next-GGUF. 347 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 347,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "gguf",
+ "qwen3_next",
+ "unsloth",
+ "qwen",
+ "qwen3",
+ "base_model:Qwen/Qwen3-Coder-Next",
+ "base_model:quantized:Qwen/Qwen3-Coder-Next",
+ "endpoints_compatible",
+ "region:us",
+ "imatrix",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gte-qwen2-1.5b-instruct",
+ "name": "Gte Qwen2 1.5B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Alibaba-NLP/gte-Qwen2-1.5B-instruct",
+ "description": "Open source model Alibaba-NLP/gte-Qwen2-1.5B-instruct. 229 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 229,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "sentence-transformers",
+ "safetensors",
+ "qwen2",
+ "mteb",
+ "transformers",
+ "Qwen2",
+ "sentence-similarity",
+ "custom_code",
+ "arxiv:2308.03281",
+ "model-index",
+ "text-embeddings-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.5,
+ "parameters_active_b": 1.5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "lfm2-1.2b",
+ "name": "Lfm2 1.2B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/LiquidAI/LFM2-1.2B",
+ "description": "Open source model LiquidAI/LFM2-1.2B. 349 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 349,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "lfm2",
+ "liquid",
+ "edge",
+ "conversational",
+ "en",
+ "ar",
+ "zh",
+ "fr",
+ "de",
+ "ja",
+ "ko",
+ "es",
+ "arxiv:2511.23404",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "saiga_llama3_8b",
+ "name": "Saiga_Llama3_8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/IlyaGusev/saiga_llama3_8b",
+ "description": "Open source model IlyaGusev/saiga_llama3_8b. 137 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 137,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "ru",
+ "dataset:IlyaGusev/saiga_scored",
+ "doi:10.57967/hf/2368",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-1.7b-base",
+ "name": "Qwen3 1.7B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-1.7B-Base",
+ "description": "Open source model Qwen/Qwen3-1.7B-Base. 62 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 62,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1.7,
+ "parameters_active_b": 1.7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "mistral-7b-v0.3-bnb-4bit",
+ "name": "Mistral 7B V0.3 Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/mistral-7b-v0.3-bnb-4bit",
+ "description": "Open source model unsloth/mistral-7b-v0.3-bnb-4bit. 22 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 22,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mistral",
+ "unsloth",
+ "mistral-7b",
+ "en",
+ "base_model:mistralai/Mistral-7B-v0.3",
+ "base_model:quantized:mistralai/Mistral-7B-v0.3",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gemma-2-2b-it",
+ "name": "Gemma 2 2B It",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/google/gemma-2-2b-it",
+ "description": "Open source model google/gemma-2-2b-it. 1285 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1285,
+ "language": "Python",
+ "license": "gemma",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gemma2",
+ "conversational",
+ "arxiv:2009.03300",
+ "arxiv:1905.07830",
+ "arxiv:1911.11641",
+ "arxiv:1904.09728",
+ "arxiv:1905.10044",
+ "arxiv:1907.10641",
+ "arxiv:1811.00937",
+ "arxiv:1809.02789",
+ "arxiv:1911.01547",
+ "arxiv:1705.03551",
+ "arxiv:2107.03374",
+ "arxiv:2108.07732",
+ "arxiv:2110.14168",
+ "arxiv:2009.11462",
+ "arxiv:2101.11718",
+ "arxiv:2110.08193",
+ "arxiv:1804.09301",
+ "arxiv:2109.07958",
+ "arxiv:1804.06876",
+ "arxiv:2103.03874",
+ "arxiv:2304.06364",
+ "arxiv:1903.00161",
+ "arxiv:2206.04615",
+ "arxiv:2203.09509",
+ "arxiv:2403.13793",
+ "base_model:google/gemma-2-2b",
+ "base_model:finetune:google/gemma-2-2b",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 2,
+ "parameters_active_b": 2,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "phi-4-multimodal-instruct",
+ "name": "Phi 4 Multimodal Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-4-multimodal-instruct",
+ "description": "Open source model microsoft/Phi-4-multimodal-instruct. 1573 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1573,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi4mm",
+ "nlp",
+ "code",
+ "audio",
+ "automatic-speech-recognition",
+ "speech-summarization",
+ "speech-translation",
+ "visual-question-answering",
+ "phi-4-multimodal",
+ "phi",
+ "phi-4-mini",
+ "custom_code",
+ "multilingual",
+ "ar",
+ "zh",
+ "cs",
+ "da",
+ "nl",
+ "en",
+ "fi",
+ "fr",
+ "de",
+ "he",
+ "hu",
+ "it",
+ "ja",
+ "ko",
+ "no",
+ "pl",
+ "pt",
+ "ru",
+ "es",
+ "sv",
+ "th",
+ "tr",
+ "uk",
+ "arxiv:2503.01743",
+ "arxiv:2407.13833",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "pythia-70m-deduped",
+ "name": "Pythia 70M Deduped",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/EleutherAI/pythia-70m-deduped",
+ "description": "Open source model EleutherAI/pythia-70m-deduped. 27 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 27,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "gpt_neox",
+ "causal-lm",
+ "pythia",
+ "en",
+ "dataset:EleutherAI/the_pile_deduplicated",
+ "arxiv:2304.01373",
+ "arxiv:2101.00027",
+ "arxiv:2201.07311",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "dialogpt-medium",
+ "name": "Dialogpt Medium",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/DialoGPT-medium",
+ "description": "Open source model microsoft/DialoGPT-medium. 433 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 433,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "jax",
+ "rust",
+ "gpt2",
+ "conversational",
+ "arxiv:1911.00536",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "gpt-oss-20b-bf16",
+ "name": "Gpt Oss 20B Bf16",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/gpt-oss-20b-BF16",
+ "description": "Open source model unsloth/gpt-oss-20b-BF16. 29 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 29,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "gpt_oss",
+ "vllm",
+ "unsloth",
+ "conversational",
+ "base_model:openai/gpt-oss-20b",
+ "base_model:finetune:openai/gpt-oss-20b",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 14,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 20,
+ "parameters_active_b": 20,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-72b-instruct",
+ "name": "Qwen2.5 72B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct",
+ "description": "Open source model Qwen/Qwen2.5-72B-Instruct. 910 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 910,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-72B",
+ "base_model:finetune:Qwen/Qwen2.5-72B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 50,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 72,
+ "parameters_active_b": 72,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-32b-awq",
+ "name": "Qwen3 32B Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-32B-AWQ",
+ "description": "Open source model Qwen/Qwen3-32B-AWQ. 125 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 125,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-32B",
+ "base_model:quantized:Qwen/Qwen3-32B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "mimo-v2-flash",
+ "name": "Mimo V2 Flash",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/XiaomiMiMo/MiMo-V2-Flash",
+ "description": "Open source model XiaomiMiMo/MiMo-V2-Flash. 628 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 628,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mimo_v2_flash",
+ "conversational",
+ "custom_code",
+ "eval-results",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-coder-30b-a3b-instruct-fp8",
+ "name": "Qwen3 Coder 30B A3B Instruct Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8",
+ "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8. 158 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 158,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2505.09388",
+ "endpoints_compatible",
+ "fp8",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-8b-fp8",
+ "name": "Qwen3 8B Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-8B-FP8",
+ "description": "Open source model Qwen/Qwen3-8B-FP8. 56 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 56,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-8B",
+ "base_model:quantized:Qwen/Qwen3-8B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-v3.2",
+ "name": "Deepseek V3.2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3.2",
+ "description": "Open source model deepseek-ai/DeepSeek-V3.2. 1251 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1251,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v32",
+ "conversational",
+ "base_model:deepseek-ai/DeepSeek-V3.2-Exp-Base",
+ "base_model:finetune:deepseek-ai/DeepSeek-V3.2-Exp-Base",
+ "eval-results",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-coder-next",
+ "name": "Qwen3 Coder Next",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Coder-Next",
+ "description": "Open source model Qwen/Qwen3-Coder-Next. 912 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 912,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_next",
+ "conversational",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2-0.5b",
+ "name": "Qwen2 0.5B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2-0.5B",
+ "description": "Open source model Qwen/Qwen2-0.5B. 164 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 164,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "pretrained",
+ "conversational",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0.5,
+ "parameters_active_b": 0.5,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "mistral-7b-v0.1",
+ "name": "Mistral 7B V0.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/mistralai/Mistral-7B-v0.1",
+ "description": "Open source model mistralai/Mistral-7B-v0.1. 4042 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 4042,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "mistral",
+ "pretrained",
+ "mistral-common",
+ "en",
+ "arxiv:2310.06825",
+ "text-generation-inference",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "kimi-k2-thinking",
+ "name": "Kimi K2 Thinking",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/moonshotai/Kimi-K2-Thinking",
+ "description": "Open source model moonshotai/Kimi-K2-Thinking. 1670 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1670,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "kimi_k2",
+ "conversational",
+ "custom_code",
+ "eval-results",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-0528-qwen3-8b-mlx-4bit",
+ "name": "Deepseek R1 0528 Qwen3 8B Mlx 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit",
+ "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit. 7 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 7,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "4-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-7b-instruct-awq",
+ "name": "Qwen2.5 7B Instruct Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-AWQ",
+ "description": "Open source model Qwen/Qwen2.5-7B-Instruct-AWQ. 36 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 36,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-7B-Instruct",
+ "base_model:quantized:Qwen/Qwen2.5-7B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "points-reader",
+ "name": "Points Reader",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tencent/POINTS-Reader",
+ "description": "Open source model tencent/POINTS-Reader. 100 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 100,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "image-text-to-text",
+ "conversational",
+ "custom_code",
+ "arxiv:2509.01215",
+ "arxiv:2412.08443",
+ "arxiv:2409.04828",
+ "arxiv:2405.11850",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": true
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-4b-base",
+ "name": "Qwen3 4B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-4B-Base",
+ "description": "Open source model Qwen/Qwen3-4B-Base. 80 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 80,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "step-3.5-flash",
+ "name": "Step 3.5 Flash",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/stepfun-ai/Step-3.5-Flash",
+ "description": "Open source model stepfun-ai/Step-3.5-Flash. 621 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 621,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "step3p5",
+ "conversational",
+ "custom_code",
+ "arxiv:2602.10604",
+ "arxiv:2601.05593",
+ "arxiv:2507.19427",
+ "eval-results",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "kogpt2-base-v2",
+ "name": "Kogpt2 Base V2",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/skt/kogpt2-base-v2",
+ "description": "Open source model skt/kogpt2-base-v2. 60 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 60,
+ "language": "Python",
+ "license": "cc-by-nc-sa-4.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "jax",
+ "gpt2",
+ "ko",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "parler-tts-mini-multilingual-v1.1",
+ "name": "Parler Tts Mini Multilingual V1.1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/parler-tts/parler-tts-mini-multilingual-v1.1",
+ "description": "Open source model parler-tts/parler-tts-mini-multilingual-v1.1. 54 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 54,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "parler_tts",
+ "text-to-speech",
+ "annotation",
+ "en",
+ "fr",
+ "es",
+ "pt",
+ "pl",
+ "de",
+ "nl",
+ "it",
+ "dataset:facebook/multilingual_librispeech",
+ "dataset:parler-tts/libritts_r_filtered",
+ "dataset:parler-tts/libritts-r-filtered-speaker-descriptions",
+ "dataset:parler-tts/mls_eng",
+ "dataset:parler-tts/mls-eng-speaker-descriptions",
+ "dataset:ylacombe/mls-annotated",
+ "dataset:ylacombe/cml-tts-filtered-annotated",
+ "dataset:PHBJT/cml-tts-filtered",
+ "arxiv:2402.01912",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-reranker-8b",
+ "name": "Qwen3 Reranker 8B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-Reranker-8B",
+ "description": "Open source model Qwen/Qwen3-Reranker-8B. 213 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 213,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "text-ranking",
+ "arxiv:2506.05176",
+ "base_model:Qwen/Qwen3-8B-Base",
+ "base_model:finetune:Qwen/Qwen3-8B-Base",
+ "text-embeddings-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-0528-qwen3-8b-mlx-8bit",
+ "name": "Deepseek R1 0528 Qwen3 8B Mlx 8Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit",
+ "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit. 13 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "mlx",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
+ "8-bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "powermoe-3b",
+ "name": "Powermoe 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/ibm-research/PowerMoE-3b",
+ "description": "Open source model ibm-research/PowerMoE-3b. 14 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 14,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "granitemoe",
+ "arxiv:2408.13359",
+ "model-index",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llada-8b-instruct",
+ "name": "Llada 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/GSAI-ML/LLaDA-8B-Instruct",
+ "description": "Open source model GSAI-ML/LLaDA-8B-Instruct. 342 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 342,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llada",
+ "conversational",
+ "custom_code",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "apertus-8b-instruct-2509",
+ "name": "Apertus 8B Instruct 2509",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/swiss-ai/Apertus-8B-Instruct-2509",
+ "description": "Open source model swiss-ai/Apertus-8B-Instruct-2509. 435 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 435,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "apertus",
+ "multilingual",
+ "compliant",
+ "swiss-ai",
+ "conversational",
+ "arxiv:2509.14233",
+ "base_model:swiss-ai/Apertus-8B-2509",
+ "base_model:finetune:swiss-ai/Apertus-8B-2509",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-30b-a3b-gptq-int4",
+ "name": "Qwen3 30B A3B Gptq Int4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-GPTQ-Int4",
+ "description": "Open source model Qwen/Qwen3-30B-A3B-GPTQ-Int4. 45 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 45,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3_moe",
+ "conversational",
+ "arxiv:2309.00071",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-30B-A3B",
+ "base_model:quantized:Qwen/Qwen3-30B-A3B",
+ "endpoints_compatible",
+ "4-bit",
+ "gptq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "tinyllama-1.1b-chat-v0.3-gptq",
+ "name": "Tinyllama 1.1B Chat V0.3 Gptq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ",
+ "description": "Open source model TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ. 9 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 9,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "en",
+ "dataset:cerebras/SlimPajama-627B",
+ "dataset:bigcode/starcoderdata",
+ "dataset:OpenAssistant/oasst_top1_2023-08-25",
+ "base_model:TinyLlama/TinyLlama-1.1B-Chat-v0.3",
+ "base_model:quantized:TinyLlama/TinyLlama-1.1B-Chat-v0.3",
+ "text-generation-inference",
+ "4-bit",
+ "gptq",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 1,
+ "parameters_active_b": 1,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "prot_t5_xl_bfd",
+ "name": "Prot_T5_Xl_Bfd",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Rostlab/prot_t5_xl_bfd",
+ "description": "Open source model Rostlab/prot_t5_xl_bfd. 10 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 10,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "tf",
+ "t5",
+ "protein language model",
+ "dataset:BFD",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-4b-instruct-2507-unsloth-bnb-4bit",
+ "name": "Qwen3 4B Instruct 2507 Unsloth Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit",
+ "description": "Open source model unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit. 13 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 13,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "unsloth",
+ "conversational",
+ "arxiv:2505.09388",
+ "base_model:Qwen/Qwen3-4B-Instruct-2507",
+ "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "phi-3.5-mini-instruct",
+ "name": "Phi 3.5 Mini Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/microsoft/Phi-3.5-mini-instruct",
+ "description": "Open source model microsoft/Phi-3.5-mini-instruct. 963 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 963,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "phi3",
+ "nlp",
+ "code",
+ "conversational",
+ "custom_code",
+ "multilingual",
+ "arxiv:2404.14219",
+ "arxiv:2407.13833",
+ "arxiv:2403.06412",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-bnb-4bit",
+ "name": "Meta Llama 3.1 8B Instruct Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
+ "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit. 95 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 95,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "llama-3",
+ "meta",
+ "facebook",
+ "unsloth",
+ "conversational",
+ "en",
+ "arxiv:2204.05149",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-4.7-flash-awq-4bit",
+ "name": "Glm 4.7 Flash Awq 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/cyankiwi/GLM-4.7-Flash-AWQ-4bit",
+ "description": "Open source model cyankiwi/GLM-4.7-Flash-AWQ-4bit. 43 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 43,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm4_moe_lite",
+ "conversational",
+ "en",
+ "zh",
+ "arxiv:2508.06471",
+ "base_model:zai-org/GLM-4.7-Flash",
+ "base_model:quantized:zai-org/GLM-4.7-Flash",
+ "endpoints_compatible",
+ "compressed-tensors",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 3,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 4,
+ "parameters_active_b": 4,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "dots.ocr",
+ "name": "Dots.Ocr",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/rednote-hilab/dots.ocr",
+ "description": "Open source model rednote-hilab/dots.ocr. 1243 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1243,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "dots_ocr",
+ "safetensors",
+ "image-to-text",
+ "ocr",
+ "document-parse",
+ "layout",
+ "table",
+ "formula",
+ "transformers",
+ "custom_code",
+ "image-text-to-text",
+ "conversational",
+ "en",
+ "zh",
+ "multilingual",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "mistral-7b-bnb-4bit",
+ "name": "Mistral 7B Bnb 4Bit",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/mistral-7b-bnb-4bit",
+ "description": "Open source model unsloth/mistral-7b-bnb-4bit. 30 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 30,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "mistral",
+ "unsloth",
+ "mistral-7b",
+ "bnb",
+ "en",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "4-bit",
+ "bitsandbytes",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "glm-5-fp8",
+ "name": "Glm 5 Fp8",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/zai-org/GLM-5-FP8",
+ "description": "Open source model zai-org/GLM-5-FP8. 108 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 108,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "glm_moe_dsa",
+ "conversational",
+ "en",
+ "zh",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen-7b",
+ "name": "Qwen 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen-7B",
+ "description": "Open source model Qwen/Qwen-7B. 395 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 395,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen",
+ "custom_code",
+ "zh",
+ "en",
+ "arxiv:2309.16609",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwq-32b-awq",
+ "name": "Qwq 32B Awq",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/QwQ-32B-AWQ",
+ "description": "Open source model Qwen/QwQ-32B-AWQ. 133 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 133,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "qwen2",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2309.00071",
+ "arxiv:2412.15115",
+ "base_model:Qwen/QwQ-32B",
+ "base_model:quantized:Qwen/QwQ-32B",
+ "4-bit",
+ "awq",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 22,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 32,
+ "parameters_active_b": 32,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-r1-distill-llama-70b",
+ "name": "Deepseek R1 Distill Llama 70B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+ "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-70B. 741 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 741,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "conversational",
+ "arxiv:2501.12948",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 49,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 70,
+ "parameters_active_b": 70,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-coder-7b",
+ "name": "Qwen2.5 Coder 7B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B",
+ "description": "Open source model Qwen/Qwen2.5-Coder-7B. 134 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 134,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen2",
+ "code",
+ "qwen",
+ "qwen-coder",
+ "codeqwen",
+ "conversational",
+ "en",
+ "arxiv:2409.12186",
+ "arxiv:2309.00071",
+ "arxiv:2407.10671",
+ "base_model:Qwen/Qwen2.5-7B",
+ "base_model:finetune:Qwen/Qwen2.5-7B",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen2.5-3b",
+ "name": "Qwen2.5 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen2.5-3B",
+ "description": "Open source model Qwen/Qwen2.5-3B. 169 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 169,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "safetensors",
+ "qwen2",
+ "conversational",
+ "en",
+ "arxiv:2407.10671",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-v2-lite-chat",
+ "name": "Deepseek V2 Lite Chat",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat",
+ "description": "Open source model deepseek-ai/DeepSeek-V2-Lite-Chat. 133 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 133,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v2",
+ "conversational",
+ "custom_code",
+ "arxiv:2405.04434",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "tiny-qwen3forcausallm",
+ "name": "Tiny Qwen3Forcausallm",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen3ForCausalLM",
+ "description": "Open source model trl-internal-testing/tiny-Qwen3ForCausalLM. 1 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "unknown",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "trl",
+ "conversational",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-coder-v2-lite-instruct",
+ "name": "Deepseek Coder V2 Lite Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
+ "description": "Open source model deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct. 539 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 539,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v2",
+ "conversational",
+ "custom_code",
+ "arxiv:2401.06066",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen3-0.6b-base",
+ "name": "Qwen3 0.6B Base",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen3-0.6B-Base",
+ "description": "Open source model Qwen/Qwen3-0.6B-Base. 146 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 146,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen3",
+ "conversational",
+ "arxiv:2505.09388",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 4,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 6,
+ "parameters_active_b": 6,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "diffractgpt_mistral_chemical_formula",
+ "name": "Diffractgpt_Mistral_Chemical_Formula",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/knc6/diffractgpt_mistral_chemical_formula",
+ "description": "Open source model knc6/diffractgpt_mistral_chemical_formula. 1 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "peft",
+ "safetensors",
+ "chemistry",
+ "text-generation-inference",
+ "atomgpt",
+ "diffraction",
+ "en",
+ "base_model:unsloth/mistral-7b-bnb-4bit",
+ "base_model:adapter:unsloth/mistral-7b-bnb-4bit",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "qwen-7b-chat",
+ "name": "Qwen 7B Chat",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Qwen/Qwen-7B-Chat",
+ "description": "Open source model Qwen/Qwen-7B-Chat. 787 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 787,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "qwen",
+ "custom_code",
+ "zh",
+ "en",
+ "arxiv:2309.16609",
+ "arxiv:2305.08322",
+ "arxiv:2009.03300",
+ "arxiv:2305.05280",
+ "arxiv:2210.03629",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 5,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 7,
+ "parameters_active_b": 7,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "nvidia-nemotron-3-nano-30b-a3b-nvfp4",
+ "name": "Nvidia Nemotron 3 Nano 30B A3B Nvfp4",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4",
+ "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4. 100 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 100,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "nemotron_h",
+ "feature-extraction",
+ "nvidia",
+ "pytorch",
+ "conversational",
+ "custom_code",
+ "en",
+ "es",
+ "fr",
+ "de",
+ "ja",
+ "it",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v1",
+ "dataset:nvidia/Nemotron-CC-v2",
+ "dataset:nvidia/Nemotron-Pretraining-SFT-v1",
+ "dataset:nvidia/Nemotron-CC-Math-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Code-v2",
+ "dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
+ "dataset:nvidia/Nemotron-CC-v2.1",
+ "dataset:nvidia/Nemotron-CC-Code-v1",
+ "dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
+ "dataset:nvidia/Nemotron-Competitive-Programming-v1",
+ "dataset:nvidia/Nemotron-Math-v2",
+ "dataset:nvidia/Nemotron-Agentic-v1",
+ "dataset:nvidia/Nemotron-Math-Proofs-v1",
+ "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
+ "dataset:nvidia/Nemotron-Science-v1",
+ "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
+ "arxiv:2512.20848",
+ "arxiv:2512.20856",
+ "arxiv:2601.20088",
+ "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
+ "region:us"
+ ],
+ "hardware_req": "24GB+ VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 21,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 30,
+ "parameters_active_b": 30,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "falcon-h1-tiny-90m-instruct",
+ "name": "Falcon H1 Tiny 90M Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/tiiuae/Falcon-H1-Tiny-90M-Instruct",
+ "description": "Open source model tiiuae/Falcon-H1-Tiny-90M-Instruct. 31 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 31,
+ "language": "Python",
+ "license": "other",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "falcon_h1",
+ "falcon-h1",
+ "edge",
+ "conversational",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "hermes-3-llama-3.2-3b",
+ "name": "Hermes 3 Llama 3.2 3B",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.2-3B",
+ "description": "Open source model NousResearch/Hermes-3-Llama-3.2-3B. 174 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 174,
+ "language": "Python",
+ "license": "llama3",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "Llama-3",
+ "instruct",
+ "finetune",
+ "chatml",
+ "gpt4",
+ "synthetic data",
+ "distillation",
+ "function calling",
+ "json mode",
+ "axolotl",
+ "roleplaying",
+ "chat",
+ "conversational",
+ "en",
+ "arxiv:2408.11857",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "deploy:azure",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 2,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 3,
+ "parameters_active_b": 3,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct",
+ "name": "Meta Llama 3.1 8B Instruct",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct",
+ "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct. 94 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 94,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "llama-3",
+ "meta",
+ "facebook",
+ "unsloth",
+ "conversational",
+ "en",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:finetune:meta-llama/Llama-3.1-8B-Instruct",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "meta-llama-3.1-8b-instruct-gguf",
+ "name": "Meta Llama 3.1 8B Instruct Gguf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF",
+ "description": "Open source model bartowski/Meta-Llama-3.1-8B-Instruct-GGUF. 321 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 321,
+ "language": "Python",
+ "license": "llama3.1",
+ "tags": [
+ "AI",
+ "LLM",
+ "gguf",
+ "facebook",
+ "meta",
+ "pytorch",
+ "llama",
+ "llama-3",
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th",
+ "base_model:meta-llama/Llama-3.1-8B-Instruct",
+ "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
+ "endpoints_compatible",
+ "region:us",
+ "conversational"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 6,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 8,
+ "parameters_active_b": 8,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "deepseek-v3-0324",
+ "name": "Deepseek V3 0324",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3-0324",
+ "description": "Open source model deepseek-ai/DeepSeek-V3-0324. 3087 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 3087,
+ "language": "Python",
+ "license": "mit",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "deepseek_v3",
+ "conversational",
+ "custom_code",
+ "arxiv:2412.19437",
+ "eval-results",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "fp8",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "elm",
+ "name": "Elm",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/Joaoffg/ELM",
+ "description": "Open source model Joaoffg/ELM. 2 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 2,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "academic",
+ "university",
+ "en",
+ "nl",
+ "arxiv:2408.06931",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "llama-2-13b-chat-hf",
+ "name": "Llama 2 13B Chat Hf",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf",
+ "description": "Open source model meta-llama/Llama-2-13b-chat-hf. 1109 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 1109,
+ "language": "Python",
+ "license": "llama2",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "pytorch",
+ "safetensors",
+ "llama",
+ "facebook",
+ "meta",
+ "llama-2",
+ "conversational",
+ "en",
+ "arxiv:2307.09288",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "16GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 9,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 13,
+ "parameters_active_b": 13,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ },
+ {
+ "slug": "svara-tts-v1",
+ "name": "Svara Tts V1",
+ "category": "AI Models",
+ "is_open_source": true,
+ "website": "https://huggingface.co/kenpath/svara-tts-v1",
+ "description": "Open source model kenpath/svara-tts-v1. 18 likes on Hugging Face.",
+ "pros": [
+ "Open Source",
+ "Running Locally"
+ ],
+ "cons": [
+ "Requires GPU"
+ ],
+ "stars": 18,
+ "language": "Python",
+ "license": "apache-2.0",
+ "tags": [
+ "AI",
+ "LLM",
+ "transformers",
+ "safetensors",
+ "llama",
+ "text-to-speech",
+ "speech-synthesis",
+ "multilingual",
+ "indic",
+ "orpheus",
+ "lora",
+ "low-latency",
+ "gguf",
+ "zero-shot",
+ "emotions",
+ "discrete-audio-tokens",
+ "hi",
+ "bn",
+ "mr",
+ "te",
+ "kn",
+ "bho",
+ "mag",
+ "hne",
+ "mai",
+ "as",
+ "brx",
+ "doi",
+ "gu",
+ "ml",
+ "pa",
+ "ta",
+ "ne",
+ "sa",
+ "en",
+ "dataset:SYSPIN",
+ "dataset:RASA",
+ "dataset:IndicTTS",
+ "dataset:SPICOR",
+ "base_model:canopylabs/3b-hi-ft-research_release",
+ "base_model:adapter:canopylabs/3b-hi-ft-research_release",
+ "text-generation-inference",
+ "endpoints_compatible",
+ "region:us"
+ ],
+ "hardware_req": "8GB VRAM",
+ "hosting_type": "self-hosted",
+ "ai_metadata": {
+ "vram_inference_gb": 1,
+ "context_window_tokens": 4096,
+ "parameters_total_b": 0,
+ "parameters_active_b": 0,
+ "is_multimodal": false
+ },
+ "referral_url": ""
+ }
+]
\ No newline at end of file
diff --git a/docs/app/_meta.ts b/docs/app/_meta.ts
new file mode 100644
index 0000000..e58811a
--- /dev/null
+++ b/docs/app/_meta.ts
@@ -0,0 +1,36 @@
+import type { MetaRecord } from 'nextra'
+
+const meta: MetaRecord = {
+ index: {
+ title: 'Home',
+ type: 'page',
+ display: 'hidden',
+ },
+ why: {
+ title: '📜 Why These Docs Exist',
+ },
+ 'quick-start': {
+ title: '🚀 Quick Start',
+ },
+ deploy: {
+ title: '📦 Deploy Guides',
+ },
+ stacks: {
+ title: '🔥 Stacks',
+ },
+ concepts: {
+ title: '🧠 Concepts',
+ },
+ // -- External links --
+ directory: {
+ title: '← Back to Directory',
+ href: 'https://thealtstack.com',
+ type: 'page',
+ },
+ contact: {
+ title: 'Contact Us',
+ display: 'hidden'
+ },
+}
+
+export default meta
diff --git a/docs/app/concepts/_meta.ts b/docs/app/concepts/_meta.ts
new file mode 100644
index 0000000..33cd761
--- /dev/null
+++ b/docs/app/concepts/_meta.ts
@@ -0,0 +1,33 @@
+import type { MetaRecord } from 'nextra'
+
+const meta: MetaRecord = {
+ 'docker-basics': {
+ title: 'Docker in 10 Minutes',
+ },
+ networking: {
+ title: 'Networking for Self-Hosters',
+ },
+ 'reverse-proxies': {
+ title: 'Reverse Proxies Explained',
+ },
+ 'ssl-tls': {
+ title: 'SSL/TLS for Self-Hosters',
+ },
+ 'env-secrets': {
+ title: 'Environment Variables & Secrets',
+ },
+ monitoring: {
+ title: 'Monitoring & Observability',
+ },
+ updates: {
+ title: 'Updating & Maintaining Containers',
+ },
+ backups: {
+ title: 'Backups That Actually Work',
+ },
+ hardware: {
+ title: 'Hardware & VPS Sizing',
+ },
+}
+
+export default meta
diff --git a/docs/app/concepts/backups/page.mdx b/docs/app/concepts/backups/page.mdx
new file mode 100644
index 0000000..3cd6d39
--- /dev/null
+++ b/docs/app/concepts/backups/page.mdx
@@ -0,0 +1,103 @@
+---
+title: Backups That Actually Work
+description: "How to back up your self-hosted tools. Docker volumes, database dumps, and automated backup scripts that run while you sleep."
+---
+
+# Backups That Actually Work
+
+Self-hosting means *you're* responsible for your data. No "Contact Support to restore from backup." **You are the support.**
+
+The good news: backing up Docker-based tools is simple once you set up a system.
+
+## What to Back Up
+
+| Component | Where It Lives | How to Back Up |
+|---|---|---|
+| **Docker volumes** | `/var/lib/docker/volumes/` | Volume export or rsync |
+| **Databases (Postgres)** | Inside a Docker container | `pg_dump` |
+| **Config files** | Your `docker-compose.yml` and `.env` | Git or file copy |
+
+> ⚠️ **Heads Up:** `docker-compose.yml` files are easy to recreate. Database data is not. Prioritize database backups above everything else.
+
+## Method 1: Database Dumps (Essential)
+
+Most self-hosted tools use PostgreSQL. Here's how to dump it:
+
+```bash
+# Dump a Postgres database running in a container
+docker exec your-db-container \
+ pg_dump -U postgres your_database > backup_$(date +%Y%m%d).sql
+```
+
+To restore:
+
+```bash
+cat backup_20260218.sql | docker exec -i your-db-container \
+ psql -U postgres your_database
+```
+
+## Method 2: Volume Backup
+
+For tools that store data in Docker volumes:
+
+```bash
+# Find your volumes
+docker volume ls
+
+# Backup a volume to a tar file
+docker run --rm \
+ -v my_volume:/data \
+ -v $(pwd)/backups:/backup \
+ alpine tar czf /backup/my_volume_backup.tar.gz /data
+```
+
+## Method 3: Automated Script
+
+Create a backup script that runs daily via cron:
+
+```bash
+#!/bin/bash
+# /opt/backup.sh
+
+BACKUP_DIR="/opt/backups"
+DATE=$(date +%Y%m%d_%H%M)
+mkdir -p $BACKUP_DIR
+
+# Dump Postgres databases
+docker exec supabase-db pg_dump -U postgres postgres > $BACKUP_DIR/supabase_$DATE.sql
+docker exec plausible_db pg_dump -U postgres plausible_db > $BACKUP_DIR/plausible_$DATE.sql
+
+# Clean backups older than 7 days
+find $BACKUP_DIR -name "*.sql" -mtime +7 -delete
+
+echo "Backup complete: $DATE"
+```
+
+Add to cron:
+
+```bash
+# Run at 3 AM every day
+crontab -e
+# Add this line:
+0 3 * * * /opt/backup.sh >> /var/log/backup.log 2>&1
+```
+
+## The 3-2-1 Rule
+
+For serious setups, follow the **3-2-1 backup rule**:
+
+- **3** copies of your data
+- **2** different storage types (local + remote)
+- **1** offsite copy (rsync to another server, or upload to B2/S3)
+
+```bash
+# Sync backups to a remote server
+rsync -avz /opt/backups/ user@backup-server:/backups/
+```
+
+## Next Steps
+
+You now have the four foundational concepts: Docker, reverse proxies, SSL, and backups. Time to build:
+
+→ [Deploy Guides](/deploy) — 65+ tools ready to deploy
+→ [The Bootstrapper Stack](/stacks/bootstrapper) — A complete SaaS toolkit
diff --git a/docs/app/concepts/docker-basics/page.mdx b/docs/app/concepts/docker-basics/page.mdx
new file mode 100644
index 0000000..c1343d2
--- /dev/null
+++ b/docs/app/concepts/docker-basics/page.mdx
@@ -0,0 +1,127 @@
+---
+title: Understanding Docker in 10 Minutes
+description: "Docker explained for self-hosters. No CS degree required. Containers, images, volumes, and Docker Compose — the only concepts you actually need."
+---
+
+# Understanding Docker in 10 Minutes
+
+Docker is the reason self-hosting went from "sysadmin hobby" to "anyone can do it." It packages software into neat, isolated containers that run the same everywhere.
+
+You don't need to become a Docker expert. You need to understand **four concepts**.
+
+## Concept 1: Images
+
+An **image** is a snapshot of software — pre-built, pre-configured, ready to run. Think of it like an `.iso` file, but for apps.
+
+```bash
+# Download the Plausible Analytics image
+docker pull plausible/analytics:latest
+```
+
+Images live on [Docker Hub](https://hub.docker.com) — a public registry of 100,000+ images. When our deploy guides say `image: plausible/analytics:latest`, they're pulling from here.
+
+## Concept 2: Containers
+
+A **container** is a running instance of an image. Image = blueprint. Container = the actual building.
+
+```bash
+# Start a container from an image
+docker run -d --name my-plausible plausible/analytics:latest
+
+# See running containers
+docker ps
+
+# Stop a container
+docker stop my-plausible
+
+# Remove a container (data in volumes is safe)
+docker rm my-plausible
+```
+
+> 💡 **Why?** Containers are isolated from each other and from your host system. Breaking one container doesn't break anything else.
+
+## Concept 3: Volumes
+
+**Volumes** store your data *outside* the container. This is critical because containers are disposable — when you update an image, you destroy the old container and create a new one. Volumes survive this process.
+
+```bash
+# Mount a volume called "plausible-data"
+docker run -v plausible-data:/var/lib/clickhouse plausible/analytics
+```
+
+Without volumes, your data dies when the container dies. **Always use volumes.**
+
+```bash
+# List all volumes
+docker volume ls
+
+# Backup a volume (copy to local tar)
+docker run --rm -v plausible-data:/data -v $(pwd):/backup alpine \
+ tar czf /backup/plausible-backup.tar.gz /data
+```
+
+## Concept 4: Docker Compose
+
+This is the big one. **Docker Compose** lets you define multi-container setups in a single YAML file. Most real-world tools need multiple containers (app + database + cache), and Docker Compose handles that.
+
+```yaml
+# docker-compose.yml
+# (the top-level "version" key is obsolete in Compose v2 and can be omitted)
+
+services:
+ app:
+ image: plausible/analytics:latest
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+
+ db:
+ image: postgres:14-alpine
+ volumes:
+ - db_data:/var/lib/postgresql/data
+ environment:
+ POSTGRES_PASSWORD: supersecret
+
+volumes:
+ db_data:
+```
+
+Then run it:
+
+```bash
+# Start everything
+docker compose up -d
+
+# See logs
+docker compose logs -f
+
+# Stop everything
+docker compose down
+
+# Update to latest images
+docker compose pull && docker compose up -d
+```
+
+That's the pattern for **every single deploy guide** in these docs:
+1. Copy the `docker-compose.yml`
+2. Tweak the environment variables
+3. Run `docker compose up -d`
+4. Done.
+
+## The 5 Commands You'll Actually Use
+
+| Command | What it does |
+|---|---|
+| `docker compose up -d` | Start all services in the background |
+| `docker compose down` | Stop all services |
+| `docker compose logs -f` | Watch live logs (Ctrl+C to exit) |
+| `docker compose pull` | Download latest images |
+| `docker ps` | List running containers |
+
+That's it. That's Docker for self-hosters.
+
+## Next Steps
+
+→ [Reverse Proxies Explained](/concepts/reverse-proxies) — How to access your tools via `app.yourdomain.com`
+→ [Your First Deployment](/quick-start/first-deployment) — Put this knowledge to use
diff --git a/docs/app/concepts/env-secrets/page.mdx b/docs/app/concepts/env-secrets/page.mdx
new file mode 100644
index 0000000..9009be3
--- /dev/null
+++ b/docs/app/concepts/env-secrets/page.mdx
@@ -0,0 +1,153 @@
+---
+title: "Environment Variables & Secrets"
+description: "How to manage .env files, Docker secrets, and sensitive configuration for self-hosted tools. Stop hardcoding passwords."
+---
+
+# Environment Variables & Secrets
+
+Every self-hosted tool needs configuration: database passwords, API keys, admin emails. The **wrong** way is hardcoding them in `docker-compose.yml`. The **right** way is environment variables.
+
+## The Basics: `.env` Files
+
+Docker Compose automatically reads a `.env` file in the same directory as your `docker-compose.yml`:
+
+```bash
+# .env
+POSTGRES_PASSWORD=super_secret_password_123
+ADMIN_EMAIL=you@yourdomain.com
+SECRET_KEY=a1b2c3d4e5f6g7h8i9j0
+```
+
+```yaml
+# docker-compose.yml
+services:
+ db:
+ image: postgres:16
+ environment:
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+```
+
+Docker Compose substitutes `${POSTGRES_PASSWORD}` with the value from `.env`. Your secrets stay out of your Compose file.
+
+> ⚠️ **Critical:** Add `.env` to your `.gitignore` immediately. Never commit secrets to Git.
+
+```bash
+echo ".env" >> .gitignore
+```
+
+## Generating Strong Passwords
+
+Don't use `password123`. Generate proper secrets:
+
+```bash
+# Generate a 32-character random string
+openssl rand -base64 32
+
+# Generate a hex string (great for SECRET_KEY)
+openssl rand -hex 32
+
+# Generate a URL-safe string
+python3 -c "import secrets; print(secrets.token_urlsafe(32))"
+```
+
+### Template for Common Tools
+
+Most self-hosted tools need similar variables. Here's a reusable `.env` template:
+
+```bash
+# .env template — generate all values before first run
+
+# Database
+POSTGRES_USER=app
+POSTGRES_PASSWORD= # openssl rand -base64 32
+POSTGRES_DB=app_db
+
+# App
+SECRET_KEY= # openssl rand -hex 32
+ADMIN_EMAIL=you@yourdomain.com
+ADMIN_PASSWORD= # openssl rand -base64 24
+BASE_URL=https://app.yourdomain.com
+
+# SMTP (for email notifications)
+SMTP_HOST=smtp.gmail.com
+SMTP_PORT=587
+SMTP_USER=you@gmail.com
+SMTP_PASSWORD= # Use app-specific password
+```
+
+## Default Values (Fallbacks)
+
+Use the `:-` syntax for non-sensitive defaults:
+
+```yaml
+environment:
+ NODE_ENV: ${NODE_ENV:-production} # Defaults to "production"
+ LOG_LEVEL: ${LOG_LEVEL:-info} # Defaults to "info"
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} # No default — MUST be set
+```
+
+## Docker Secrets (Advanced)
+
+For production setups, Docker Secrets are more secure than environment variables — they're stored encrypted and mounted as files:
+
+```yaml
+services:
+ db:
+ image: postgres:16
+ environment:
+ POSTGRES_PASSWORD_FILE: /run/secrets/db_password
+ secrets:
+ - db_password
+
+secrets:
+ db_password:
+ file: ./secrets/db_password.txt
+```
+
+```bash
+# Create the secret file
+mkdir -p secrets
+openssl rand -base64 32 > secrets/db_password.txt
+chmod 600 secrets/db_password.txt
+```
+
+> 💡 Not all images support `_FILE` suffix variables. Check the image's documentation on Docker Hub.
+
+## Multiple Environments
+
+Keep separate `.env` files for different environments:
+
+```bash
+.env # Production (default)
+.env.local # Local development
+.env.staging # Staging server
+```
+
+Use them explicitly:
+
+```bash
+# Use a specific env file
+docker compose --env-file .env.staging up -d
+```
+
+## Security Checklist
+
+- [ ] `.env` is in `.gitignore`
+- [ ] No secrets are hardcoded in `docker-compose.yml`
+- [ ] All passwords are randomly generated (32+ characters)
+- [ ] Database ports are NOT exposed to the internet
+- [ ] Secret files have `chmod 600` permissions
+- [ ] Default passwords from docs have been changed
+
+## Common Mistakes
+
+**"Variable is empty in the container"** → Check for typos. Variable names are case-sensitive. `POSTGRES_password` ≠ `POSTGRES_PASSWORD`.
+
+**"Changes to .env aren't applying"** → You need to recreate the container: `docker compose up -d --force-recreate`.
+
+**"I committed my .env to Git"** → Even after removing it, it's in Git history. Rotate ALL secrets immediately and use `git filter-branch` or BFG Repo Cleaner.
+
+## Next Steps
+
+→ [Monitoring & Observability](/concepts/monitoring) — Know when things break
+→ [Docker in 10 Minutes](/concepts/docker-basics) — Review the fundamentals
diff --git a/docs/app/concepts/hardware/page.mdx b/docs/app/concepts/hardware/page.mdx
new file mode 100644
index 0000000..a4b4282
--- /dev/null
+++ b/docs/app/concepts/hardware/page.mdx
@@ -0,0 +1,145 @@
+---
+title: "Hardware & VPS Sizing"
+description: "How much RAM, CPU, and disk you actually need for self-hosting. VPS provider comparison and scaling strategies."
+---
+
+# Hardware & VPS Sizing
+
+The #1 question new self-hosters ask: **"What server do I need?"**
+
+Short answer: less than you think to start, more than you think once you're hooked.
+
+## Quick Sizing Guide
+
+### How Much RAM Do I Need?
+
+| Setup | RAM | What You Can Run |
+|---|---|---|
+| **Starter** | 2 GB | 1–2 lightweight tools (Uptime Kuma, Plausible) |
+| **Hobbyist** | 4 GB | 3–5 tools + a database + reverse proxy |
+| **Power User** | 8 GB | 8–12 tools + multiple databases |
+| **Homelab** | 16 GB | Everything + AI models (small ones) |
+| **AI Workloads** | 32+ GB | LLMs, image generation, video AI |
+
+> 💡 **Start with 4 GB.** You can always upgrade. Most VPS providers let you resize without downtime.
+
+### CPU Guidelines
+
+| Workload | vCPUs Needed |
+|---|---|
+| Static tools (Uptime Kuma, PocketBase) | 1 vCPU |
+| Web apps (Plausible, Outline, n8n) | 2 vCPUs |
+| Heavy apps (PostHog, Supabase, Metabase) | 4 vCPUs |
+| AI inference (Ollama, Stable Diffusion) | 4+ vCPUs + GPU |
+
+### Disk Space
+
+| Component | Typical Usage |
+|---|---|
+| Base OS + Docker | 5–8 GB |
+| Each Docker image | 100 MB – 2 GB |
+| PostgreSQL database (small app) | 500 MB – 5 GB |
+| Log files (unmanaged) | 1–10 GB |
+| AI models (per model) | 4–70 GB |
+
+**Minimum recommended:** 50 GB SSD.
+**Comfortable:** 80–160 GB SSD.
+**AI workloads:** 200+ GB NVMe.
+
+## VPS Provider Comparison
+
+| Provider | Starting At | Pros | Best For |
+|---|---|---|---|
+| [**DigitalOcean**](https://m.do.co/c/2ed27757a361) | $6/mo (1 GB) | Simple UI, great docs, predictable pricing | Beginners |
+| **Hetzner** | €3.79/mo (2 GB) | Best price-to-performance in EU | Power users, EU hosting |
+| **Contabo** | €5.99/mo (4 GB) | Cheapest for RAM-heavy setups | Budget homelab |
+| **Linode (Akamai)** | $5/mo (1 GB) | Reliable, good network | Small projects |
+| **Vultr** | $5/mo (1 GB) | Global locations, hourly billing | Testing and experimentation |
+| **Oracle Cloud** | Free (4 vCPUs, 24 GB ARM) | Unbeatable free tier | Zero-budget hosting |
+| **Home Server** | One-time cost | Full control, unlimited bandwidth | Privacy maximalists |
+
+> 🏆 **Our Pick:** [DigitalOcean](https://m.do.co/c/2ed27757a361) for beginners (simple, reliable, [$200 free credit](https://m.do.co/c/2ed27757a361)). **Hetzner** for best value. **Oracle Cloud free tier** if you want to pay nothing.
+
+## Real-World Stack Sizing
+
+Here's what actual AltStack setups typically need:
+
+### The Bootstrapper Stack (4 GB RAM)
+- Coolify (deployment platform)
+- Plausible (analytics)
+- Uptime Kuma (monitoring)
+- Listmonk (newsletters)
+- Caddy (reverse proxy)
+
+### The Privacy Stack (4 GB RAM)
+- Vaultwarden (passwords)
+- Jitsi Meet (video calls)
+- Mattermost (messaging)
+- Caddy (reverse proxy)
+
+### The AI Stack (16–32 GB RAM)
+- Ollama (LLM inference)
+- Stable Diffusion (image generation)
+- TabbyML (code completion)
+- Continue.dev (AI coding)
+
+## Scaling Strategies
+
+### Vertical Scaling (Bigger Server)
+
+The simplest approach. Just resize your VPS:
+
+- **DigitalOcean:** Resize droplet (takes ~1 minute)
+- **Hetzner:** Rescale server (may require reboot)
+- **Home server:** Add RAM sticks
+
+### Horizontal Scaling (More Servers)
+
+When one server isn't enough:
+
+```
+Server 1: Databases (Postgres, Redis)
+Server 2: Application containers
+Server 3: AI workloads (GPU)
+```
+
+Connect them with a private network (most VPS providers offer this for free) or a VPN like WireGuard.
+
+### The "Start Small" Strategy
+
+1. **Month 1:** $6/mo droplet (1 GB) — Deploy 1–2 tools
+2. **Month 3:** Resize to $12/mo (2 GB) — Add more tools
+3. **Month 6:** Resize to $24/mo (4 GB) — Running your full stack
+4. **Month 12+:** Add a second server or move to Hetzner for better value
+
+## Monitoring Your Resources
+
+Always know how much headroom you have:
+
+```bash
+# Quick resource check
+free -h # RAM usage
+df -h # Disk usage
+nproc # CPU cores
+uptime # Load average
+
+# Docker resource usage
+docker stats # Live container metrics
+docker system df # Docker disk usage
+```
+
+## Red Flags
+
+🚩 **RAM constantly above 90%** → Resize or move a service to another server.
+
+🚩 **Disk above 80%** → Clean Docker images (`docker system prune -f`) or resize disk.
+
+🚩 **CPU at 100% for extended periods** → Check which container is the culprit with `docker stats`.
+
+🚩 **Swap usage above 1 GB** → You need more RAM. Swap is a band-aid, not a solution.
+
+## Next Steps
+
+→ [Quick Start](/quick-start) — Deploy your first tool
+→ [Deploy Guides](/deploy) — Browse 65+ tools
+→ [Docker in 10 Minutes](/concepts/docker-basics) — Foundation knowledge
diff --git a/docs/app/concepts/monitoring/page.mdx b/docs/app/concepts/monitoring/page.mdx
new file mode 100644
index 0000000..4546167
--- /dev/null
+++ b/docs/app/concepts/monitoring/page.mdx
@@ -0,0 +1,163 @@
+---
+title: "Monitoring & Observability"
+description: "Know when things break before your users do. Uptime monitoring, disk alerts, log aggregation, and observability for self-hosters."
+---
+
+# Monitoring & Observability
+
+You deployed 5 tools. They're running great. You go to bed. At 3 AM, the disk fills up, Postgres crashes, and everything dies. You find out at 9 AM when a user emails you.
+
+**Monitoring prevents this.**
+
+## The Three Layers
+
+| Layer | What It Watches | Tool |
+|---|---|---|
+| **Uptime** | "Is the service responding?" | Uptime Kuma |
+| **System** | CPU, RAM, disk, network | Node Exporter + Grafana |
+| **Logs** | What's actually happening inside | Docker logs, Dozzle, SigNoz |
+
+You need **at least** the first layer. The other two are for when you get serious.
+
+## Layer 1: Uptime Monitoring (Essential)
+
+[Uptime Kuma](/deploy/uptime-kuma) is the single best tool for self-hosters. Deploy it first, always.
+
+```yaml
+# docker-compose.yml
+services:
+ uptime-kuma:
+ image: louislam/uptime-kuma:1
+ container_name: uptime-kuma
+ restart: unless-stopped
+ ports:
+ - "3001:3001"
+ volumes:
+ - uptime_data:/app/data
+
+volumes:
+ uptime_data:
+```
+
+### What to Monitor
+
+Add a monitor for **every** service you run:
+
+| Type | Target | Check Interval |
+|---|---|---|
+| HTTP(s) | `https://plausible.yourdomain.com` | 60s |
+| HTTP(s) | `https://uptime.yourdomain.com` | 60s |
+| TCP Port | `localhost:5432` (Postgres) | 120s |
+| Docker Container | Container name | 60s |
+| DNS | `yourdomain.com` | 300s |
+
+### Notifications
+
+Uptime Kuma supports 90+ notification channels. Set up **at least two**:
+
+- **Email** — For non-urgent alerts
+- **Telegram/Discord/Slack** — For instant mobile alerts
+
+> 🔥 **Pro Tip:** Monitor your monitoring. Set up an external free ping service (like [UptimeRobot](https://uptimerobot.com)) to watch your Uptime Kuma instance.
+
+## Layer 2: System Metrics
+
+### Quick Disk Alert Script
+
+The #1 cause of self-hosting outages is **running out of disk space**. This script sends an alert when disk usage exceeds 80%:
+
+```bash
+#!/bin/bash
+# /opt/scripts/disk-alert.sh
+
+THRESHOLD=80
+USAGE=$(df / | tail -1 | awk '{print $5}' | sed 's/%//')
+
+if [ "$USAGE" -gt "$THRESHOLD" ]; then
+ echo "⚠️ Disk usage is at ${USAGE}% on $(hostname)" | \
+ mail -s "Disk Alert: ${USAGE}%" you@yourdomain.com
+fi
+```
+
+Add to cron:
+
+```bash
+# Check every hour
+0 * * * * /opt/scripts/disk-alert.sh
+```
+
+### What to Watch
+
+| Metric | Warning Threshold | Critical Threshold |
+|---|---|---|
+| Disk usage | 70% | 85% |
+| RAM usage | 80% | 95% |
+| CPU sustained | 80% for 5 min | 95% for 5 min |
+| Container restarts | 3 in 1 hour | 10 in 1 hour |
+
+### Docker Resource Monitoring
+
+Quick commands to check what's eating your resources:
+
+```bash
+# Live resource usage per container
+docker stats
+
+# Show container sizes (disk)
+docker system df -v
+
+# Find large volumes
+du -sh /var/lib/docker/volumes/*/
+```
+
+## Layer 3: Log Aggregation
+
+Docker captures all stdout/stderr from your containers. Use it:
+
+```bash
+# Live logs for a service
+docker compose logs -f plausible
+
+# Last 100 lines
+docker compose logs --tail=100 plausible
+
+# Logs since a specific time
+docker compose logs --since="2h" plausible
+```
+
+### Dozzle (Docker Log Viewer)
+
+For a beautiful web-based log viewer:
+
+```yaml
+services:
+ dozzle:
+ image: amir20/dozzle:latest
+ container_name: dozzle
+ ports:
+ - "8080:8080"
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+```
+
+### For Serious Setups: SigNoz
+
+If you need traces, metrics, **and** logs in one place, deploy [SigNoz](/deploy/signoz). It's an open-source Datadog alternative built on OpenTelemetry.
+
+## Maintenance Routine
+
+Set a weekly calendar reminder:
+
+```
+☐ Check Uptime Kuma — all green?
+☐ Run `docker stats` — anything hogging resources?
+☐ Run `df -h` — disk space OK?
+☐ Run `docker system prune -f` — clean unused images
+☐ Check logs for any errors — `docker compose logs --since=168h | grep -i error`
+```
+
+## Next Steps
+
+→ [Updating & Maintaining Containers](/concepts/updates) — Keep your tools up to date safely
+→ [Backups That Actually Work](/concepts/backups) — Protect your data
+→ [Deploy Uptime Kuma](/deploy/uptime-kuma) — Set up monitoring now
diff --git a/docs/app/concepts/networking/page.mdx b/docs/app/concepts/networking/page.mdx
new file mode 100644
index 0000000..26f0539
--- /dev/null
+++ b/docs/app/concepts/networking/page.mdx
@@ -0,0 +1,160 @@
+---
+title: "Networking for Self-Hosters"
+description: "Ports, DNS, firewalls, and private networks — the networking basics every self-hoster needs to know."
+---
+
+# Networking for Self-Hosters
+
+You deployed a tool. It works on `localhost:3000`. You try to access it from your phone. Nothing. Welcome to networking.
+
+This guide covers the **four things** standing between your server and the outside world.
+
+## 1. Ports
+
+Every network service listens on a **port** — a numbered door on your server. Some well-known ones:
+
+| Port | Service |
+|---|---|
+| `22` | SSH |
+| `80` | HTTP |
+| `443` | HTTPS |
+| `5432` | PostgreSQL |
+| `3000–9000` | Where most self-hosted tools live |
+
+When Docker maps `-p 8080:3000`, it's saying: "When traffic hits port 8080 on the host, send it to port 3000 inside the container."
+
+```yaml
+# In docker-compose.yml
+ports:
+ - "8080:3000" # host:container
+```
+
+> ⚠️ **Never expose database ports** (5432, 3306, 27017) to the internet. Keep them internal to Docker networks.
+
+## 2. DNS (Domain Name System)
+
+DNS translates human-readable names to IP addresses:
+
+```
+plausible.yourdomain.com → 203.0.113.42
+```
+
+### Setting Up DNS Records
+
+In your domain registrar (Cloudflare, Namecheap, etc.):
+
+| Type | Name | Value | What it does |
+|---|---|---|---|
+| **A** | `@` | `203.0.113.42` | Points root domain to your server |
+| **A** | `plausible` | `203.0.113.42` | Points subdomain to your server |
+| **CNAME** | `www` | `yourdomain.com` | Aliases `www` to root |
+| **A** | `*` | `203.0.113.42` | Wildcard — catch-all for any subdomain |
+
+> 💡 **Pro Tip:** A wildcard `*` A record + Caddy reverse proxy = unlimited subdomains with zero DNS management. Just add entries to your Caddyfile.
+
+### DNS Propagation
+
+After changing DNS records, it can take **5 minutes to 48 hours** to propagate globally. Use [dnschecker.org](https://dnschecker.org) to verify.
+
+## 3. Firewalls (UFW)
+
+A firewall controls which ports are open to the internet. On Ubuntu/Debian, use **UFW** (Uncomplicated Firewall):
+
+```bash
+# Check current status
+ufw status
+
+# Allow essential ports
+ufw allow 22/tcp # SSH — DON'T lock yourself out
+ufw allow 80/tcp # HTTP
+ufw allow 443/tcp # HTTPS
+
+# Enable the firewall
+ufw enable
+
+# Deny everything else by default
+ufw default deny incoming
+ufw default allow outgoing
+```
+
+### The Golden Rule
+
+Only open three ports to the internet: **22** (SSH), **80** (HTTP), **443** (HTTPS).
+
+Your reverse proxy (Caddy/Nginx) handles port 80/443 and routes traffic internally to your containers. Individual tool ports (3000, 8080, etc.) should **never** be exposed publicly.
+
+```
+Internet → Port 443 → Caddy → Internal Docker Network → Your Tools
+```
+
+### Common Mistakes
+
+**"I can't SSH into my server"** → You blocked port 22 before enabling UFW. Contact your hosting provider for console access.
+
+**"My tool works locally but not remotely"** → Port 80/443 isn't open. Run `ufw allow 80/tcp && ufw allow 443/tcp`.
+
+**"I opened port 8080 and got hacked"** → Never expose app ports directly. Use a reverse proxy instead.
+
+## 4. Docker Networks
+
+Docker creates isolated **networks** for your containers. By default, containers in the same `docker-compose.yml` can talk to each other by service name:
+
+```yaml
+services:
+ app:
+ image: myapp:latest
+ depends_on:
+ - db # Can reach the database at "db:5432"
+
+ db:
+ image: postgres:16
+ # No "ports:" = not accessible from outside Docker
+```
+
+### When to Create Custom Networks
+
+If you need containers from **different** Compose files to communicate (e.g., a shared Caddy reverse proxy):
+
+```yaml
+# In your Caddyfile's docker-compose.yml
+networks:
+ proxy:
+ external: true
+
+# In your app's docker-compose.yml
+networks:
+ default:
+ name: proxy
+ external: true
+```
+
+Create the shared network first:
+
+```bash
+docker network create proxy
+```
+
+Now all containers on the `proxy` network can reach each other by service name — across different Compose files.
+
+## Quick Reference
+
+```bash
+# See what's listening on which port
+ss -tlnp
+
+# Test if a port is open from outside
+nc -zv your-server-ip 443
+
+# See Docker networks
+docker network ls
+
+# Check DNS resolution
+dig plausible.yourdomain.com
+nslookup plausible.yourdomain.com
+```
+
+## Next Steps
+
+→ [Reverse Proxies Explained](/concepts/reverse-proxies) — Route traffic from domains to containers
+→ [SSL/TLS for Self-Hosters](/concepts/ssl-tls) — Encrypt your traffic
+→ [Environment Variables & Secrets](/concepts/env-secrets) — Secure your configuration
diff --git a/docs/app/concepts/page.mdx b/docs/app/concepts/page.mdx
new file mode 100644
index 0000000..e3484bb
--- /dev/null
+++ b/docs/app/concepts/page.mdx
@@ -0,0 +1,56 @@
+---
+title: "Concepts"
+description: "The foundational knowledge for self-hosting. Docker, networking, security, backups — explained like you're a human, not a sysadmin."
+---
+
+# Concepts
+
+Before you deploy anything, understand the building blocks. These guides cover the **why** and **how** behind self-hosting infrastructure — no fluff, no PhD required.
+
+> 📖 **Reading order matters.** Start from the top and work down. Each article builds on the one before it.
+
+---
+
+## The Foundations
+
+These four are non-negotiable. Read them before your first deploy.
+
+| # | Guide | What You'll Learn |
+|---|---|---|
+| 1 | [Docker in 10 Minutes](/concepts/docker-basics) | Images, containers, volumes, Docker Compose — the only 4 concepts you need |
+| 2 | [Networking for Self-Hosters](/concepts/networking) | Ports, DNS, firewalls, and why your tool isn't accessible from the internet |
+| 3 | [Reverse Proxies Explained](/concepts/reverse-proxies) | Map `app.yourdomain.com` to your containers with Caddy |
+| 4 | [SSL/TLS for Self-Hosters](/concepts/ssl-tls) | HTTPS, Let's Encrypt, and why it matters |
+
+---
+
+## Running in Production
+
+Once your tools are deployed, keep them alive and healthy.
+
+| # | Guide | What You'll Learn |
+|---|---|---|
+| 5 | [Environment Variables & Secrets](/concepts/env-secrets) | `.env` files, Docker secrets, and never hardcoding passwords again |
+| 6 | [Monitoring & Observability](/concepts/monitoring) | Know when things break before your users do |
+| 7 | [Updating & Maintaining Containers](/concepts/updates) | Safe update workflows, rollbacks, and automating the boring parts |
+| 8 | [Backups That Actually Work](/concepts/backups) | Database dumps, volume backups, and the 3-2-1 rule |
+
+---
+
+## Planning & Scaling
+
+Before you buy a server (or a bigger one).
+
+| # | Guide | What You'll Learn |
+|---|---|---|
+| 9 | [Hardware & VPS Sizing](/concepts/hardware) | How much RAM/CPU you actually need, and which providers are worth it |
+
+---
+
+## Ready to Deploy?
+
+You've got the knowledge. Now put it to work:
+
+→ [Deploy Guides](/deploy) — 65+ tools with Docker Compose configs
+→ [Quick Start](/quick-start) — Your first deployment in 5 minutes
+→ [Curated Stacks](/stacks) — Pre-built tool bundles for specific use cases
diff --git a/docs/app/concepts/reverse-proxies/page.mdx b/docs/app/concepts/reverse-proxies/page.mdx
new file mode 100644
index 0000000..9bfa495
--- /dev/null
+++ b/docs/app/concepts/reverse-proxies/page.mdx
@@ -0,0 +1,113 @@
+---
+title: Reverse Proxies Explained
+description: "What a reverse proxy does and why you need one. Set up Caddy or Nginx to serve your self-hosted tools on proper domains with automatic HTTPS."
+---
+
+# Reverse Proxies Explained
+
+Right now your tools run on ports like `:3001`, `:8000`, `:8080`. That's fine for testing, but you don't want users visiting `http://your-ip:8000`.
+
+A **reverse proxy** maps clean domains to those ugly ports:
+
+```
+plausible.yourdomain.com → localhost:8000
+uptime.yourdomain.com → localhost:3001
+supabase.yourdomain.com → localhost:8443
+```
+
+It also handles **HTTPS** (SSL certificates) automatically.
+
+## Which One to Use?
+
+| Proxy | Our Take |
+|---|---|
+| **Caddy** ✅ | **Use this.** Automatic HTTPS, zero-config SSL, human-readable config. Built for self-hosters. |
+| **Nginx Proxy Manager** | GUI-first option. Great if you hate config files. Slightly more resource-heavy. |
+| **Traefik** | Powerful but complex. Shines with Kubernetes and large dynamic container fleets. Overkill for most self-hosting setups. |
+| **Nginx (raw)** | The classic. Fine but verbose. No auto-SSL without certbot scripts. |
+
+> 🏆 **The Verdict:** Start with Caddy. Seriously. The config file is 6 lines.
+
+## Setting Up Caddy (Recommended)
+
+### Step 1: Deploy Caddy
+
+```yaml
+# docker-compose.yml
+version: '3.8'
+
+services:
+ caddy:
+ image: caddy:2-alpine
+ container_name: caddy
+ restart: unless-stopped
+ ports:
+ - "80:80"
+ - "443:443"
+ volumes:
+ - ./Caddyfile:/etc/caddy/Caddyfile
+ - caddy_data:/data
+ - caddy_config:/config
+
+volumes:
+ caddy_data:
+ caddy_config:
+```
+
+### Step 2: Configure Your Domains
+
+Create a `Caddyfile` in the same directory:
+
+```
+plausible.yourdomain.com {
+ reverse_proxy localhost:8000
+}
+
+uptime.yourdomain.com {
+ reverse_proxy localhost:3001
+}
+
+git.yourdomain.com {
+ reverse_proxy localhost:3000
+}
+```
+
+That's the entire config. Caddy automatically obtains and renews Let's Encrypt SSL certificates for every domain listed.
+
+### Step 3: Point DNS
+
+In your domain registrar (Cloudflare, Namecheap, etc.), add A records:
+
+| Type | Name | Value |
+|---|---|---|
+| A | `plausible` | `your-server-ip` |
+| A | `uptime` | `your-server-ip` |
+| A | `git` | `your-server-ip` |
+
+### Step 4: Start
+
+```bash
+docker compose up -d
+```
+
+Within 60 seconds, Caddy will obtain SSL certificates and your tools will be live on proper HTTPS domains.
+
+## How It Works (Simplified)
+
+```
+User visits plausible.yourdomain.com
+ ↓
+ DNS resolves to your server IP
+ ↓
+ Caddy receives the request on port 443
+ ↓
+ Caddy reads Caddyfile: "plausible.yourdomain.com → localhost:8000"
+ ↓
+ Caddy forwards the request to your Plausible container
+ ↓
+ User sees Plausible dashboard over HTTPS 🔒
+```
+
+→ [Setting Up a Reverse Proxy (Practical Guide)](/quick-start/reverse-proxy) — Get Nginx, Caddy, or Traefik running now
+→ [SSL/TLS for Self-Hosters](/concepts/ssl-tls) — Deep dive into certificates and security
+→ [Deploy Guides](/deploy) — All our guides include reverse proxy config
diff --git a/docs/app/concepts/ssl-tls/page.mdx b/docs/app/concepts/ssl-tls/page.mdx
new file mode 100644
index 0000000..9c801ea
--- /dev/null
+++ b/docs/app/concepts/ssl-tls/page.mdx
@@ -0,0 +1,56 @@
+---
+title: "SSL/TLS for Self-Hosters"
+description: "HTTPS for your self-hosted tools. How SSL works, why you need it, and how to set it up with Caddy or Let's Encrypt."
+---
+
+# SSL/TLS for Self-Hosters
+
+**SSL/TLS** is what makes the padlock appear in your browser. It encrypts traffic between your users and your server so nobody can snoop on it.
+
+Every self-hosted tool accessible from the internet **must** have HTTPS. No exceptions.
+
+## The Easy Way: Caddy (Automatic)
+
+If you followed our [reverse proxy guide](/concepts/reverse-proxies) and are using Caddy, **you already have SSL**. Caddy obtains and renews Let's Encrypt certificates automatically for every domain in your Caddyfile.
+
+No config needed. No cron jobs. No certbot. It just works.
+
+> 🔥 **Pro Tip:** This is the #1 reason we recommend Caddy over Nginx.
+
+## The Manual Way: Let's Encrypt + Certbot
+
+If you're using raw Nginx, you'll need certbot:
+
+```bash
+# Install certbot
+apt install certbot python3-certbot-nginx -y
+
+# Obtain a certificate
+certbot --nginx -d plausible.yourdomain.com
+
+# Verify auto-renewal
+certbot renew --dry-run
+```
+
+Certbot will modify your Nginx config automatically and set up a systemd timer (or cron job) for automatic renewal.
+
+## SSL Checklist
+
+After setting up SSL, verify:
+
+- [ ] Site loads on `https://` (padlock visible)
+- [ ] `http://` redirects to `https://` automatically
+- [ ] Certificate is from Let's Encrypt (click padlock → "Certificate")
+- [ ] No mixed-content warnings in browser console
+
+## Common Gotchas
+
+**"Certificate not found"** → Your DNS hasn't propagated yet. Wait 5–10 minutes and try again.
+
+**"Too many requests"** → Let's Encrypt rate-limits issuance to 50 certificates per registered domain per week. If you're testing, use certbot's `--staging` flag first.
+
+**"Connection refused on port 443"** → Port 443 isn't open in your firewall. Run: `ufw allow 443/tcp`
+
+## Next Steps
+
+→ [Backups That Actually Work](/concepts/backups) — Protect the data you're securing with SSL
diff --git a/docs/app/concepts/updates/page.mdx b/docs/app/concepts/updates/page.mdx
new file mode 100644
index 0000000..3aec444
--- /dev/null
+++ b/docs/app/concepts/updates/page.mdx
@@ -0,0 +1,153 @@
+---
+title: "Updating & Maintaining Containers"
+description: "How to safely update self-hosted tools running in Docker. Update workflows, rollbacks, and optional automation with Watchtower."
+---
+
+# Updating & Maintaining Containers
+
+Your tools need updates — security patches, bug fixes, new features. But updating a self-hosted tool isn't like clicking "Update" in an app store. You need a process.
+
+## The Safe Update Workflow
+
+Follow this **every time** you update a tool:
+
+```bash
+# 1. Backup first (ALWAYS)
+docker exec my-db pg_dump -U postgres mydb > backup_$(date +%Y%m%d).sql
+
+# 2. Pull the new image
+docker compose pull
+
+# 3. Recreate containers with new image
+docker compose up -d
+
+# 4. Check logs for errors
+docker compose logs -f --tail=50
+
+# 5. Verify the tool works
+curl -I https://app.yourdomain.com
+```
+
+> ⚠️ **Golden Rule:** Never update without a backup. If something breaks, you can roll back in 60 seconds.
+
+## Rolling Back
+
+Something went wrong? Here's how to revert to the previous version:
+
+### Option 1: Pin to Previous Version
+
+```yaml
+# docker-compose.yml — change the tag
+services:
+ app:
+ image: plausible/analytics:v2.0.0 # Was :v2.1.0
+```
+
+```bash
+docker compose up -d
+```
+
+### Option 2: Restore From Backup
+
+```bash
+# Stop the broken service
+docker compose down
+
+# Restore the database backup
+cat backup_20260218.sql | docker exec -i my-db psql -U postgres mydb
+
+# Start with the old image
+docker compose up -d
+```
+
+## Image Tags: `latest` vs Pinned Versions
+
+| Approach | Pros | Cons |
+|---|---|---|
+| `image: app:latest` | Always gets newest | Can break unexpectedly |
+| `image: app:v2.1.0` | Predictable, reproducible | Manual updates required |
+| `image: app:2` | Gets patches within major version | Some risk of breaking changes |
+
+> 🏆 **Our Recommendation:** Use **major version tags** (`image: postgres:16`) for databases and **pinned versions** (`image: plausible/analytics:v2.1.0`) for applications. Avoid `latest` in production.
+
+## Automated Updates with Watchtower
+
+If you want hands-off updates (with some risk), **Watchtower** watches your containers and auto-updates them:
+
+```yaml
+services:
+ watchtower:
+ image: containrrr/watchtower
+ container_name: watchtower
+ restart: unless-stopped
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ environment:
+ WATCHTOWER_CLEANUP: "true"
+ WATCHTOWER_SCHEDULE: "0 0 4 * * *" # 4 AM daily
+ WATCHTOWER_NOTIFICATIONS: "email"
+ command: --include-restarting
+```
+
+### Watchtower Caveats
+
+- It updates **all** containers by default. Use labels to control which ones:
+
+```yaml
+services:
+ plausible:
+ image: plausible/analytics:latest
+ labels:
+ - "com.centurylinklabs.watchtower.enable=true"
+
+ database:
+ image: postgres:16
+ labels:
+ - "com.centurylinklabs.watchtower.enable=false" # NEVER auto-update databases
+```
+
+- It doesn't run migrations. Some tools need `docker exec app migrate` after updates.
+- It can't roll back automatically.
+
+> ⚠️ **Never auto-update databases.** Postgres, MySQL, and Redis major version upgrades require manual migration steps. Always pin database images.
+
+## Cleanup: Reclaiming Disk Space
+
+Old images pile up. Docker doesn't clean them automatically:
+
+```bash
+# See how much space Docker is using
+docker system df
+
+# Remove unused images (safe)
+docker image prune -f
+
+# Nuclear option: remove ALL unused data
+docker system prune -a -f --volumes
+# ⚠️ This deletes stopped containers, unused images, AND orphaned volumes
+```
+
+### Automate Cleanup
+
+Add to your crontab:
+
+```bash
+# Weekly cleanup at 3 AM Sunday
+0 3 * * 0 docker image prune -f >> /var/log/docker-cleanup.log 2>&1
+```
+
+## Update Checklist
+
+Before updating any tool:
+
+- [ ] Database backed up
+- [ ] Current version noted (in case of rollback)
+- [ ] Changelog reviewed for breaking changes
+- [ ] `.env` file backed up
+- [ ] Update applied and logs checked
+- [ ] Service verified working
+
+## Next Steps
+
+→ [Backups That Actually Work](/concepts/backups) — Make sure you can actually roll back
+→ [Monitoring & Observability](/concepts/monitoring) — Catch failed updates automatically
diff --git a/docs/app/contact/page.mdx b/docs/app/contact/page.mdx
new file mode 100644
index 0000000..c3444ca
--- /dev/null
+++ b/docs/app/contact/page.mdx
@@ -0,0 +1,9 @@
+import ContactForm from '../../components/ContactForm'
+
+# Contact Us
+
+Have a question regarding self-hosting, a suggestion for a new stack, or just want to say hello? We're here to help.
+
+Fill out the form below and we'll get back to you as soon as possible.
+
+<ContactForm />
diff --git a/docs/app/deploy/_meta.ts b/docs/app/deploy/_meta.ts
new file mode 100644
index 0000000..6faf178
--- /dev/null
+++ b/docs/app/deploy/_meta.ts
@@ -0,0 +1,201 @@
+import type { MetaRecord } from 'nextra'
+
+const meta: MetaRecord = {
+ "activepieces": {
+ "title": "Activepieces"
+ },
+ "affine": {
+ "title": "AFFiNE"
+ },
+ "akaunting": {
+ "title": "Akaunting"
+ },
+ "appflowy": {
+ "title": "AppFlowy"
+ },
+ "appwrite": {
+ "title": "Appwrite"
+ },
+ "authentik": {
+ "title": "Authentik"
+ },
+ "bitwarden": {
+ "title": "Bitwarden"
+ },
+ "calcom": {
+ "title": "Cal.com"
+ },
+ "chaskiq": {
+ "title": "Chaskiq"
+ },
+ "coder": {
+ "title": "Coder"
+ },
+ "continue-dev": {
+ "title": "Continue"
+ },
+ "coolify": {
+ "title": "Coolify"
+ },
+ "deepseek": {
+ "title": "DeepSeek-V3 / R1"
+ },
+ "documenso": {
+ "title": "Documenso"
+ },
+ "dokku": {
+ "title": "Dokku"
+ },
+ "erpnext": {
+ "title": "ERPNext"
+ },
+ "flux": {
+ "title": "FLUX"
+ },
+ "freecad": {
+ "title": "FreeCAD"
+ },
+ "gemma": {
+ "title": "Google Gemma 2"
+ },
+ "gimp": {
+ "title": "GIMP"
+ },
+ "glitchtip": {
+ "title": "GlitchTip"
+ },
+ "gpt4all": {
+ "title": "GPT4All"
+ },
+ "hunyuan-video": {
+ "title": "HunyuanVideo 1.5"
+ },
+ "jitsi-meet": {
+ "title": "Jitsi Meet"
+ },
+ "jitsu": {
+ "title": "Jitsu"
+ },
+ "kdenlive": {
+ "title": "Kdenlive"
+ },
+ "keepassxc": {
+ "title": "KeePassXC"
+ },
+ "keycloak": {
+ "title": "Keycloak"
+ },
+ "krita": {
+ "title": "Krita"
+ },
+ "librecad": {
+ "title": "LibreCAD"
+ },
+ "listmonk": {
+ "title": "Listmonk"
+ },
+ "llama": {
+ "title": "Meta Llama 3.1"
+ },
+ "matomo": {
+ "title": "Matomo"
+ },
+ "mattermost": {
+ "title": "Mattermost"
+ },
+ "mautic": {
+ "title": "Mautic"
+ },
+ "medusa": {
+ "title": "Medusa.js"
+ },
+ "metabase": {
+ "title": "Metabase"
+ },
+ "minio": {
+ "title": "MinIO"
+ },
+ "mistral": {
+ "title": "Mistral Large 2"
+ },
+ "mixpost": {
+ "title": "Mixpost"
+ },
+ "mochi-1": {
+ "title": "Mochi-1"
+ },
+ "n8n": {
+ "title": "n8n"
+ },
+ "odoo": {
+ "title": "Odoo"
+ },
+ "ollama": {
+ "title": "Ollama"
+ },
+ "onlyoffice": {
+ "title": "ONLYOFFICE"
+ },
+ "orangehrm": {
+ "title": "OrangeHRM"
+ },
+ "outline": {
+ "title": "Outline"
+ },
+ "penpot": {
+ "title": "Penpot"
+ },
+ "plane": {
+ "title": "Plane"
+ },
+ "plausible": {
+ "title": "Plausible"
+ },
+ "pocketbase": {
+ "title": "PocketBase"
+ },
+ "postal": {
+ "title": "Postal"
+ },
+ "posthog": {
+ "title": "PostHog"
+ },
+ "qwen": {
+ "title": "Qwen 2.5"
+ },
+ "rocketchat": {
+ "title": "Rocket.Chat"
+ },
+ "signoz": {
+ "title": "SigNoz"
+ },
+ "stable-diffusion": {
+ "title": "Stable Diffusion 3.5"
+ },
+ "supabase": {
+ "title": "Supabase"
+ },
+ "superset": {
+ "title": "Apache Superset"
+ },
+ "tabby": {
+ "title": "TabbyML"
+ },
+ "taiga": {
+ "title": "Taiga"
+ },
+ "twenty": {
+ "title": "Twenty"
+ },
+ "uptime-kuma": {
+ "title": "Uptime Kuma"
+ },
+ "vaultwarden": {
+ "title": "Vaultwarden"
+ },
+ "zammad": {
+ "title": "Zammad"
+ }
+}
+
+export default meta
diff --git a/docs/app/deploy/activepieces/page.mdx b/docs/app/deploy/activepieces/page.mdx
new file mode 100644
index 0000000..08da280
--- /dev/null
+++ b/docs/app/deploy/activepieces/page.mdx
@@ -0,0 +1,158 @@
+---
+title: "Deploy Activepieces Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Activepieces with Docker Compose."
+---
+
+# Deploy Activepieces
+
+Open source alternative to Zapier. Automate your work with 200+ apps.
+
+
+ ⭐ 11.0k stars
+ 📜 MIT
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Activepieces instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Activepieces and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ activepieces:
+ image: activepieces/activepieces:latest
+ container_name: activepieces
+ restart: unless-stopped
+ depends_on:
+ - db
+ - redis
+ ports:
+ - "8080:80"
+ environment:
+ - AP_FRONTEND_URL=http://localhost:8080
+ - AP_POSTGRES_DATABASE=activepieces
+ - AP_POSTGRES_HOST=db
+ - AP_POSTGRES_PORT=5432
+ - AP_POSTGRES_USERNAME=activepieces
+ - AP_POSTGRES_PASSWORD=activepieces # ⚠️ change this default before exposing to the internet
+ - AP_REDIS_HOST=redis
+ - AP_REDIS_PORT=6379
+
+ db:
+ image: postgres:14-alpine
+ container_name: activepieces-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=activepieces
+ - POSTGRES_PASSWORD=activepieces
+ - POSTGRES_DB=activepieces
+ volumes:
+ - activepieces_db_data:/var/lib/postgresql/data
+
+ redis:
+ image: redis:alpine
+ container_name: activepieces-redis
+ restart: unless-stopped
+
+volumes:
+ activepieces_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/activepieces && cd /opt/activepieces
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `AP_FRONTEND_URL` | `http://localhost:8080` | No |
+| `AP_POSTGRES_DATABASE` | `activepieces` | No |
+| `AP_POSTGRES_HOST` | `db` | No |
+| `AP_POSTGRES_PORT` | `5432` | No |
+| `AP_POSTGRES_USERNAME` | `activepieces` | No |
+| `AP_POSTGRES_PASSWORD` | `activepieces` | No |
+| `AP_REDIS_HOST` | `redis` | No |
+| `AP_REDIS_PORT` | `6379` | No |
+| `POSTGRES_USER` | `activepieces` | No |
+| `POSTGRES_PASSWORD` | `activepieces` | No |
+| `POSTGRES_DB` | `activepieces` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs activepieces | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Activepieces on AltStack Directory](https://thealtstack.com/alternative-to/activepieces)
+- [Activepieces Self-Hosted Guide](https://thealtstack.com/self-hosted/activepieces)
+- [Official Documentation](https://www.activepieces.com)
+- [GitHub Repository](https://github.com/activepieces/activepieces)
diff --git a/docs/app/deploy/affine/page.mdx b/docs/app/deploy/affine/page.mdx
new file mode 100644
index 0000000..d932eca
--- /dev/null
+++ b/docs/app/deploy/affine/page.mdx
@@ -0,0 +1,171 @@
+---
+title: "Deploy AFFiNE Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting AFFiNE with Docker Compose."
+---
+
+# Deploy AFFiNE
+
+There can be more than Notion and Miro. AFFiNE (pronounced [ə'fain]) is a next-gen knowledge base that brings planning, sorting and creating all together. Privacy first, open-source, customizable and ready to use.
+
+
+ ⭐ 62.7k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working AFFiNE instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for AFFiNE and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for AFFiNE
+version: '3.8'
+
+services:
+ affine:
+ image: ghcr.io/toeverything/affine-graphql:latest # Using official as fallback but custom build setup exists in Dockerfile
+ container_name: affine
+ ports:
+ - "3000:3000"
+ environment:
+ - DATABASE_URL=postgres://affine:affine@db:5432/affine
+ - REDIS_URL=redis://redis:6379
+ - NODE_ENV=production
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ networks:
+ - affine_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:3000/" ]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+
+ db:
+ image: postgres:15-alpine
+ container_name: affine-db
+ environment:
+ POSTGRES_USER: affine
+ POSTGRES_PASSWORD: affine
+ POSTGRES_DB: affine
+ volumes:
+ - affine_db_data:/var/lib/postgresql/data
+ networks:
+ - affine_net
+ healthcheck:
+ test: [ "CMD-SHELL", "pg_isready -U affine" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+ redis:
+ image: redis:7-alpine
+ container_name: affine-redis
+ networks:
+ - affine_net
+ healthcheck:
+ test: [ "CMD", "redis-cli", "ping" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+networks:
+ affine_net:
+ driver: bridge
+
+volumes:
+ affine_db_data:
+ name: affine_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/affine && cd /opt/affine
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DATABASE_URL` | `postgres://affine:affine@db:5432/affine` | No |
+| `REDIS_URL` | `redis://redis:6379` | No |
+| `NODE_ENV` | `production` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs affine | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [AFFiNE on AltStack Directory](https://thealtstack.com/alternative-to/affine)
+- [AFFiNE Self-Hosted Guide](https://thealtstack.com/self-hosted/affine)
+- [Official Documentation](https://affine.pro)
+- [GitHub Repository](https://github.com/toeverything/AFFiNE)
diff --git a/docs/app/deploy/akaunting/page.mdx b/docs/app/deploy/akaunting/page.mdx
new file mode 100644
index 0000000..fbf61ee
--- /dev/null
+++ b/docs/app/deploy/akaunting/page.mdx
@@ -0,0 +1,146 @@
+---
+title: "Deploy Akaunting Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Akaunting with Docker Compose."
+---
+
+# Deploy Akaunting
+
+Free and open source online accounting software for small businesses and freelancers.
+
+
+ ⭐ 12.0k stars
+ 📜 GPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Akaunting instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Akaunting and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ akaunting:
+ image: akaunting/akaunting:latest
+ container_name: akaunting
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "8080:80"
+ environment:
+ - DB_HOST=db
+ - DB_DATABASE=akaunting
+ - DB_USERNAME=akaunting
+ - DB_PASSWORD=akaunting
+
+ db:
+ image: mariadb:10.6
+ container_name: akaunting-db
+ restart: unless-stopped
+ environment:
+ - MYSQL_DATABASE=akaunting
+ - MYSQL_USER=akaunting
+ - MYSQL_PASSWORD=akaunting
+ - MYSQL_ROOT_PASSWORD=root # ⚠️ change this default before exposing to the internet
+ volumes:
+ - akaunting_db_data:/var/lib/mysql
+
+volumes:
+ akaunting_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/akaunting && cd /opt/akaunting
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DB_HOST` | `db` | No |
+| `DB_DATABASE` | `akaunting` | No |
+| `DB_USERNAME` | `akaunting` | No |
+| `DB_PASSWORD` | `akaunting` | No |
+| `MYSQL_DATABASE` | `akaunting` | No |
+| `MYSQL_USER` | `akaunting` | No |
+| `MYSQL_PASSWORD` | `akaunting` | No |
+| `MYSQL_ROOT_PASSWORD` | `root` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs akaunting | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Akaunting on AltStack Directory](https://thealtstack.com/alternative-to/akaunting)
+- [Akaunting Self-Hosted Guide](https://thealtstack.com/self-hosted/akaunting)
+- [Official Documentation](https://akaunting.com)
+- [GitHub Repository](https://github.com/akaunting/akaunting)
diff --git a/docs/app/deploy/appflowy/page.mdx b/docs/app/deploy/appflowy/page.mdx
new file mode 100644
index 0000000..6169147
--- /dev/null
+++ b/docs/app/deploy/appflowy/page.mdx
@@ -0,0 +1,171 @@
+---
+title: "Deploy AppFlowy Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting AppFlowy with Docker Compose."
+---
+
+# Deploy AppFlowy
+
+Bring projects, wikis, and teams together with AI. AppFlowy is the AI collaborative workspace where you achieve more without losing control of your data. The leading open source Notion alternative.
+
+
+ ⭐ 68.0k stars
+ 📜 GNU Affero General Public License v3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working AppFlowy instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for AppFlowy and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for AppFlowy Cloud
+version: '3.8'
+
+services:
+ appflowy:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ container_name: appflowy-cloud
+ ports:
+ - "8080:8080"
+ environment:
+ - DATABASE_URL=postgres://postgres:${POSTGRES_PASSWORD:-password}@db:5432/appflowy
+ - REDIS_URL=redis://redis:6379
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ networks:
+ - appflowy_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:8080/health" ]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ db:
+ image: postgres:15-alpine
+ container_name: appflowy-db
+ environment:
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
+ POSTGRES_DB: appflowy
+ volumes:
+ - appflowy_db_data:/var/lib/postgresql/data
+ networks:
+ - appflowy_net
+ healthcheck:
+ test: [ "CMD-SHELL", "pg_isready -U postgres" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+ redis:
+ image: redis:7-alpine
+ container_name: appflowy-redis
+ networks:
+ - appflowy_net
+ healthcheck:
+ test: [ "CMD", "redis-cli", "ping" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+networks:
+ appflowy_net:
+ driver: bridge
+
+volumes:
+ appflowy_db_data:
+ name: appflowy_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/appflowy && cd /opt/appflowy
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DATABASE_URL` | `postgres://postgres:${POSTGRES_PASSWORD:-password}@db:5432/appflowy` | No |
+| `REDIS_URL` | `redis://redis:6379` | No |
+| `POSTGRES_PASSWORD` | `password` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs appflowy | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [AppFlowy on AltStack Directory](https://thealtstack.com/alternative-to/appflowy)
+- [AppFlowy Self-Hosted Guide](https://thealtstack.com/self-hosted/appflowy)
+- [Official Documentation](https://www.appflowy.io)
+- [GitHub Repository](https://github.com/AppFlowy-IO/AppFlowy)
diff --git a/docs/app/deploy/appwrite/page.mdx b/docs/app/deploy/appwrite/page.mdx
new file mode 100644
index 0000000..2483ea6
--- /dev/null
+++ b/docs/app/deploy/appwrite/page.mdx
@@ -0,0 +1,181 @@
+---
+title: "Deploy Appwrite Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Appwrite with Docker Compose. "
+---
+
+# Deploy Appwrite
+
+Appwrite® — complete cloud infrastructure for your web, mobile and AI apps, including Auth, Databases, Storage, Functions, Messaging, Hosting, Realtime and more.
+
+
+ ⭐ 54.7k stars
+ 📜 BSD 3-Clause "New" or "Revised" License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Appwrite instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Appwrite and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for Appwrite
+# Note: Appwrite is a complex multi-service system.
+# This is a production-ready configuration for the core services.
+version: '3.8'
+
+services:
+ appwrite:
+ image: appwrite/appwrite:1.5.4
+ container_name: appwrite
+ ports:
+ - "80:80"
+ - "443:443"
+ environment:
+ - _APP_ENV=production
+ - _APP_DB_HOST=db
+ - _APP_DB_USER=appwrite
+ - _APP_DB_PASS=${DB_PASSWORD:-password}
+ - _APP_REDIS_HOST=redis
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ networks:
+ - appwrite_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost/v1/health" ]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+
+ db:
+ image: mariadb:10.11 # Appwrite uses MariaDB by default
+ container_name: appwrite-db
+ environment:
+ MARIADB_USER: appwrite
+ MARIADB_PASSWORD: ${DB_PASSWORD:-password}
+ MARIADB_DATABASE: appwrite
+ MARIADB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD:-rootpassword}
+ volumes:
+ - appwrite_db_data:/var/lib/mysql
+ networks:
+ - appwrite_net
+ healthcheck:
+ test: [ "CMD-SHELL", "mysqladmin ping -h localhost -u root -p${DB_ROOT_PASSWORD:-rootpassword}" ]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ redis:
+ image: redis:7-alpine
+ container_name: appwrite-redis
+ networks:
+ - appwrite_net
+ healthcheck:
+ test: [ "CMD", "redis-cli", "ping" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+networks:
+ appwrite_net:
+ driver: bridge
+
+volumes:
+ appwrite_db_data:
+ name: appwrite_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/appwrite && cd /opt/appwrite
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `_APP_ENV` | `production` | No |
+| `_APP_DB_HOST` | `db` | No |
+| `_APP_DB_USER` | `appwrite` | No |
+| `_APP_DB_PASS` | `${DB_PASSWORD:-password}` | No |
+| `_APP_REDIS_HOST` | `redis` | No |
+| `DB_PASSWORD` | `password` | No |
+| `DB_ROOT_PASSWORD` | `rootpassword` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs appwrite | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Appwrite on AltStack Directory](https://thealtstack.com/alternative-to/appwrite)
+- [Appwrite Self-Hosted Guide](https://thealtstack.com/self-hosted/appwrite)
+- [Official Documentation](https://appwrite.io)
+- [GitHub Repository](https://github.com/appwrite/appwrite)
diff --git a/docs/app/deploy/authentik/page.mdx b/docs/app/deploy/authentik/page.mdx
new file mode 100644
index 0000000..2a8e8a3
--- /dev/null
+++ b/docs/app/deploy/authentik/page.mdx
@@ -0,0 +1,172 @@
+---
+title: "Deploy Authentik Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Authentik with Docker Compose. "
+---
+
+# Deploy Authentik
+
+An open-source identity provider focused on flexibility and versatility.
+
+
+ ⭐ 15.0k stars
+ 📜 MIT
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Authentik instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Authentik and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ server:
+ image: ghcr.io/goauthentik/server:latest
+ container_name: authentik-server
+ restart: unless-stopped
+ command: server
+ depends_on:
+ - db
+ - redis
+ ports:
+ - "9000:9000"
+ - "9443:9443"
+ environment:
+ - AUTHENTIK_REDIS__HOST=redis
+ - AUTHENTIK_POSTGRESQL__HOST=db
+ - AUTHENTIK_POSTGRESQL__USER=authentik
+ - AUTHENTIK_POSTGRESQL__NAME=authentik
+ - AUTHENTIK_POSTGRESQL__PASSWORD=authentik
+ - AUTHENTIK_SECRET_KEY=generate-a-random-secret-key
+
+ worker:
+ image: ghcr.io/goauthentik/server:latest
+ container_name: authentik-worker
+ restart: unless-stopped
+ command: worker
+ depends_on:
+ - db
+ - redis
+ environment:
+ - AUTHENTIK_REDIS__HOST=redis
+ - AUTHENTIK_POSTGRESQL__HOST=db
+ - AUTHENTIK_POSTGRESQL__USER=authentik
+ - AUTHENTIK_POSTGRESQL__NAME=authentik
+ - AUTHENTIK_POSTGRESQL__PASSWORD=authentik
+ - AUTHENTIK_SECRET_KEY=generate-a-random-secret-key
+
+ db:
+ image: postgres:12-alpine
+ container_name: authentik-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_PASSWORD=authentik
+ - POSTGRES_USER=authentik
+ - POSTGRES_DB=authentik
+ volumes:
+ - authentik_db_data:/var/lib/postgresql/data
+
+ redis:
+ image: redis:6-alpine
+ container_name: authentik-redis
+ restart: unless-stopped
+
+volumes:
+ authentik_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/authentik && cd /opt/authentik
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `AUTHENTIK_REDIS__HOST` | `redis` | No |
+| `AUTHENTIK_POSTGRESQL__HOST` | `db` | No |
+| `AUTHENTIK_POSTGRESQL__USER` | `authentik` | No |
+| `AUTHENTIK_POSTGRESQL__NAME` | `authentik` | No |
+| `AUTHENTIK_POSTGRESQL__PASSWORD` | `authentik` | No |
+| `AUTHENTIK_SECRET_KEY` | `generate-a-random-secret-key` | **Yes — replace the placeholder with a long random string before first start** |
+| `POSTGRES_PASSWORD` | `authentik` | No |
+| `POSTGRES_USER` | `authentik` | No |
+| `POSTGRES_DB` | `authentik` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+# The Authentik web service is named "server" in this compose file
+docker compose logs server | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Authentik on AltStack Directory](https://thealtstack.com/alternative-to/authentik)
+- [Authentik Self-Hosted Guide](https://thealtstack.com/self-hosted/authentik)
+- [Official Documentation](https://goauthentik.io)
+- [GitHub Repository](https://github.com/goauthentik/authentik)
diff --git a/docs/app/deploy/bitwarden/page.mdx b/docs/app/deploy/bitwarden/page.mdx
new file mode 100644
index 0000000..7acadc5
--- /dev/null
+++ b/docs/app/deploy/bitwarden/page.mdx
@@ -0,0 +1,117 @@
+---
+title: "Deploy Bitwarden Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Bitwarden with Docker Compose. "
+---
+
+# Deploy Bitwarden
+
+Bitwarden infrastructure/backend (API, database, Docker, etc).
+
+
+ ⭐ 18.0k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Bitwarden instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Bitwarden and add this `docker-compose.yml`. Note: this
+config uses the `vaultwarden/server` image — Vaultwarden is a lightweight,
+community-maintained implementation of the Bitwarden server API that works with
+the official Bitwarden clients and is far simpler to self-host than the official
+server stack:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ bitwarden:
+ image: vaultwarden/server:latest
+ container_name: bitwarden
+ restart: unless-stopped
+ ports:
+ - "8088:80"
+ volumes:
+ - bw-data:/data
+
+volumes:
+ bw-data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/bitwarden && cd /opt/bitwarden
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs bitwarden | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Bitwarden on AltStack Directory](https://thealtstack.com/alternative-to/bitwarden)
+- [Bitwarden Self-Hosted Guide](https://thealtstack.com/self-hosted/bitwarden)
+- [Official Documentation](https://bitwarden.com)
+- [GitHub Repository](https://github.com/bitwarden/server)
diff --git a/docs/app/deploy/calcom/page.mdx b/docs/app/deploy/calcom/page.mdx
new file mode 100644
index 0000000..52b02c3
--- /dev/null
+++ b/docs/app/deploy/calcom/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Cal.com Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Cal.com with Docker Compose. "
+---
+
+# Deploy Cal.com
+
+The open-source Calendly alternative. Take control of your scheduling.
+
+
+ ⭐ 30.0k stars
+ 📜 AGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Cal.com instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Cal.com and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ calcom:
+ image: calcom/cal.com:latest
+ container_name: calcom
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/calcom && cd /opt/calcom
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs calcom | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Cal.com on AltStack Directory](https://thealtstack.com/alternative-to/calcom)
+- [Cal.com Self-Hosted Guide](https://thealtstack.com/self-hosted/calcom)
+- [Official Documentation](https://cal.com)
+- [GitHub Repository](https://github.com/calcom/cal.com)
diff --git a/docs/app/deploy/chaskiq/page.mdx b/docs/app/deploy/chaskiq/page.mdx
new file mode 100644
index 0000000..0d0ee48
--- /dev/null
+++ b/docs/app/deploy/chaskiq/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Chaskiq Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Chaskiq with Docker Compose. "
+---
+
+# Deploy Chaskiq
+
+Open source conversational marketing platform alternative to Intercom and Drift.
+
+
+ ⭐ 4.0k stars
+ 📜 GPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Chaskiq instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Chaskiq and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ chaskiq:
+ image: chaskiq/chaskiq:latest
+ container_name: chaskiq
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/chaskiq && cd /opt/chaskiq
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs chaskiq | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Chaskiq on AltStack Directory](https://thealtstack.com/alternative-to/chaskiq)
+- [Chaskiq Self-Hosted Guide](https://thealtstack.com/self-hosted/chaskiq)
+- [Official Documentation](https://chaskiq.io)
+- [GitHub Repository](https://github.com/chaskiq/chaskiq)
diff --git a/docs/app/deploy/coder/page.mdx b/docs/app/deploy/coder/page.mdx
new file mode 100644
index 0000000..e1ca29c
--- /dev/null
+++ b/docs/app/deploy/coder/page.mdx
@@ -0,0 +1,144 @@
+---
+title: "Deploy Coder Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Coder with Docker Compose. "
+---
+
+# Deploy Coder
+
+Provision software development environments as code on your infrastructure.
+
+
+ ⭐ 20.0k stars
+ 📜 AGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Coder instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Coder and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ coder:
+ image: ghcr.io/coder/coder:latest
+ container_name: coder
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "7080:7080"
+ environment:
+ - CODER_PG_CONNECTION_URL=postgresql://coder:coder@db:5432/coder
+ - CODER_ACCESS_URL=http://localhost:7080
+ - CODER_HTTP_ADDRESS=0.0.0.0:7080
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+
+ db:
+ image: postgres:13
+ container_name: coder-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=coder
+ - POSTGRES_PASSWORD=coder
+ - POSTGRES_DB=coder
+ volumes:
+ - coder_db_data:/var/lib/postgresql/data
+
+volumes:
+ coder_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/coder && cd /opt/coder
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `CODER_PG_CONNECTION_URL` | `postgresql://coder:coder@db:5432/coder` | No |
+| `CODER_ACCESS_URL` | `http://localhost:7080` | No |
+| `CODER_HTTP_ADDRESS` | `0.0.0.0:7080` | No |
+| `POSTGRES_USER` | `coder` | No |
+| `POSTGRES_PASSWORD` | `coder` | No |
+| `POSTGRES_DB` | `coder` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs coder | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Coder on AltStack Directory](https://thealtstack.com/alternative-to/coder)
+- [Coder Self-Hosted Guide](https://thealtstack.com/self-hosted/coder)
+- [Official Documentation](https://coder.com)
+- [GitHub Repository](https://github.com/coder/coder)
diff --git a/docs/app/deploy/continue-dev/page.mdx b/docs/app/deploy/continue-dev/page.mdx
new file mode 100644
index 0000000..8697d87
--- /dev/null
+++ b/docs/app/deploy/continue-dev/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Continue Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Continue with Docker Compose. "
+---
+
+# Deploy Continue
+
+Open-source AI code assistant for VS Code and JetBrains. Use any model (local or API).
+
+
+ ⭐ 25.0k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Continue instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Continue and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ continue:
+ image: continuedev/continue:latest
+ container_name: continue
+ restart: unless-stopped
+ ports:
+ - "8080:8080"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/continue-dev && cd /opt/continue-dev
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+# The service is named "continue" in this compose file
+docker compose logs continue | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Continue on AltStack Directory](https://thealtstack.com/alternative-to/continue-dev)
+- [Continue Self-Hosted Guide](https://thealtstack.com/self-hosted/continue-dev)
+- [Official Documentation](https://continue.dev)
+- [GitHub Repository](https://github.com/continuedev/continue)
diff --git a/docs/app/deploy/coolify/page.mdx b/docs/app/deploy/coolify/page.mdx
new file mode 100644
index 0000000..4c59634
--- /dev/null
+++ b/docs/app/deploy/coolify/page.mdx
@@ -0,0 +1,171 @@
+---
+title: "Deploy Coolify Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Coolify with Docker Compose. "
+---
+
+# Deploy Coolify
+
+An open-source, self-hostable PaaS alternative to Vercel, Heroku & Netlify that lets you easily deploy static sites, databases, full-stack applications and 280+ one-click services on your own servers.
+
+
+ ⭐ 50.4k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully operational Coolify instance. Think of Coolify as a self-hosted Vercel or Heroku. Once installed, it manages your other Docker containers, handles deployments from GitHub/GitLab, and provides an integrated reverse proxy.
+
+> 🚀 **Self-Hosting Level:** If you only deploy one thing, let it be Coolify. It makes deploying everything else 10x easier.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Coolify and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for Coolify
+# Note: Coolify is a self-hosted PaaS.
+version: '3.8'
+
+services:
+ coolify:
+ image: ghcr.io/coollabsio/coolify:latest
+ container_name: coolify
+ ports:
+ - "8000:8000"
+ environment:
+ - APP_ENV=production
+ - DB_CONNECTION=pgsql
+ - DB_HOST=db
+ - DB_DATABASE=coolify
+ - DB_USERNAME=coolify
+ - DB_PASSWORD=${DB_PASSWORD:-password}
+ volumes:
+ - coolify_data:/var/www/html/storage
+ - /var/run/docker.sock:/var/run/docker.sock # Essential for controlling Docker
+ depends_on:
+ db:
+ condition: service_healthy
+ networks:
+ - coolify_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:8000/api/health" ]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ restart: unless-stopped
+
+ db:
+ image: postgres:15-alpine
+ container_name: coolify-db
+ environment:
+ POSTGRES_USER: coolify
+ POSTGRES_PASSWORD: ${DB_PASSWORD:-password}
+ POSTGRES_DB: coolify
+ volumes:
+ - coolify_db_data:/var/lib/postgresql/data
+ networks:
+ - coolify_net
+ healthcheck:
+ test: [ "CMD-SHELL", "pg_isready -U coolify" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+networks:
+ coolify_net:
+ driver: bridge
+
+volumes:
+ coolify_data:
+ name: coolify_data
+ coolify_db_data:
+ name: coolify_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/coolify && cd /opt/coolify
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `APP_ENV` | `production` | No |
+| `DB_CONNECTION` | `pgsql` | No |
+| `DB_HOST` | `db` | No |
+| `DB_DATABASE` | `coolify` | No |
+| `DB_USERNAME` | `coolify` | No |
+| `DB_PASSWORD` | `${DB_PASSWORD:-password}` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs coolify | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Coolify on AltStack Directory](https://thealtstack.com/alternative-to/coolify)
+- [Coolify Self-Hosted Guide](https://thealtstack.com/self-hosted/coolify)
+- [Official Documentation](https://coolify.io)
+- [GitHub Repository](https://github.com/coollabsio/coolify)
diff --git a/docs/app/deploy/deepseek/page.mdx b/docs/app/deploy/deepseek/page.mdx
new file mode 100644
index 0000000..7b1176b
--- /dev/null
+++ b/docs/app/deploy/deepseek/page.mdx
@@ -0,0 +1,117 @@
+---
+title: "Deploy DeepSeek-V3 / R1 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting DeepSeek-V3 / R1 with Docker Compose. Replaces: meta-llama-3-1, mistral, qwen-2-5."
+---
+
+# Deploy DeepSeek-V3 / R1
+
+Powerful open-source models including V3 (671B) and R1 (Reasoning). Rivals GPT-4o and o1.
+
+
+ ⭐ 110.0k stars
+ 📜 MIT License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working DeepSeek-V3 / R1 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for DeepSeek-V3 / R1 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ ollama-deepseek:
+ image: ollama/ollama:latest
+ container_name: ollama-deepseek
+ restart: unless-stopped
+ ports:
+ - "11435:11434"
+ volumes:
+ - ollama_deepseek:/root/.ollama
+
+volumes:
+ ollama_deepseek:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/deepseek && cd /opt/deepseek
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+
+# Pull a DeepSeek model into Ollama (required before first use)
+docker exec ollama-deepseek ollama pull deepseek-r1
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs ollama-deepseek | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [DeepSeek-V3 / R1 on AltStack Directory](https://thealtstack.com/alternative-to/deepseek)
+- [DeepSeek-V3 / R1 Self-Hosted Guide](https://thealtstack.com/self-hosted/deepseek)
+- [Official Documentation](https://deepseek.com)
+- [GitHub Repository](https://github.com/deepseek-ai/DeepSeek-V3)
diff --git a/docs/app/deploy/documenso/page.mdx b/docs/app/deploy/documenso/page.mdx
new file mode 100644
index 0000000..57c6b96
--- /dev/null
+++ b/docs/app/deploy/documenso/page.mdx
@@ -0,0 +1,142 @@
+---
+title: "Deploy Documenso Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Documenso with Docker Compose. "
+---
+
+# Deploy Documenso
+
+The open-source DocuSign alternative. We aim to be the world's most trusted document signing platform.
+
+
+ ⭐ 8.0k stars
+ 📜 AGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Documenso instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Documenso and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ documenso:
+ image: documenso/documenso:latest
+ container_name: documenso
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "3000:3000"
+ environment:
+ - DATABASE_URL=postgresql://documenso:documenso@db:5432/documenso
+ - NEXTAUTH_URL=http://localhost:3000
+ - NEXTAUTH_SECRET=supersecret
+
+ db:
+ image: postgres:15-alpine
+ container_name: documenso-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=documenso
+ - POSTGRES_PASSWORD=documenso
+ - POSTGRES_DB=documenso
+ volumes:
+ - documenso_db_data:/var/lib/postgresql/data
+
+volumes:
+ documenso_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/documenso && cd /opt/documenso
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DATABASE_URL` | `postgresql://documenso:documenso@db:5432/documenso` | No |
+| `NEXTAUTH_URL` | `http://localhost:3000` | No |
+| `NEXTAUTH_SECRET` | `supersecret` | No |
+| `POSTGRES_USER` | `documenso` | No |
+| `POSTGRES_PASSWORD` | `documenso` | No |
+| `POSTGRES_DB` | `documenso` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs documenso | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Documenso on AltStack Directory](https://thealtstack.com/alternative-to/documenso)
+- [Documenso Self-Hosted Guide](https://thealtstack.com/self-hosted/documenso)
+- [Official Documentation](https://documenso.com)
+- [GitHub Repository](https://github.com/documenso/documenso)
diff --git a/docs/app/deploy/dokku/page.mdx b/docs/app/deploy/dokku/page.mdx
new file mode 100644
index 0000000..b3bc8b8
--- /dev/null
+++ b/docs/app/deploy/dokku/page.mdx
@@ -0,0 +1,114 @@
+---
+title: "Deploy Dokku Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Dokku with Docker Compose. "
+---
+
+# Deploy Dokku
+
+A docker-powered PaaS that helps you build and manage the lifecycle of applications
+
+
+ ⭐ 31.9k stars
+ 📜 MIT License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Dokku instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Dokku and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ dokku:
+ image: dokku/dokku:latest
+ container_name: dokku
+ restart: unless-stopped
+ ports:
+ - "80:80"
+ - "443:443"
+ - "3022:22" # Dokku's git-push SSH; host port 3022 avoids clashing with the server's own sshd on 22
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/dokku && cd /opt/dokku
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs dokku | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Dokku on AltStack Directory](https://thealtstack.com/alternative-to/dokku)
+- [Dokku Self-Hosted Guide](https://thealtstack.com/self-hosted/dokku)
+- [Official Documentation](https://dokku.com)
+- [GitHub Repository](https://github.com/dokku/dokku)
diff --git a/docs/app/deploy/erpnext/page.mdx b/docs/app/deploy/erpnext/page.mdx
new file mode 100644
index 0000000..c836160
--- /dev/null
+++ b/docs/app/deploy/erpnext/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy ERPNext Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting ERPNext with Docker Compose. "
+---
+
+# Deploy ERPNext
+
+A free and open-source integrated Enterprise Resource Planning (ERP) software.
+
+
+ ⭐ 31.6k stars
+ 📜 GNU General Public License v3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working ERPNext instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for ERPNext and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ erpnext:
+ image: frappe/erpnext-worker:latest
+ container_name: erpnext
+ restart: unless-stopped
+ ports:
+ - "8000:8000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/erpnext && cd /opt/erpnext
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs erpnext | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [ERPNext on AltStack Directory](https://thealtstack.com/alternative-to/erpnext)
+- [ERPNext Self-Hosted Guide](https://thealtstack.com/self-hosted/erpnext)
+- [Official Documentation](https://erpnext.com)
+- [GitHub Repository](https://github.com/frappe/erpnext)
diff --git a/docs/app/deploy/flux/page.mdx b/docs/app/deploy/flux/page.mdx
new file mode 100644
index 0000000..9709497
--- /dev/null
+++ b/docs/app/deploy/flux/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy FLUX Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting FLUX with Docker Compose. "
+---
+
+# Deploy FLUX
+
+Next-gen open image generation model from Black Forest Labs. State-of-the-art quality rivaling Midjourney.
+
+
+ ⭐ 20.0k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working FLUX instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for FLUX and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ flux:
+ image: blackforestlabs/flux:latest
+ container_name: flux
+ restart: unless-stopped
+ ports:
+ - "8000:8000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/flux && cd /opt/flux
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs flux | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [FLUX on AltStack Directory](https://thealtstack.com/alternative-to/flux)
+- [FLUX Self-Hosted Guide](https://thealtstack.com/self-hosted/flux)
+- [Official Documentation](https://blackforestlabs.ai)
+- [GitHub Repository](https://github.com/black-forest-labs/flux)
diff --git a/docs/app/deploy/freecad/page.mdx b/docs/app/deploy/freecad/page.mdx
new file mode 100644
index 0000000..a7e0689
--- /dev/null
+++ b/docs/app/deploy/freecad/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy FreeCAD Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting FreeCAD with Docker Compose. "
+---
+
+# Deploy FreeCAD
+
+A general-purpose parametric 3D CAD modeler and a BIM software application.
+
+
+ ⭐ 21.0k stars
+ 📜 LGPLv2+
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working FreeCAD instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for FreeCAD and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ freecad:
+ image: lscr.io/linuxserver/freecad:latest
+ container_name: freecad
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/freecad && cd /opt/freecad
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs freecad | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [FreeCAD on AltStack Directory](https://thealtstack.com/alternative-to/freecad)
+- [FreeCAD Self-Hosted Guide](https://thealtstack.com/self-hosted/freecad)
+- [Official Documentation](https://www.freecad.org)
+- [GitHub Repository](https://github.com/FreeCAD/FreeCAD)
diff --git a/docs/app/deploy/gemma/page.mdx b/docs/app/deploy/gemma/page.mdx
new file mode 100644
index 0000000..dce1fc1
--- /dev/null
+++ b/docs/app/deploy/gemma/page.mdx
@@ -0,0 +1,117 @@
+---
+title: "Deploy Google Gemma 2 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Google Gemma 2 with Docker Compose. "
+---
+
+# Deploy Google Gemma 2
+
+Google's open-weight models (9B, 27B) with class-leading performance and efficient architecture.
+
+
+ ⭐ 20.0k stars
+ 📜 Gemma License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Google Gemma 2 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Google Gemma 2 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ ollama-gemma:
+ image: ollama/ollama:latest
+ container_name: ollama-gemma
+ restart: unless-stopped
+ ports:
+ - "11437:11434"
+ volumes:
+ - ollama_gemma:/root/.ollama
+
+volumes:
+ ollama_gemma:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/gemma && cd /opt/gemma
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs ollama-gemma | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Google Gemma 2 on AltStack Directory](https://thealtstack.com/alternative-to/gemma)
+- [Google Gemma 2 Self-Hosted Guide](https://thealtstack.com/self-hosted/gemma)
+- [Official Documentation](https://ai.google.dev/gemma)
+- [GitHub Repository](https://github.com/google-deepmind/gemma)
diff --git a/docs/app/deploy/gimp/page.mdx b/docs/app/deploy/gimp/page.mdx
new file mode 100644
index 0000000..248fa2c
--- /dev/null
+++ b/docs/app/deploy/gimp/page.mdx
@@ -0,0 +1,121 @@
+---
+title: "Deploy GIMP Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting GIMP with Docker Compose. "
+---
+
+# Deploy GIMP
+
+A free and open-source raster graphics editor for image manipulation, photo retouching, and graphic design.
+
+
+ ⭐ 6.0k stars
+ 📜 GPL-3.0-or-later
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working GIMP instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for GIMP and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ gimp:
+ image: linuxserver/gimp:latest
+ container_name: gimp
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+ environment:
+ - PUID=1000
+ - PGID=1000
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/gimp && cd /opt/gimp
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `PUID` | `1000` | No |
+| `PGID` | `1000` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs gimp | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [GIMP on AltStack Directory](https://thealtstack.com/alternative-to/gimp)
+- [GIMP Self-Hosted Guide](https://thealtstack.com/self-hosted/gimp)
+- [Official Documentation](https://www.gimp.org)
+- [GitHub Repository](https://github.com/GNOME/gimp)
diff --git a/docs/app/deploy/glitchtip/page.mdx b/docs/app/deploy/glitchtip/page.mdx
new file mode 100644
index 0000000..0ce9253
--- /dev/null
+++ b/docs/app/deploy/glitchtip/page.mdx
@@ -0,0 +1,150 @@
+---
+title: "Deploy GlitchTip Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting GlitchTip with Docker Compose. "
+---
+
+# Deploy GlitchTip
+
+Open source error tracking that's compatible with Sentry SDKs.
+
+
+ ⭐ 3.0k stars
+ 📜 MIT
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working GlitchTip instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for GlitchTip and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ glitchtip:
+ image: glitchtip/glitchtip:latest
+ container_name: glitchtip
+ restart: unless-stopped
+ depends_on:
+ - db
+ - redis
+ ports:
+ - "8000:8000"
+ environment:
+ - DATABASE_URL=postgres://glitchtip:glitchtip@db:5432/glitchtip
+ - REDIS_URL=redis://redis:6379
+ - SECRET_KEY=change_me_to_something_random
+ - PORT=8000
+
+ db:
+ image: postgres:14
+ container_name: glitchtip-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=glitchtip
+ - POSTGRES_PASSWORD=glitchtip
+ - POSTGRES_DB=glitchtip
+ volumes:
+ - glitchtip_db_data:/var/lib/postgresql/data
+
+ redis:
+ image: redis:alpine
+ container_name: glitchtip-redis
+ restart: unless-stopped
+
+volumes:
+ glitchtip_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/glitchtip && cd /opt/glitchtip
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DATABASE_URL` | `postgres://glitchtip:glitchtip@db:5432/glitchtip` | No |
+| `REDIS_URL` | `redis://redis:6379` | No |
+| `SECRET_KEY` | `change_me_to_something_random` | No |
+| `PORT` | `8000` | No |
+| `POSTGRES_USER` | `glitchtip` | No |
+| `POSTGRES_PASSWORD` | `glitchtip` | No |
+| `POSTGRES_DB` | `glitchtip` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs glitchtip | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [GlitchTip on AltStack Directory](https://thealtstack.com/alternative-to/glitchtip)
+- [GlitchTip Self-Hosted Guide](https://thealtstack.com/self-hosted/glitchtip)
+- [Official Documentation](https://glitchtip.com)
+- [GitLab Repository](https://gitlab.com/glitchtip/glitchtip)
diff --git a/docs/app/deploy/gpt4all/page.mdx b/docs/app/deploy/gpt4all/page.mdx
new file mode 100644
index 0000000..9d107bf
--- /dev/null
+++ b/docs/app/deploy/gpt4all/page.mdx
@@ -0,0 +1,132 @@
+---
+title: "Deploy GPT4All Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting GPT4All with Docker Compose. "
+---
+
+# Deploy GPT4All
+
+Run open-source LLMs locally on your CPU and GPU. No internet required.
+
+
+ ⭐ 65.0k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working GPT4All instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for GPT4All and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for GPT4All
+version: '3.8'
+
+services:
+ gpt4all:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ container_name: gpt4all-server
+ ports:
+ - "4891:4891"
+ volumes:
+ - gpt4all_models:/app/models
+ networks:
+ - gpt4all_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:4891/v1/models" ] # GPT4All local API endpoint
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ restart: unless-stopped
+
+networks:
+ gpt4all_net:
+ driver: bridge
+
+volumes:
+ gpt4all_models:
+ name: gpt4all_models
+```
+
+> **Note:** This config uses `build:` rather than a prebuilt image — place a `Dockerfile` for the GPT4All API server in the same directory before running `docker compose up`.
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/gpt4all && cd /opt/gpt4all
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs gpt4all | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [GPT4All on AltStack Directory](https://thealtstack.com/alternative-to/gpt4all)
+- [GPT4All Self-Hosted Guide](https://thealtstack.com/self-hosted/gpt4all)
+- [Official Documentation](https://gpt4all.io)
+- [GitHub Repository](https://github.com/nomic-ai/gpt4all)
diff --git a/docs/app/deploy/hunyuan-video/page.mdx b/docs/app/deploy/hunyuan-video/page.mdx
new file mode 100644
index 0000000..bb79ffe
--- /dev/null
+++ b/docs/app/deploy/hunyuan-video/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy HunyuanVideo 1.5 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting HunyuanVideo 1.5 with Docker Compose. "
+---
+
+# Deploy HunyuanVideo 1.5
+
+Tencent's state-of-the-art open-source video generation model with 13B parameters.
+
+
+ ⭐ 8.0k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working HunyuanVideo 1.5 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for HunyuanVideo 1.5 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ hunyuan:
+ image: tencent/hunyuan:latest
+ container_name: hunyuan
+ restart: unless-stopped
+ ports:
+ - "8000:8000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/hunyuan-video && cd /opt/hunyuan-video
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs hunyuan | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [HunyuanVideo 1.5 on AltStack Directory](https://thealtstack.com/alternative-to/hunyuan-video)
+- [HunyuanVideo 1.5 Self-Hosted Guide](https://thealtstack.com/self-hosted/hunyuan-video)
+- [Official Documentation](https://github.com/Tencent/HunyuanVideo)
+- [GitHub Repository](https://github.com/Tencent/HunyuanVideo)
diff --git a/docs/app/deploy/jitsi-meet/page.mdx b/docs/app/deploy/jitsi-meet/page.mdx
new file mode 100644
index 0000000..972d00f
--- /dev/null
+++ b/docs/app/deploy/jitsi-meet/page.mdx
@@ -0,0 +1,122 @@
+---
+title: "Deploy Jitsi Meet Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Jitsi Meet with Docker Compose. "
+---
+
+# Deploy Jitsi Meet
+
+Jitsi Meet - Secure, Simple and Scalable Video Conferences that you use as a standalone app or embed in your web application.
+
+
+ ⭐ 28.6k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Jitsi Meet instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Jitsi Meet and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ jitsi-web:
+ image: jitsi/web:latest
+ container_name: jitsi-web
+ restart: unless-stopped
+ ports:
+ - "8000:80"
+ - "8443:443"
+ environment:
+ - PUBLIC_URL=https://localhost:8443
+ - XMPP_SERVER=xmpp.meet.jitsi
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/jitsi-meet && cd /opt/jitsi-meet
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `PUBLIC_URL` | `https://localhost:8443` | No |
+| `XMPP_SERVER` | `xmpp.meet.jitsi` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs jitsi-web | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Jitsi Meet on AltStack Directory](https://thealtstack.com/alternative-to/jitsi-meet)
+- [Jitsi Meet Self-Hosted Guide](https://thealtstack.com/self-hosted/jitsi-meet)
+- [Official Documentation](https://jitsi.org)
+- [GitHub Repository](https://github.com/jitsi/jitsi-meet)
diff --git a/docs/app/deploy/jitsu/page.mdx b/docs/app/deploy/jitsu/page.mdx
new file mode 100644
index 0000000..df54180
--- /dev/null
+++ b/docs/app/deploy/jitsu/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Jitsu Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Jitsu with Docker Compose. "
+---
+
+# Deploy Jitsu
+
+High-performance data collection platform and open-source Segment alternative.
+
+
+ ⭐ 5.0k stars
+ 📜 MIT
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Jitsu instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Jitsu and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ jitsu:
+ image: jitsu/jitsu:latest
+ container_name: jitsu
+ restart: unless-stopped
+ ports:
+ - "8000:8000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/jitsu && cd /opt/jitsu
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs jitsu | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Jitsu on AltStack Directory](https://thealtstack.com/alternative-to/jitsu)
+- [Jitsu Self-Hosted Guide](https://thealtstack.com/self-hosted/jitsu)
+- [Official Documentation](https://jitsu.com)
+- [GitHub Repository](https://github.com/jitsucom/jitsu)
diff --git a/docs/app/deploy/kdenlive/page.mdx b/docs/app/deploy/kdenlive/page.mdx
new file mode 100644
index 0000000..c49356e
--- /dev/null
+++ b/docs/app/deploy/kdenlive/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Kdenlive Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Kdenlive with Docker Compose. "
+---
+
+# Deploy Kdenlive
+
+Open source video editing software based on the MLT Framework and KDE.
+
+
+ ⭐ 3.5k stars
+ 📜 GPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Kdenlive instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Kdenlive and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ kdenlive:
+ image: lscr.io/linuxserver/kdenlive:latest
+ container_name: kdenlive
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/kdenlive && cd /opt/kdenlive
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs kdenlive | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Kdenlive on AltStack Directory](https://thealtstack.com/alternative-to/kdenlive)
+- [Kdenlive Self-Hosted Guide](https://thealtstack.com/self-hosted/kdenlive)
+- [Official Documentation](https://kdenlive.org)
+- [GitHub Repository](https://github.com/KDE/kdenlive)
diff --git a/docs/app/deploy/keepassxc/page.mdx b/docs/app/deploy/keepassxc/page.mdx
new file mode 100644
index 0000000..d952dad
--- /dev/null
+++ b/docs/app/deploy/keepassxc/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy KeePassXC Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting KeePassXC with Docker Compose. "
+---
+
+# Deploy KeePassXC
+
+KeePassXC is a cross-platform community-driven port of the Windows application “KeePass Password Safe”.
+
+
+ ⭐ 25.8k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working KeePassXC instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for KeePassXC and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ keepassxc:
+ image: jlesage/keepassxc:latest
+ container_name: keepassxc
+ restart: unless-stopped
+ ports:
+ - "5800:5800"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/keepassxc && cd /opt/keepassxc
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs keepassxc | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [KeePassXC on AltStack Directory](https://thealtstack.com/alternative-to/keepassxc)
+- [KeePassXC Self-Hosted Guide](https://thealtstack.com/self-hosted/keepassxc)
+- [Official Documentation](https://keepassxc.org)
+- [GitHub Repository](https://github.com/keepassxreboot/keepassxc)
diff --git a/docs/app/deploy/keycloak/page.mdx b/docs/app/deploy/keycloak/page.mdx
new file mode 100644
index 0000000..b767e62
--- /dev/null
+++ b/docs/app/deploy/keycloak/page.mdx
@@ -0,0 +1,149 @@
+---
+title: "Deploy Keycloak Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Keycloak with Docker Compose. "
+---
+
+# Deploy Keycloak
+
+Open source identity and access management for modern applications and services.
+
+
+ ⭐ 23.0k stars
+ 📜 Apache 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Keycloak instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Keycloak and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ keycloak:
+ image: quay.io/keycloak/keycloak:latest
+ container_name: keycloak
+ restart: unless-stopped
+ command: start-dev
+ depends_on:
+ - db
+ ports:
+ - "8080:8080"
+ environment:
+ - KEYCLOAK_ADMIN=admin
+ - KEYCLOAK_ADMIN_PASSWORD=admin
+ - KC_DB=postgres
+ - KC_DB_URL=jdbc:postgresql://db:5432/keycloak
+ - KC_DB_USERNAME=keycloak
+ - KC_DB_PASSWORD=keycloak
+
+ db:
+ image: postgres:15-alpine
+ container_name: keycloak-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_DB=keycloak
+ - POSTGRES_USER=keycloak
+ - POSTGRES_PASSWORD=keycloak
+ volumes:
+ - keycloak_db_data:/var/lib/postgresql/data
+
+volumes:
+ keycloak_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/keycloak && cd /opt/keycloak
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `KEYCLOAK_ADMIN` | `admin` | No |
+| `KEYCLOAK_ADMIN_PASSWORD` | `admin` | No |
+| `KC_DB` | `postgres` | No |
+| `KC_DB_URL` | `jdbc:postgresql://db:5432/keycloak` | No |
+| `KC_DB_USERNAME` | `keycloak` | No |
+| `KC_DB_PASSWORD` | `keycloak` | No |
+| `POSTGRES_DB` | `keycloak` | No |
+| `POSTGRES_USER` | `keycloak` | No |
+| `POSTGRES_PASSWORD` | `keycloak` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs keycloak | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Keycloak on AltStack Directory](https://thealtstack.com/alternative-to/keycloak)
+- [Keycloak Self-Hosted Guide](https://thealtstack.com/self-hosted/keycloak)
+- [Official Documentation](https://www.keycloak.org)
+- [GitHub Repository](https://github.com/keycloak/keycloak)
diff --git a/docs/app/deploy/krita/page.mdx b/docs/app/deploy/krita/page.mdx
new file mode 100644
index 0000000..ee518f0
--- /dev/null
+++ b/docs/app/deploy/krita/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Krita Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Krita with Docker Compose. "
+---
+
+# Deploy Krita
+
+Krita is a free and open source cross-platform application that offers an end-to-end solution for creating digital art files from scratch, built on the KDE and Qt frameworks.
+
+
+ ⭐ 9.3k stars
+ 📜 GNU General Public License v3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Krita instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Krita and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ krita:
+ image: linuxserver/krita:latest
+ container_name: krita
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/krita && cd /opt/krita
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs krita | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Krita on AltStack Directory](https://thealtstack.com/alternative-to/krita)
+- [Krita Self-Hosted Guide](https://thealtstack.com/self-hosted/krita)
+- [Official Documentation](https://krita.org)
+- [GitHub Repository](https://github.com/KDE/krita)
diff --git a/docs/app/deploy/librecad/page.mdx b/docs/app/deploy/librecad/page.mdx
new file mode 100644
index 0000000..c3834f3
--- /dev/null
+++ b/docs/app/deploy/librecad/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy LibreCAD Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting LibreCAD with Docker Compose. "
+---
+
+# Deploy LibreCAD
+
+A mature, feature-rich 2D CAD application with a loyal user community.
+
+
+ ⭐ 6.5k stars
+ 📜 GPLv2
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working LibreCAD instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for LibreCAD and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ librecad:
+ image: lscr.io/linuxserver/librecad:latest
+ container_name: librecad
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/librecad && cd /opt/librecad
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs librecad | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [LibreCAD on AltStack Directory](https://thealtstack.com/alternative-to/librecad)
+- [LibreCAD Self-Hosted Guide](https://thealtstack.com/self-hosted/librecad)
+- [Official Documentation](https://librecad.org)
+- [GitHub Repository](https://github.com/LibreCAD/LibreCAD)
diff --git a/docs/app/deploy/listmonk/page.mdx b/docs/app/deploy/listmonk/page.mdx
new file mode 100644
index 0000000..51b07d6
--- /dev/null
+++ b/docs/app/deploy/listmonk/page.mdx
@@ -0,0 +1,138 @@
+---
+title: "Deploy Listmonk Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Listmonk with Docker Compose. "
+---
+
+# Deploy Listmonk
+
+High performance, self-hosted newsletter and mailing list manager with a modern dashboard.
+
+
+ ⭐ 19.0k stars
+ 📜 AGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Listmonk instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Listmonk and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ listmonk:
+ image: listmonk/listmonk:latest
+ container_name: listmonk
+ restart: unless-stopped
+ command: sh -c './listmonk --install --yes --idempotent && ./listmonk'
+ depends_on:
+ - listmonk-db
+ ports:
+ - "9000:9000"
+ volumes:
+ - ./config.toml:/listmonk/config.toml
+
+ listmonk-db:
+ image: postgres:13-alpine
+ container_name: listmonk-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=listmonk
+ - POSTGRES_PASSWORD=listmonk
+ - POSTGRES_DB=listmonk
+ volumes:
+ - listmonk_db_data:/var/lib/postgresql/data
+
+volumes:
+ listmonk_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/listmonk && cd /opt/listmonk
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `POSTGRES_USER` | `listmonk` | No |
+| `POSTGRES_PASSWORD` | `listmonk` | No |
+| `POSTGRES_DB` | `listmonk` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs listmonk | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Listmonk on AltStack Directory](https://thealtstack.com/alternative-to/listmonk)
+- [Listmonk Self-Hosted Guide](https://thealtstack.com/self-hosted/listmonk)
+- [Official Documentation](https://listmonk.app)
+- [GitHub Repository](https://github.com/knadh/listmonk)
diff --git a/docs/app/deploy/llama/page.mdx b/docs/app/deploy/llama/page.mdx
new file mode 100644
index 0000000..f4067f8
--- /dev/null
+++ b/docs/app/deploy/llama/page.mdx
@@ -0,0 +1,118 @@
+---
+title: "Deploy Meta Llama 3.1 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Meta Llama 3.1 with Docker Compose. "
+---
+
+# Deploy Meta Llama 3.1
+
+Meta's flagship open-weight model with 128K context. Available in 8B, 70B, and 405B parameter sizes.
+
+
+ ⭐ 65.0k stars
+ 📜 Llama 3.1 Community License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Meta Llama 3.1 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Meta Llama 3.1 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ ollama-llama:
+ image: ollama/ollama:latest
+ container_name: ollama-llama
+ restart: unless-stopped
+ command: serve
+ ports:
+ - "11434:11434"
+ volumes:
+ - ollama:/root/.ollama
+
+volumes:
+ ollama:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/llama && cd /opt/llama
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs ollama-llama | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Meta Llama 3.1 on AltStack Directory](https://thealtstack.com/alternative-to/llama)
+- [Meta Llama 3.1 Self-Hosted Guide](https://thealtstack.com/self-hosted/llama)
+- [Official Documentation](https://llama.meta.com)
+- [GitHub Repository](https://github.com/meta-llama/llama3)
diff --git a/docs/app/deploy/matomo/page.mdx b/docs/app/deploy/matomo/page.mdx
new file mode 100644
index 0000000..98b4a8b
--- /dev/null
+++ b/docs/app/deploy/matomo/page.mdx
@@ -0,0 +1,119 @@
+---
+title: "Deploy Matomo Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Matomo with Docker Compose. "
+---
+
+# Deploy Matomo
+
+Matomo is the leading open-source alternative to Google Analytics, giving you complete control and built-in privacy. Easily collect, visualise, and analyse data from websites and apps.
+
+
+ ⭐ 21.3k stars
+ 📜 GNU General Public License v3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Matomo instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Matomo and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ matomo:
+ image: matomo:latest
+ container_name: matomo
+ restart: unless-stopped
+ ports:
+ - "8080:80"
+ environment:
+ - MATOMO_DATABASE_HOST=db
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/matomo && cd /opt/matomo
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `MATOMO_DATABASE_HOST` | `db` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs matomo | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Matomo on AltStack Directory](https://thealtstack.com/alternative-to/matomo)
+- [Matomo Self-Hosted Guide](https://thealtstack.com/self-hosted/matomo)
+- [Official Documentation](https://matomo.org)
+- [GitHub Repository](https://github.com/matomo-org/matomo)
diff --git a/docs/app/deploy/mattermost/page.mdx b/docs/app/deploy/mattermost/page.mdx
new file mode 100644
index 0000000..8b3feff
--- /dev/null
+++ b/docs/app/deploy/mattermost/page.mdx
@@ -0,0 +1,143 @@
+---
+title: "Deploy Mattermost Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Mattermost with Docker Compose. "
+---
+
+# Deploy Mattermost
+
+Mattermost is an open source platform for secure collaboration across the entire software development lifecycle.
+
+
+ ⭐ 35.2k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Mattermost instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Mattermost and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ mattermost:
+ image: mattermost/mattermost-team-edition:latest
+ container_name: mattermost
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "8065:8065"
+ environment:
+ - MM_SQLSETTINGS_DRIVERNAME=postgres
+ - MM_SQLSETTINGS_DATASOURCE=postgres://mmuser:mmuser_password@db:5432/mattermost?sslmode=disable&connect_timeout=10
+ - MM_SERVICESETTINGS_SITEURL=http://localhost:8065
+ volumes:
+ - ./volumes/app/config:/mattermost/config
+ - ./volumes/app/data:/mattermost/data
+ - ./volumes/app/logs:/mattermost/logs
+
+ db:
+ image: postgres:13-alpine
+ container_name: mattermost-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=mmuser
+ - POSTGRES_PASSWORD=mmuser_password
+ - POSTGRES_DB=mattermost
+ volumes:
+ - ./volumes/db/var/lib/postgresql/data:/var/lib/postgresql/data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/mattermost && cd /opt/mattermost
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `MM_SQLSETTINGS_DRIVERNAME` | `postgres` | No |
+| `MM_SQLSETTINGS_DATASOURCE` | `postgres://mmuser:mmuser_password@db:5432/mattermost?sslmode=disable&connect_timeout=10` | No |
+| `MM_SERVICESETTINGS_SITEURL` | `http://localhost:8065` | No |
+| `POSTGRES_USER` | `mmuser` | No |
+| `POSTGRES_PASSWORD` | `mmuser_password` | No |
+| `POSTGRES_DB` | `mattermost` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs mattermost | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Mattermost on AltStack Directory](https://thealtstack.com/alternative-to/mattermost)
+- [Mattermost Self-Hosted Guide](https://thealtstack.com/self-hosted/mattermost)
+- [Official Documentation](https://mattermost.com)
+- [GitHub Repository](https://github.com/mattermost/mattermost)
diff --git a/docs/app/deploy/mautic/page.mdx b/docs/app/deploy/mautic/page.mdx
new file mode 100644
index 0000000..55e7bbd
--- /dev/null
+++ b/docs/app/deploy/mautic/page.mdx
@@ -0,0 +1,153 @@
+---
+title: "Deploy Mautic Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Mautic with Docker Compose. "
+---
+
+# Deploy Mautic
+
+World's largest open source marketing automation project.
+
+
+ ⭐ 7.0k stars
+ 📜 GPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Mautic instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Mautic and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ mautic:
+ image: mautic/mautic:latest
+ container_name: mautic
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "8080:80"
+ environment:
+ - MAUTIC_DB_HOST=db
+ - MAUTIC_DB_USER=mautic
+ - MAUTIC_DB_PASSWORD=mautic
+ - MAUTIC_DB_NAME=mautic
+ - MAUTIC_RUN_CRON_JOBS=true
+ volumes:
+ - mautic_data:/var/www/html
+
+ db:
+ image: mysql:5.7
+ container_name: mautic-db
+ restart: unless-stopped
+ command: --default-authentication-plugin=mysql_native_password
+ environment:
+ - MYSQL_ROOT_PASSWORD=root
+ - MYSQL_USER=mautic
+ - MYSQL_PASSWORD=mautic
+ - MYSQL_DATABASE=mautic
+ volumes:
+ - mautic_db_data:/var/lib/mysql
+
+volumes:
+ mautic_data:
+ mautic_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/mautic && cd /opt/mautic
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `MAUTIC_DB_HOST` | `db` | No |
+| `MAUTIC_DB_USER` | `mautic` | No |
+| `MAUTIC_DB_PASSWORD` | `mautic` | No |
+| `MAUTIC_DB_NAME` | `mautic` | No |
+| `MAUTIC_RUN_CRON_JOBS` | `true` | No |
+| `MYSQL_ROOT_PASSWORD` | `root` | No |
+| `MYSQL_USER` | `mautic` | No |
+| `MYSQL_PASSWORD` | `mautic` | No |
+| `MYSQL_DATABASE` | `mautic` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs mautic | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Mautic on AltStack Directory](https://thealtstack.com/alternative-to/mautic)
+- [Mautic Self-Hosted Guide](https://thealtstack.com/self-hosted/mautic)
+- [Official Documentation](https://www.mautic.org)
+- [GitHub Repository](https://github.com/mautic/mautic)
diff --git a/docs/app/deploy/medusa/page.mdx b/docs/app/deploy/medusa/page.mdx
new file mode 100644
index 0000000..8d3d166
--- /dev/null
+++ b/docs/app/deploy/medusa/page.mdx
@@ -0,0 +1,150 @@
+---
+title: "Deploy Medusa.js Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Medusa.js with Docker Compose."
+---
+
+# Deploy Medusa.js
+
+The open-source alternative to Shopify. Building blocks for digital commerce.
+
+
+ ⭐ 24.0k stars
+ 📜 MIT
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Medusa.js instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Medusa.js and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ medusa:
+ image: medusajs/medusa:latest
+ container_name: medusa
+ restart: unless-stopped
+ depends_on:
+ - db
+ - redis
+ ports:
+ - "9000:9000"
+ environment:
+ - DATABASE_URL=postgres://medusa:medusa@db:5432/medusa
+ - REDIS_URL=redis://redis:6379
+ - JWT_SECRET=supersecret
+ - COOKIE_SECRET=supersecret
+
+ db:
+ image: postgres:15-alpine
+ container_name: medusa-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=medusa
+ - POSTGRES_PASSWORD=medusa
+ - POSTGRES_DB=medusa
+ volumes:
+ - medusa_db_data:/var/lib/postgresql/data
+
+ redis:
+ image: redis:alpine
+ container_name: medusa-redis
+ restart: unless-stopped
+
+volumes:
+ medusa_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/medusa && cd /opt/medusa
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DATABASE_URL` | `postgres://medusa:medusa@db:5432/medusa` | No |
+| `REDIS_URL` | `redis://redis:6379` | No |
+| `JWT_SECRET` | `supersecret` | No |
+| `COOKIE_SECRET` | `supersecret` | No |
+| `POSTGRES_USER` | `medusa` | No |
+| `POSTGRES_PASSWORD` | `medusa` | No |
+| `POSTGRES_DB` | `medusa` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs medusa | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Medusa.js on AltStack Directory](https://thealtstack.com/alternative-to/medusa)
+- [Medusa.js Self-Hosted Guide](https://thealtstack.com/self-hosted/medusa)
+- [Official Documentation](https://medusajs.com)
+- [GitHub Repository](https://github.com/medusajs/medusa)
diff --git a/docs/app/deploy/metabase/page.mdx b/docs/app/deploy/metabase/page.mdx
new file mode 100644
index 0000000..5a3a0a7
--- /dev/null
+++ b/docs/app/deploy/metabase/page.mdx
@@ -0,0 +1,148 @@
+---
+title: "Deploy Metabase Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Metabase with Docker Compose."
+---
+
+# Deploy Metabase
+
+The simplest, fastest way to get business intelligence and analytics throughout your company.
+
+
+ ⭐ 38.0k stars
+ 📜 AGPLv3
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Metabase instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Metabase and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ metabase:
+ image: metabase/metabase:latest
+ container_name: metabase
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "3000:3000"
+ environment:
+ - MB_DB_TYPE=postgres
+ - MB_DB_DBNAME=metabase
+ - MB_DB_PORT=5432
+ - MB_DB_USER=metabase
+ - MB_DB_PASS=metabase
+ - MB_DB_HOST=db
+
+ db:
+ image: postgres:14-alpine
+ container_name: metabase-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=metabase
+ - POSTGRES_PASSWORD=metabase
+ - POSTGRES_DB=metabase
+ volumes:
+ - metabase_db_data:/var/lib/postgresql/data
+
+volumes:
+ metabase_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/metabase && cd /opt/metabase
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `MB_DB_TYPE` | `postgres` | No |
+| `MB_DB_DBNAME` | `metabase` | No |
+| `MB_DB_PORT` | `5432` | No |
+| `MB_DB_USER` | `metabase` | No |
+| `MB_DB_PASS` | `metabase` | No |
+| `MB_DB_HOST` | `db` | No |
+| `POSTGRES_USER` | `metabase` | No |
+| `POSTGRES_PASSWORD` | `metabase` | No |
+| `POSTGRES_DB` | `metabase` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs metabase | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Metabase on AltStack Directory](https://thealtstack.com/alternative-to/metabase)
+- [Metabase Self-Hosted Guide](https://thealtstack.com/self-hosted/metabase)
+- [Official Documentation](https://www.metabase.com)
+- [GitHub Repository](https://github.com/metabase/metabase)
diff --git a/docs/app/deploy/minio/page.mdx b/docs/app/deploy/minio/page.mdx
new file mode 100644
index 0000000..80d95c4
--- /dev/null
+++ b/docs/app/deploy/minio/page.mdx
@@ -0,0 +1,128 @@
+---
+title: "Deploy MinIO Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting MinIO with Docker Compose."
+---
+
+# Deploy MinIO
+
+High-performance, S3-compatible object storage for AI and enterprise data.
+
+
+ ⭐ 45.0k stars
+ 📜 AGPLv3
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working MinIO instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for MinIO and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ minio:
+ image: minio/minio:latest
+ container_name: minio
+ restart: unless-stopped
+ ports:
+ - "9000:9000"
+ - "9090:9090"
+ command: server /data --console-address ":9090"
+ environment:
+ - MINIO_ROOT_USER=minioadmin
+ - MINIO_ROOT_PASSWORD=minioadmin
+ volumes:
+ - minio_data:/data
+
+volumes:
+ minio_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/minio && cd /opt/minio
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `MINIO_ROOT_USER` | `minioadmin` | No |
+| `MINIO_ROOT_PASSWORD` | `minioadmin` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs minio | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [MinIO on AltStack Directory](https://thealtstack.com/alternative-to/minio)
+- [MinIO Self-Hosted Guide](https://thealtstack.com/self-hosted/minio)
+- [Official Documentation](https://min.io)
+- [GitHub Repository](https://github.com/minio/minio)
diff --git a/docs/app/deploy/mistral/page.mdx b/docs/app/deploy/mistral/page.mdx
new file mode 100644
index 0000000..e585357
--- /dev/null
+++ b/docs/app/deploy/mistral/page.mdx
@@ -0,0 +1,117 @@
+---
+title: "Deploy Mistral Large 2 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Mistral Large 2 with Docker Compose."
+---
+
+# Deploy Mistral Large 2
+
+Flagship 123B model from Mistral AI. Optimized for multilingual, reasoning, and coding tasks.
+
+
+ ⭐ 20.0k stars
+ 📜 Mistral Research License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Mistral Large 2 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Mistral Large 2 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ ollama-mistral:
+ image: ollama/ollama:latest
+ container_name: ollama-mistral
+ restart: unless-stopped
+ ports:
+ - "11436:11434"
+ volumes:
+ - ollama_mistral:/root/.ollama
+
+volumes:
+ ollama_mistral:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/mistral && cd /opt/mistral
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Download the Mistral model into the Ollama container
+# (the compose file only starts Ollama; the model must be pulled once)
+docker exec -it ollama-mistral ollama pull mistral-large
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs ollama-mistral | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Mistral Large 2 on AltStack Directory](https://thealtstack.com/alternative-to/mistral)
+- [Mistral Large 2 Self-Hosted Guide](https://thealtstack.com/self-hosted/mistral)
+- [Official Documentation](https://mistral.ai)
+- [GitHub Repository](https://github.com/mistralai/mistral-inference)
diff --git a/docs/app/deploy/mixpost/page.mdx b/docs/app/deploy/mixpost/page.mdx
new file mode 100644
index 0000000..5f70e08
--- /dev/null
+++ b/docs/app/deploy/mixpost/page.mdx
@@ -0,0 +1,156 @@
+---
+title: "Deploy Mixpost Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Mixpost with Docker Compose."
+---
+
+# Deploy Mixpost
+
+Self-hosted social media management software.
+
+
+ ⭐ 3.0k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Mixpost instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Mixpost and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ mixpost:
+ image: inovector/mixpost:latest
+ container_name: mixpost
+ restart: unless-stopped
+ depends_on:
+ - db
+ - redis
+ ports:
+ - "80:80"
+ environment:
+ - APP_URL=http://localhost
+ - DB_HOST=db
+ - DB_DATABASE=mixpost
+ - DB_USERNAME=mixpost
+ - DB_PASSWORD=mixpost
+ - REDIS_HOST=redis
+
+ db:
+ image: mysql:8.0
+ container_name: mixpost-db
+ restart: unless-stopped
+ environment:
+ - MYSQL_DATABASE=mixpost
+ - MYSQL_USER=mixpost
+ - MYSQL_PASSWORD=mixpost
+ - MYSQL_ROOT_PASSWORD=root
+ volumes:
+ - mixpost_db_data:/var/lib/mysql
+
+ redis:
+ image: redis:alpine
+ container_name: mixpost-redis
+ restart: unless-stopped
+
+volumes:
+ mixpost_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/mixpost && cd /opt/mixpost
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `APP_URL` | `http://localhost` | No |
+| `DB_HOST` | `db` | No |
+| `DB_DATABASE` | `mixpost` | No |
+| `DB_USERNAME` | `mixpost` | No |
+| `DB_PASSWORD` | `mixpost` | No |
+| `REDIS_HOST` | `redis` | No |
+| `MYSQL_DATABASE` | `mixpost` | No |
+| `MYSQL_USER` | `mixpost` | No |
+| `MYSQL_PASSWORD` | `mixpost` | No |
+| `MYSQL_ROOT_PASSWORD` | `root` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs mixpost | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Mixpost on AltStack Directory](https://thealtstack.com/alternative-to/mixpost)
+- [Mixpost Self-Hosted Guide](https://thealtstack.com/self-hosted/mixpost)
+- [Official Documentation](https://mixpost.app)
+- [GitHub Repository](https://github.com/inovector/mixpost)
diff --git a/docs/app/deploy/mochi-1/page.mdx b/docs/app/deploy/mochi-1/page.mdx
new file mode 100644
index 0000000..ad3f3de
--- /dev/null
+++ b/docs/app/deploy/mochi-1/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Mochi-1 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Mochi-1 with Docker Compose."
+---
+
+# Deploy Mochi-1
+
+High-fidelity open-weights video generation model from Genmo, rivaling closed-source alternatives.
+
+
+ ⭐ 5.0k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Mochi-1 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Mochi-1 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ mochi-1:
+ image: genmo/mochi-1:latest
+ container_name: mochi-1
+ restart: unless-stopped
+ ports:
+ - "8000:8000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/mochi-1 && cd /opt/mochi-1
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs mochi-1 | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Mochi-1 on AltStack Directory](https://thealtstack.com/alternative-to/mochi-1)
+- [Mochi-1 Self-Hosted Guide](https://thealtstack.com/self-hosted/mochi-1)
+- [Official Documentation](https://www.genmo.ai)
+- [GitHub Repository](https://github.com/genmoai/mochi)
diff --git a/docs/app/deploy/n8n/page.mdx b/docs/app/deploy/n8n/page.mdx
new file mode 100644
index 0000000..b06e507
--- /dev/null
+++ b/docs/app/deploy/n8n/page.mdx
@@ -0,0 +1,138 @@
+---
+title: "Deploy n8n Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting n8n with Docker Compose."
+---
+
+# Deploy n8n
+
+Fair-code workflow automation tool. Easily automate tasks across different services.
+
+
+ ⭐ 49.0k stars
+ 📜 Sustainable Use License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working n8n instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for n8n and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ n8n:
+ image: n8nio/n8n:latest
+ container_name: n8n
+ restart: unless-stopped
+ ports:
+ - "5678:5678"
+ environment:
+ - N8N_BASIC_AUTH_ACTIVE=true
+ - N8N_BASIC_AUTH_USER=admin
+ - N8N_BASIC_AUTH_PASSWORD=password
+ - N8N_HOST=localhost
+ - N8N_PORT=5678
+ - N8N_PROTOCOL=http
+ - NODE_ENV=production
+ - WEBHOOK_URL=http://localhost:5678/
+ volumes:
+ - n8n_data:/home/node/.n8n
+
+volumes:
+ n8n_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/n8n && cd /opt/n8n
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `N8N_BASIC_AUTH_ACTIVE` | `true` | No |
+| `N8N_BASIC_AUTH_USER` | `admin` | No |
+| `N8N_BASIC_AUTH_PASSWORD` | `password` | No |
+| `N8N_HOST` | `localhost` | No |
+| `N8N_PORT` | `5678` | No |
+| `N8N_PROTOCOL` | `http` | No |
+| `NODE_ENV` | `production` | No |
+| `WEBHOOK_URL` | `http://localhost:5678/` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs n8n | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [n8n on AltStack Directory](https://thealtstack.com/alternative-to/n8n)
+- [n8n Self-Hosted Guide](https://thealtstack.com/self-hosted/n8n)
+- [Official Documentation](https://n8n.io)
+- [GitHub Repository](https://github.com/n8n-io/n8n)
diff --git a/docs/app/deploy/odoo/page.mdx b/docs/app/deploy/odoo/page.mdx
new file mode 100644
index 0000000..ce53b35
--- /dev/null
+++ b/docs/app/deploy/odoo/page.mdx
@@ -0,0 +1,161 @@
+---
+title: "Deploy Odoo Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Odoo with Docker Compose."
+---
+
+# Deploy Odoo
+
+A suite of open source business apps: CRM, eCommerce, accounting, manufacturing, warehouse, and more.
+
+
+ ⭐ 48.9k stars
+ 📜 LGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Odoo instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Odoo and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for Odoo
+version: '3.8'
+
+services:
+ odoo:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ container_name: odoo
+ ports:
+ - "8069:8069"
+ environment:
+ - HOST=db
+ - USER=odoo
+ - PASSWORD=odoo
+ depends_on:
+ db:
+ condition: service_healthy
+ networks:
+ - odoo_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:8069/" ]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ restart: unless-stopped
+
+ db:
+ image: postgres:15-alpine
+ container_name: odoo-db
+ environment:
+ POSTGRES_USER: odoo
+ POSTGRES_PASSWORD: odoo
+ POSTGRES_DB: postgres
+ volumes:
+ - odoo_db_data:/var/lib/postgresql/data
+ networks:
+ - odoo_net
+ healthcheck:
+ test: [ "CMD-SHELL", "pg_isready -U odoo" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+networks:
+ odoo_net:
+ driver: bridge
+
+volumes:
+ odoo_db_data:
+ name: odoo_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/odoo && cd /opt/odoo
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `HOST` | `db` | No |
+| `USER` | `odoo` | No |
+| `PASSWORD` | `odoo` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs odoo | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Odoo on AltStack Directory](https://thealtstack.com/alternative-to/odoo)
+- [Odoo Self-Hosted Guide](https://thealtstack.com/self-hosted/odoo)
+- [Official Documentation](https://www.odoo.com)
+- [GitHub Repository](https://github.com/odoo/odoo)
diff --git a/docs/app/deploy/ollama/page.mdx b/docs/app/deploy/ollama/page.mdx
new file mode 100644
index 0000000..6224dc3
--- /dev/null
+++ b/docs/app/deploy/ollama/page.mdx
@@ -0,0 +1,137 @@
+---
+title: "Deploy Ollama Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Ollama with Docker Compose."
+---
+
+# Deploy Ollama
+
+Get up and running with Llama 3, Mistral, Gemma, and other large language models locally.
+
+
+ ⭐ 60.0k stars
+ 📜 MIT License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Ollama instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Ollama and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for Ollama
+version: '3.8'
+
+services:
+ ollama:
+ image: ollama/ollama:latest # Official image is highly recommended for GPU support
+ container_name: ollama
+ ports:
+ - "11434:11434"
+ volumes:
+ - ollama_data:/root/.ollama
+ # For GPU support (NVIDIA), uncomment the following:
+ # deploy:
+ # resources:
+ # reservations:
+ # devices:
+ # - driver: nvidia
+ # count: all
+ # capabilities: [gpu]
+ networks:
+ - ollama_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:11434/api/tags" ]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+networks:
+ ollama_net:
+ driver: bridge
+
+volumes:
+ ollama_data:
+ name: ollama_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/ollama && cd /opt/ollama
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs ollama | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Ollama on AltStack Directory](https://thealtstack.com/alternative-to/ollama)
+- [Ollama Self-Hosted Guide](https://thealtstack.com/self-hosted/ollama)
+- [Official Documentation](https://ollama.com)
+- [GitHub Repository](https://github.com/ollama/ollama)
diff --git a/docs/app/deploy/onlyoffice/page.mdx b/docs/app/deploy/onlyoffice/page.mdx
new file mode 100644
index 0000000..ceec9f0
--- /dev/null
+++ b/docs/app/deploy/onlyoffice/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy ONLYOFFICE Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting ONLYOFFICE with Docker Compose."
+---
+
+# Deploy ONLYOFFICE
+
+Powerful online document editors for text, spreadsheets, and presentations. Highly compatible with MS Office.
+
+
+ ⭐ 11.0k stars
+ 📜 AGPLv3
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working ONLYOFFICE instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for ONLYOFFICE and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ onlyoffice:
+ image: onlyoffice/documentserver:latest
+ container_name: onlyoffice
+ restart: unless-stopped
+ ports:
+ - "8080:80"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/onlyoffice && cd /opt/onlyoffice
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs onlyoffice | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [ONLYOFFICE on AltStack Directory](https://thealtstack.com/alternative-to/onlyoffice)
+- [ONLYOFFICE Self-Hosted Guide](https://thealtstack.com/self-hosted/onlyoffice)
+- [Official Documentation](https://www.onlyoffice.com)
+- [GitHub Repository](https://github.com/ONLYOFFICE/DocumentServer)
diff --git a/docs/app/deploy/orangehrm/page.mdx b/docs/app/deploy/orangehrm/page.mdx
new file mode 100644
index 0000000..e9d96d4
--- /dev/null
+++ b/docs/app/deploy/orangehrm/page.mdx
@@ -0,0 +1,146 @@
+---
+title: "Deploy OrangeHRM Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting OrangeHRM with Docker Compose."
+---
+
+# Deploy OrangeHRM
+
+The world's most popular open source human resource management software.
+
+
+ ⭐ 1.2k stars
+ 📜 GPLv2
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working OrangeHRM instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for OrangeHRM and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ orangehrm:
+ image: orangehrm/orangehrm:latest
+ container_name: orangehrm
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "80:80"
+ environment:
+ - ORANGEHRM_DATABASE_HOST=db
+ - ORANGEHRM_DATABASE_USER=orangehrm
+ - ORANGEHRM_DATABASE_PASSWORD=orangehrm
+ - ORANGEHRM_DATABASE_NAME=orangehrm
+
+ db:
+ image: mariadb:10.6
+ container_name: orangehrm-db
+ restart: unless-stopped
+ environment:
+ - MYSQL_ROOT_PASSWORD=root
+ - MYSQL_USER=orangehrm
+ - MYSQL_PASSWORD=orangehrm
+ - MYSQL_DATABASE=orangehrm
+ volumes:
+ - orangehrm_db_data:/var/lib/mysql
+
+volumes:
+ orangehrm_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/orangehrm && cd /opt/orangehrm
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `ORANGEHRM_DATABASE_HOST` | `db` | No |
+| `ORANGEHRM_DATABASE_USER` | `orangehrm` | No |
+| `ORANGEHRM_DATABASE_PASSWORD` | `orangehrm` | No |
+| `ORANGEHRM_DATABASE_NAME` | `orangehrm` | No |
+| `MYSQL_ROOT_PASSWORD` | `root` | No |
+| `MYSQL_USER` | `orangehrm` | No |
+| `MYSQL_PASSWORD` | `orangehrm` | No |
+| `MYSQL_DATABASE` | `orangehrm` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs orangehrm | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [OrangeHRM on AltStack Directory](https://thealtstack.com/alternative-to/orangehrm)
+- [OrangeHRM Self-Hosted Guide](https://thealtstack.com/self-hosted/orangehrm)
+- [Official Documentation](https://www.orangehrm.com)
+- [GitHub Repository](https://github.com/orangehrm/orangehrm)
diff --git a/docs/app/deploy/outline/page.mdx b/docs/app/deploy/outline/page.mdx
new file mode 100644
index 0000000..b040f70
--- /dev/null
+++ b/docs/app/deploy/outline/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Outline Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Outline with Docker Compose."
+---
+
+# Deploy Outline
+
+Fast, collaborative, knowledge base for your team built using React and Markdown.
+
+
+ ⭐ 24.0k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Outline instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Outline and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ outline:
+ image: outlinewiki/outline:latest
+ container_name: outline
+ restart: unless-stopped
+ ports:
+ - "3000:3000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/outline && cd /opt/outline
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs outline | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Outline on AltStack Directory](https://thealtstack.com/alternative-to/outline)
+- [Outline Self-Hosted Guide](https://thealtstack.com/self-hosted/outline)
+- [Official Documentation](https://www.getoutline.com)
+- [GitHub Repository](https://github.com/outline/outline)
diff --git a/docs/app/deploy/page.mdx b/docs/app/deploy/page.mdx
new file mode 100644
index 0000000..96635b5
--- /dev/null
+++ b/docs/app/deploy/page.mdx
@@ -0,0 +1,171 @@
+---
+title: "Deploy Guides"
+description: "65+ self-hosting deploy guides with Docker Compose configs. Find your tool, copy the config, ship it."
+---
+
+# Deploy Guides
+
+Every guide follows the same pattern: **copy the Docker Compose config, tweak the `.env`, run `docker compose up -d`.** Done.
+
+> 💡 **New to self-hosting?** Start with the [Quick Start](/quick-start) guide first, then come back here.
+
+---
+
+## 🤖 AI & Machine Learning
+
+Run AI models on your own hardware. No API keys, no usage limits, no data leaving your server.
+
+| Tool | What It Does |
+|---|---|
+| [Ollama](/deploy/ollama) | Run LLMs locally with a simple CLI |
+| [DeepSeek](/deploy/deepseek) | DeepSeek-V3 / R1 reasoning models |
+| [Meta Llama](/deploy/llama) | Meta's open-weight Llama 3.1 |
+| [Mistral](/deploy/mistral) | Mistral Large 2 — fast and capable |
+| [Qwen](/deploy/qwen) | Alibaba's Qwen 2.5 models |
+| [Google Gemma](/deploy/gemma) | Google's lightweight open models |
+| [GPT4All](/deploy/gpt4all) | Desktop-first local LLM runner |
+| [Continue](/deploy/continue-dev) | AI code assistant for VS Code/JetBrains |
+| [TabbyML](/deploy/tabby) | Self-hosted GitHub Copilot alternative |
+| [Stable Diffusion](/deploy/stable-diffusion) | AI image generation (SD 3.5) |
+| [FLUX](/deploy/flux) | Next-gen image generation |
+| [HunyuanVideo](/deploy/hunyuan-video) | AI video generation |
+| [Mochi-1](/deploy/mochi-1) | Text-to-video AI |
+
+---
+
+## 📊 Analytics & Monitoring
+
+Own your data. No more sending user behavior to Google.
+
+| Tool | What It Does |
+|---|---|
+| [Plausible](/deploy/plausible) | Privacy-first web analytics |
+| [PostHog](/deploy/posthog) | Product analytics + session replay |
+| [Matomo](/deploy/matomo) | Full Google Analytics replacement |
+| [Jitsu](/deploy/jitsu) | Open-source Segment alternative |
+| [Metabase](/deploy/metabase) | Business intelligence dashboards |
+| [Apache Superset](/deploy/superset) | Enterprise data visualization |
+| [GlitchTip](/deploy/glitchtip) | Error tracking (Sentry alternative) |
+| [SigNoz](/deploy/signoz) | Full-stack observability platform |
+| [Uptime Kuma](/deploy/uptime-kuma) | Beautiful uptime monitoring |
+
+---
+
+## 💬 Productivity & Collaboration
+
+Replace Slack, Notion, and Jira — on your terms.
+
+| Tool | What It Does |
+|---|---|
+| [Mattermost](/deploy/mattermost) | Slack alternative for teams |
+| [Rocket.Chat](/deploy/rocketchat) | Team chat with omnichannel support |
+| [Outline](/deploy/outline) | Beautiful team knowledge base |
+| [AFFiNE](/deploy/affine) | Notion + Miro hybrid workspace |
+| [AppFlowy](/deploy/appflowy) | Open-source Notion alternative |
+| [ONLYOFFICE](/deploy/onlyoffice) | Self-hosted Google Docs/Sheets |
+| [Plane](/deploy/plane) | Project management (Jira alternative) |
+| [Taiga](/deploy/taiga) | Agile project management |
+| [Cal.com](/deploy/calcom) | Scheduling (Calendly alternative) |
+| [Documenso](/deploy/documenso) | Digital signatures (DocuSign alternative) |
+| [Zammad](/deploy/zammad) | Helpdesk & ticketing system |
+
+---
+
+## 🏢 Business & CRM
+
+Run your business without SaaS subscriptions.
+
+| Tool | What It Does |
+|---|---|
+| [Odoo](/deploy/odoo) | Full ERP suite (CRM, accounting, HR) |
+| [ERPNext](/deploy/erpnext) | Manufacturing & distribution ERP |
+| [Twenty](/deploy/twenty) | Modern CRM (Salesforce alternative) |
+| [Akaunting](/deploy/akaunting) | Free accounting software |
+| [OrangeHRM](/deploy/orangehrm) | HR management platform |
+| [Medusa.js](/deploy/medusa) | Headless e-commerce engine |
+
+---
+
+## 🔐 Security & Authentication
+
+Control who gets in. Period.
+
+| Tool | What It Does |
+|---|---|
+| [Keycloak](/deploy/keycloak) | Enterprise identity & access management |
+| [Authentik](/deploy/authentik) | Modern SSO and user management |
+| [Vaultwarden](/deploy/vaultwarden) | Bitwarden-compatible password vault |
+| [Bitwarden](/deploy/bitwarden) | Official password manager server |
+| [KeePassXC](/deploy/keepassxc) | Offline password manager |
+
+---
+
+## ⚙️ DevOps & Infrastructure
+
+The tools that run your tools.
+
+| Tool | What It Does |
+|---|---|
+| [Coolify](/deploy/coolify) | Self-hosted Vercel/Netlify |
+| [Dokku](/deploy/dokku) | Mini Heroku on your server |
+| [n8n](/deploy/n8n) | Workflow automation (Zapier alternative) |
+| [Activepieces](/deploy/activepieces) | Visual automation builder |
+| [Coder](/deploy/coder) | Cloud development environments |
+| [MinIO](/deploy/minio) | S3-compatible object storage |
+| [PocketBase](/deploy/pocketbase) | Backend in a single binary |
+| [Appwrite](/deploy/appwrite) | Firebase alternative |
+| [Supabase](/deploy/supabase) | Postgres-powered Firebase alternative |
+
+---
+
+## 📧 Marketing & Email
+
+Send emails, run campaigns, own your audience.
+
+| Tool | What It Does |
+|---|---|
+| [Listmonk](/deploy/listmonk) | Newsletter & mailing list manager |
+| [Mautic](/deploy/mautic) | Marketing automation platform |
+| [Postal](/deploy/postal) | Mail delivery platform (Mailgun alternative) |
+| [Mixpost](/deploy/mixpost) | Social media management |
+| [Chaskiq](/deploy/chaskiq) | Customer messaging platform |
+
+---
+
+## 🎨 Creative Tools
+
+Design, edit, and create without Adobe subscriptions.
+
+| Tool | What It Does |
+|---|---|
+| [Penpot](/deploy/penpot) | Design & prototyping (Figma alternative) |
+| [GIMP](/deploy/gimp) | Image editing (Photoshop alternative) |
+| [Krita](/deploy/krita) | Digital painting & illustration |
+| [Kdenlive](/deploy/kdenlive) | Video editing |
+| [FreeCAD](/deploy/freecad) | 3D parametric modeling |
+| [LibreCAD](/deploy/librecad) | 2D CAD drafting |
+
+---
+
+## 🔌 Communication
+
+| Tool | What It Does |
+|---|---|
+| [Jitsi Meet](/deploy/jitsi-meet) | Video conferencing (Zoom alternative) |
+
+---
+
+## Prerequisites for All Guides
+
+Every guide assumes you have:
+- A server with Docker and Docker Compose installed → [Setup Guide](/quick-start/choosing-a-server)
+- Basic terminal access (SSH)
+- A domain name (optional but recommended) → [Reverse Proxy Setup](/concepts/reverse-proxies)
+
+## Essential Reading
+
+Before your first deploy, read these:
+- [Docker in 10 Minutes](/concepts/docker-basics)
+- [Reverse Proxies Explained](/concepts/reverse-proxies)
+- [SSL/TLS for Self-Hosters](/concepts/ssl-tls)
+- [Backups That Actually Work](/concepts/backups)
diff --git a/docs/app/deploy/penpot/page.mdx b/docs/app/deploy/penpot/page.mdx
new file mode 100644
index 0000000..fdfe275
--- /dev/null
+++ b/docs/app/deploy/penpot/page.mdx
@@ -0,0 +1,185 @@
+---
+title: "Deploy Penpot Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Penpot with Docker Compose."
+---
+
+# Deploy Penpot
+
+Penpot: The open-source design tool for design and code collaboration
+
+
+ ⭐ 44.2k stars
+ 📜 Mozilla Public License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Penpot instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Penpot and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ penpot-frontend:
+ image: penpotapp/frontend:latest
+ container_name: penpot-frontend
+ restart: unless-stopped
+ depends_on:
+ - penpot-backend
+ - penpot-exporter
+ ports:
+ - "9010:80"
+ environment:
+ - PENPOT_FLAGS=enable-registration enable-login-with-password
+ volumes:
+ - penpot_assets:/opt/data/assets
+
+ penpot-backend:
+ image: penpotapp/backend:latest
+ container_name: penpot-backend
+ restart: unless-stopped
+ depends_on:
+ - penpot-postgres
+ - penpot-redis
+ environment:
+ - PENPOT_FLAGS=enable-registration enable-login-with-password
+ - PENPOT_DATABASE_URI=postgresql://penpot-postgres/penpot
+ - PENPOT_DATABASE_USERNAME=penpot
+ - PENPOT_DATABASE_PASSWORD=penpot
+ - PENPOT_REDIS_URI=redis://penpot-redis/0
+ - PENPOT_ASSETS_STORAGE_BACKEND=assets-fs
+ - PENPOT_STORAGE_ASSETS_FS_DIRECTORY=/opt/data/assets
+ - PENPOT_TELEMETRY_ENABLED=false
+ volumes:
+ - penpot_assets:/opt/data/assets
+
+ penpot-exporter:
+ image: penpotapp/exporter:latest
+ container_name: penpot-exporter
+ restart: unless-stopped
+ environment:
+ - PENPOT_DATABASE_URI=postgresql://penpot-postgres/penpot
+ - PENPOT_DATABASE_USERNAME=penpot
+ - PENPOT_DATABASE_PASSWORD=penpot
+ - PENPOT_REDIS_URI=redis://penpot-redis/0
+
+ penpot-postgres:
+ image: postgres:15
+ container_name: penpot-postgres
+ restart: unless-stopped
+ environment:
+ - POSTGRES_INITDB_ARGS=--data-checksums
+ - POSTGRES_DB=penpot
+ - POSTGRES_USER=penpot
+ - POSTGRES_PASSWORD=penpot
+ volumes:
+ - penpot_postgres_v15:/var/lib/postgresql/data
+
+ penpot-redis:
+ image: redis:7
+ container_name: penpot-redis
+ restart: unless-stopped
+
+volumes:
+ penpot_postgres_v15:
+ penpot_assets:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/penpot && cd /opt/penpot
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `PENPOT_FLAGS` | `enable-registration enable-login-with-password` | No |
+| `PENPOT_DATABASE_URI` | `postgresql://penpot-postgres/penpot` | No |
+| `PENPOT_DATABASE_USERNAME` | `penpot` | No |
+| `PENPOT_DATABASE_PASSWORD` | `penpot` | No |
+| `PENPOT_REDIS_URI` | `redis://penpot-redis/0` | No |
+| `PENPOT_ASSETS_STORAGE_BACKEND` | `assets-fs` | No |
+| `PENPOT_STORAGE_ASSETS_FS_DIRECTORY` | `/opt/data/assets` | No |
+| `PENPOT_TELEMETRY_ENABLED` | `false` | No |
+| `POSTGRES_INITDB_ARGS` | `--data-checksums` | No |
+| `POSTGRES_DB` | `penpot` | No |
+| `POSTGRES_USER` | `penpot` | No |
+| `POSTGRES_PASSWORD` | `penpot` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs penpot-frontend penpot-backend | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Penpot on AltStack Directory](https://thealtstack.com/alternative-to/penpot)
+- [Penpot Self-Hosted Guide](https://thealtstack.com/self-hosted/penpot)
+- [Official Documentation](https://penpot.app)
+- [GitHub Repository](https://github.com/penpot/penpot)
diff --git a/docs/app/deploy/plane/page.mdx b/docs/app/deploy/plane/page.mdx
new file mode 100644
index 0000000..7240a19
--- /dev/null
+++ b/docs/app/deploy/plane/page.mdx
@@ -0,0 +1,160 @@
+---
+title: "Deploy Plane Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Plane with Docker Compose."
+---
+
+# Deploy Plane
+
+🔥🔥🔥 Open-source Jira, Linear, Monday, and ClickUp alternative. Plane is a modern project management platform to manage tasks, sprints, docs, and triage.
+
+
+ ⭐ 45.5k stars
+ 📜 GNU Affero General Public License v3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Plane instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Plane and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ plane-web:
+ image: makeplane/plane-frontend:latest
+ container_name: plane-frontend
+ restart: unless-stopped
+ depends_on:
+ - plane-backend
+ ports:
+ - "3000:80"
+
+ plane-backend:
+ image: makeplane/plane-backend:latest
+ container_name: plane-backend
+ restart: unless-stopped
+ depends_on:
+ - plane-db
+ - plane-redis
+ ports:
+ - "8000:8000"
+ environment:
+ - DATABASE_URL=postgres://plane:plane@plane-db:5432/plane
+ - REDIS_URL=redis://plane-redis:6379/
+ - SECRET_KEY=replace-me-with-a-secure-key
+
+ plane-db:
+ image: postgres:15-alpine
+ container_name: plane-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=plane
+ - POSTGRES_PASSWORD=plane
+ - POSTGRES_DB=plane
+ volumes:
+ - plane_db_data:/var/lib/postgresql/data
+
+ plane-redis:
+ image: redis:7-alpine
+ container_name: plane-redis
+ restart: unless-stopped
+ volumes:
+ - plane_redis_data:/data
+
+volumes:
+ plane_db_data:
+ plane_redis_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/plane && cd /opt/plane
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DATABASE_URL` | `postgres://plane:plane@plane-db:5432/plane` | No |
+| `REDIS_URL` | `redis://plane-redis:6379/` | No |
+| `SECRET_KEY` | `replace-me-with-a-secure-key` | No |
+| `POSTGRES_USER` | `plane` | No |
+| `POSTGRES_PASSWORD` | `plane` | No |
+| `POSTGRES_DB` | `plane` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs plane-web plane-backend | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Plane on AltStack Directory](https://thealtstack.com/alternative-to/plane)
+- [Plane Self-Hosted Guide](https://thealtstack.com/self-hosted/plane)
+- [Official Documentation](https://plane.so)
+- [GitHub Repository](https://github.com/makeplane/plane)
diff --git a/docs/app/deploy/plausible/page.mdx b/docs/app/deploy/plausible/page.mdx
new file mode 100644
index 0000000..007337b
--- /dev/null
+++ b/docs/app/deploy/plausible/page.mdx
@@ -0,0 +1,179 @@
+---
+title: "Deploy Plausible Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Plausible with Docker Compose."
+---
+
+# Deploy Plausible
+
+Simple, open source, lightweight and privacy-friendly web analytics alternative to Google Analytics.
+
+
+ ⭐ 24.2k stars
+ 📜 GNU Affero General Public License v3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+A production-ready Plausible Analytics instance. Note that Plausible uses a two-database architecture:
+
+- **PostgreSQL:** Stores your users, sites, and metadata.
+- **ClickHouse:** A high-performance columnar database that stores the millions of raw events (pageviews) you'll be collecting.
+
+> 🌍 **Geolocation Tip:** To see where your visitors are coming from, you'll need to download the free MaxMind GeoLite2 database after deployment and place it in the `./geoip` folder.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Plausible and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ plausible:
+ image: plausible/analytics:latest
+ container_name: plausible
+ restart: unless-stopped
+ command: sh -c "sleep 10 && /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh run"
+ depends_on:
+ - plausible_db
+ - plausible_events_db
+ - mail
+ ports:
+ - "8000:8000"
+ environment:
+ - BASE_URL=http://localhost:8000
+ - SECRET_KEY_BASE=ChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMe
+ - DATABASE_URL=postgres://postgres:postgres@plausible_db:5432/plausible_db
+ - CLICKHOUSE_DATABASE_URL=http://plausible_events_db:8123/plausible_events_db
+ - MAILER_EMAIL=admin@example.com
+ - SMTP_HOST_ADDR=mail
+ - SMTP_HOST_PORT=25
+ - SMTP_USER_NAME=
+ - SMTP_USER_PWD=
+ - SMTP_SSL_Enabled=false
+ volumes:
+ - ./geoip:/geoip:ro
+
+ plausible_db:
+ image: postgres:14-alpine
+ container_name: plausible_db
+ restart: unless-stopped
+ volumes:
+ - plausible_db_data:/var/lib/postgresql/data
+ environment:
+ - POSTGRES_PASSWORD=postgres
+ - POSTGRES_DB=plausible_db
+
+ plausible_events_db:
+ image: clickhouse/clickhouse-server:24.3.3.102-alpine
+ container_name: plausible_events_db
+ restart: unless-stopped
+ volumes:
+ - plausible_events_data:/var/lib/clickhouse
+ - ./clickhouse/clickhouse-config.xml:/etc/clickhouse-server/config.d/logging.xml:ro
+ - ./clickhouse/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+
+ mail:
+ image: bytemark/smtp
+ container_name: plausible_mail
+ restart: unless-stopped
+
+volumes:
+ plausible_db_data:
+ plausible_events_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/plausible && cd /opt/plausible
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `BASE_URL` | `http://localhost:8000` | No |
+| `SECRET_KEY_BASE` | `ChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMe` | No |
+| `DATABASE_URL` | `postgres://postgres:postgres@plausible_db:5432/plausible_db` | No |
+| `CLICKHOUSE_DATABASE_URL` | `http://plausible_events_db:8123/plausible_events_db` | No |
+| `MAILER_EMAIL` | `admin@example.com` | No |
+| `SMTP_HOST_ADDR` | `mail` | No |
+| `SMTP_HOST_PORT` | `25` | No |
+| `SMTP_SSL_Enabled` | `false` | No |
+| `POSTGRES_PASSWORD` | `postgres` | No |
+| `POSTGRES_DB` | `plausible_db` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs plausible | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Plausible on AltStack Directory](https://thealtstack.com/alternative-to/plausible)
+- [Plausible Self-Hosted Guide](https://thealtstack.com/self-hosted/plausible)
+- [Official Documentation](https://plausible.io)
+- [GitHub Repository](https://github.com/plausible/analytics)
diff --git a/docs/app/deploy/pocketbase/page.mdx b/docs/app/deploy/pocketbase/page.mdx
new file mode 100644
index 0000000..fe276df
--- /dev/null
+++ b/docs/app/deploy/pocketbase/page.mdx
@@ -0,0 +1,118 @@
+---
+title: "Deploy PocketBase Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting PocketBase with Docker Compose."
+---
+
+# Deploy PocketBase
+
+Open Source realtime backend in 1 file
+
+
+ ⭐ 56.0k stars
+ 📜 MIT License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working PocketBase instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for PocketBase and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ pocketbase:
+ image: pocketbase/pocketbase:latest
+ container_name: pocketbase
+ restart: unless-stopped
+ command: serve --http=0.0.0.0:8090
+ ports:
+ - "8090:8090"
+ volumes:
+ - pb_data:/pb/pb_data
+
+volumes:
+ pb_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/pocketbase && cd /opt/pocketbase
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs pocketbase | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [PocketBase on AltStack Directory](https://thealtstack.com/alternative-to/pocketbase)
+- [PocketBase Self-Hosted Guide](https://thealtstack.com/self-hosted/pocketbase)
+- [Official Documentation](https://pocketbase.io)
+- [GitHub Repository](https://github.com/pocketbase/pocketbase)
diff --git a/docs/app/deploy/postal/page.mdx b/docs/app/deploy/postal/page.mdx
new file mode 100644
index 0000000..bb471d0
--- /dev/null
+++ b/docs/app/deploy/postal/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Postal Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Postal with Docker Compose."
+---
+
+# Deploy Postal
+
+A fully featured open source mail delivery platform for incoming & outgoing e-mail.
+
+
+ ⭐ 15.0k stars
+ 📜 MIT
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Postal instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Postal and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ postal:
+ image: postalserver/postal:latest
+ container_name: postal
+ restart: unless-stopped
+ ports:
+ - "5000:5000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/postal && cd /opt/postal
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs postal | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Postal on AltStack Directory](https://thealtstack.com/alternative-to/postal)
+- [Postal Self-Hosted Guide](https://thealtstack.com/self-hosted/postal)
+- [Official Documentation](https://postalserver.io)
+- [GitHub Repository](https://github.com/postalserver/postal)
diff --git a/docs/app/deploy/posthog/page.mdx b/docs/app/deploy/posthog/page.mdx
new file mode 100644
index 0000000..8c8d6e4
--- /dev/null
+++ b/docs/app/deploy/posthog/page.mdx
@@ -0,0 +1,199 @@
+---
+title: "Deploy PostHog Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting PostHog with Docker Compose."
+---
+
+# Deploy PostHog
+
+🦔 PostHog is an open-source, all-in-one platform for building successful products. It provides product analytics, web analytics, session replay, error tracking, feature flags, experimentation, surveys, a data warehouse, and a CDP — keeping all of your usage and customer data in one self-hosted stack.
+
+
+ ⭐ 31.2k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working PostHog instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for PostHog and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ db:
+ image: postgres:14-alpine
+ container_name: posthog-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_PASSWORD=posthog
+ - POSTGRES_DB=posthog
+ - POSTGRES_USER=posthog
+ volumes:
+ - posthog_postgres_data:/var/lib/postgresql/data
+
+ redis:
+ image: redis:6-alpine
+ container_name: posthog-redis
+ restart: unless-stopped
+ volumes:
+ - posthog_redis_data:/data
+
+ clickhouse:
+ image: clickhouse/clickhouse-server:22.3-alpine
+ container_name: posthog-clickhouse
+ restart: unless-stopped
+ environment:
+ - CLICKHOUSE_DB=posthog
+ - CLICKHOUSE_USER=default
+ - CLICKHOUSE_PASSWORD=
+ volumes:
+ - posthog_clickhouse_data:/var/lib/clickhouse
+
+ kafka:
+ image: confluentinc/cp-kafka:7.5.3
+ container_name: posthog-kafka
+ restart: unless-stopped
+ depends_on:
+ - zookeeper
+ environment:
+ - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+ - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
+ - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
+
+ zookeeper:
+ image: confluentinc/cp-zookeeper:7.5.3
+ container_name: posthog-zookeeper
+ restart: unless-stopped
+ environment:
+ - ZOOKEEPER_CLIENT_PORT=2181
+ - ZOOKEEPER_TICK_TIME=2000
+
+ posthog:
+ image: posthog/posthog:release-1.40.0
+ container_name: posthog
+ restart: unless-stopped
+ depends_on:
+ - db
+ - redis
+ - clickhouse
+ - kafka
+ ports:
+ - "8000:8000"
+ environment:
+ - DATABASE_URL=postgres://posthog:posthog@db:5432/posthog
+ - REDIS_URL=redis://redis:6379/
+ - CLICKHOUSE_HOST=clickhouse
+ - KAFKA_HOSTS=kafka:9092
+ - SECRET_KEY=please-change-this-secret-key-in-production-12345
+ - SKIP_SERVICE_VERSION_REQUIREMENTS=1
+ volumes:
+ - ./uploads:/app/static/uploads
+
+volumes:
+ posthog_postgres_data:
+ posthog_redis_data:
+ posthog_clickhouse_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/posthog && cd /opt/posthog
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `POSTGRES_PASSWORD` | `posthog` | No |
+| `POSTGRES_DB` | `posthog` | No |
+| `POSTGRES_USER` | `posthog` | No |
+| `CLICKHOUSE_DB` | `posthog` | No |
+| `CLICKHOUSE_USER` | `default` | No |
+| `KAFKA_ZOOKEEPER_CONNECT` | `zookeeper:2181` | No |
+| `KAFKA_ADVERTISED_LISTENERS` | `PLAINTEXT://kafka:9092` | No |
+| `KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR` | `1` | No |
+| `ZOOKEEPER_CLIENT_PORT` | `2181` | No |
+| `ZOOKEEPER_TICK_TIME` | `2000` | No |
+| `DATABASE_URL` | `postgres://posthog:posthog@db:5432/posthog` | No |
+| `REDIS_URL` | `redis://redis:6379/` | No |
+| `CLICKHOUSE_HOST` | `clickhouse` | No |
+| `KAFKA_HOSTS` | `kafka:9092` | No |
+| `SECRET_KEY` | `please-change-this-secret-key-in-production-12345` | No |
+| `SKIP_SERVICE_VERSION_REQUIREMENTS` | `1` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs posthog | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [PostHog on AltStack Directory](https://thealtstack.com/alternative-to/posthog)
+- [PostHog Self-Hosted Guide](https://thealtstack.com/self-hosted/posthog)
+- [Official Documentation](https://posthog.com)
+- [GitHub Repository](https://github.com/PostHog/posthog)
diff --git a/docs/app/deploy/qwen/page.mdx b/docs/app/deploy/qwen/page.mdx
new file mode 100644
index 0000000..99da2e3
--- /dev/null
+++ b/docs/app/deploy/qwen/page.mdx
@@ -0,0 +1,117 @@
+---
+title: "Deploy Qwen 2.5 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Qwen 2.5 with Docker Compose."
+---
+
+# Deploy Qwen 2.5
+
+Comprehensive LLM series from Alibaba Cloud, excelling in coding, math, and multilingual support.
+
+
+ ⭐ 50.0k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Qwen 2.5 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Qwen 2.5 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ ollama-qwen:
+ image: ollama/ollama:latest
+ container_name: ollama-qwen
+ restart: unless-stopped
+ ports:
+ - "11438:11434"
+ volumes:
+ - ollama_qwen:/root/.ollama
+
+volumes:
+ ollama_qwen:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/qwen && cd /opt/qwen
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+> ℹ️ The container only runs the Ollama server — it does not download a model. Once it is up, pull Qwen with: `docker exec -it ollama-qwen ollama pull qwen2.5`
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs ollama-qwen | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Qwen 2.5 on AltStack Directory](https://thealtstack.com/alternative-to/qwen)
+- [Qwen 2.5 Self-Hosted Guide](https://thealtstack.com/self-hosted/qwen)
+- [Official Documentation](https://qwenlm.github.io)
+- [GitHub Repository](https://github.com/QwenLM/Qwen2.5)
diff --git a/docs/app/deploy/rocketchat/page.mdx b/docs/app/deploy/rocketchat/page.mdx
new file mode 100644
index 0000000..91f01c6
--- /dev/null
+++ b/docs/app/deploy/rocketchat/page.mdx
@@ -0,0 +1,144 @@
+---
+title: "Deploy Rocket.Chat Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Rocket.Chat with Docker Compose."
+---
+
+# Deploy Rocket.Chat
+
+The Secure CommsOS™ for mission-critical operations
+
+
+ ⭐ 44.5k stars
+ 📜 Other
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Rocket.Chat instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Rocket.Chat and add this `docker-compose.yml`. Note that the `mongo-init-replica` service mounts a local `init-replica.sh` script (which must run `rs.initiate()` against MongoDB to set up the `rs0` replica set) — create that file alongside the compose file:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ rocketchat:
+ image: registry.rocket.chat/rocketchat/rocket.chat:latest
+ container_name: rocketchat
+ restart: unless-stopped
+ depends_on:
+ - mongo
+ ports:
+ - "3002:3000"
+ environment:
+ - MONGO_URL=mongodb://mongo:27017/rocketchat
+ - ROOT_URL=http://localhost:3002
+ - PORT=3000
+
+ mongo:
+ image: mongo:5.0
+ container_name: rocketchat-mongo
+ restart: unless-stopped
+ command: mongod --oplogSize 128 --replSet rs0 --storageEngine=wiredTiger
+ volumes:
+ - ./data/db:/data/db
+
+ mongo-init-replica:
+ image: mongo:5.0
+ container_name: mongo-init-replica
+ restart: unless-stopped
+ command: bash /init-replica.sh
+ depends_on:
+ - mongo
+ volumes:
+ - ./init-replica.sh:/init-replica.sh
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/rocketchat && cd /opt/rocketchat
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `MONGO_URL` | `mongodb://mongo:27017/rocketchat` | No |
+| `ROOT_URL` | `http://localhost:3002` | No |
+| `PORT` | `3000` | No |
+| `storageEngine` | `wiredTiger` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs rocketchat | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Rocket.Chat on AltStack Directory](https://thealtstack.com/alternative-to/rocketchat)
+- [Rocket.Chat Self-Hosted Guide](https://thealtstack.com/self-hosted/rocketchat)
+- [Official Documentation](https://rocket.chat)
+- [GitHub Repository](https://github.com/RocketChat/Rocket.Chat)
diff --git a/docs/app/deploy/signoz/page.mdx b/docs/app/deploy/signoz/page.mdx
new file mode 100644
index 0000000..43bfe7b
--- /dev/null
+++ b/docs/app/deploy/signoz/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy SigNoz Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting SigNoz with Docker Compose."
+---
+
+# Deploy SigNoz
+
+Open source observability platform. SigNoz helps developers monitor applications and troubleshoot problems.
+
+
+ ⭐ 18.0k stars
+ 📜 MIT
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working SigNoz instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for SigNoz and add this `docker-compose.yml`. Note that this minimal config runs only the SigNoz frontend — a working deployment also needs the query service, ClickHouse, and the OpenTelemetry collector, so consult the official install docs for the full stack:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ signoz-frontend:
+ image: signoz/frontend:latest
+ container_name: signoz-frontend
+ restart: unless-stopped
+ ports:
+ - "3301:3301"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/signoz && cd /opt/signoz
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs signoz-frontend | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [SigNoz on AltStack Directory](https://thealtstack.com/alternative-to/signoz)
+- [SigNoz Self-Hosted Guide](https://thealtstack.com/self-hosted/signoz)
+- [Official Documentation](https://signoz.io)
+- [GitHub Repository](https://github.com/signoz/signoz)
diff --git a/docs/app/deploy/stable-diffusion/page.mdx b/docs/app/deploy/stable-diffusion/page.mdx
new file mode 100644
index 0000000..2173147
--- /dev/null
+++ b/docs/app/deploy/stable-diffusion/page.mdx
@@ -0,0 +1,112 @@
+---
+title: "Deploy Stable Diffusion 3.5 Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Stable Diffusion 3.5 with Docker Compose."
+---
+
+# Deploy Stable Diffusion 3.5
+
+The latest open-weights image generation model from Stability AI, offering superior prompt adherence.
+
+
+ ⭐ 10.0k stars
+ 📜 Stability Community License
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Stable Diffusion 3.5 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Stable Diffusion 3.5 and add this `docker-compose.yml`. Note that image generation effectively requires an NVIDIA GPU — install the NVIDIA Container Toolkit and add a GPU reservation (`deploy.resources.reservations.devices`) to this service for usable performance:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ sd-webui:
+ image: automatic1111/stable-diffusion-webui:latest
+ container_name: sd-webui
+ restart: unless-stopped
+ ports:
+ - "7860:7860"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/stable-diffusion && cd /opt/stable-diffusion
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs sd-webui | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Stable Diffusion 3.5 on AltStack Directory](https://thealtstack.com/alternative-to/stable-diffusion)
+- [Stable Diffusion 3.5 Self-Hosted Guide](https://thealtstack.com/self-hosted/stable-diffusion)
+- [Official Documentation](https://stability.ai)
+- [GitHub Repository](https://github.com/Stability-AI/sd3.5)
diff --git a/docs/app/deploy/supabase/page.mdx b/docs/app/deploy/supabase/page.mdx
new file mode 100644
index 0000000..6d209c2
--- /dev/null
+++ b/docs/app/deploy/supabase/page.mdx
@@ -0,0 +1,208 @@
+---
+title: "Deploy Supabase Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Supabase with Docker Compose."
+---
+
+# Deploy Supabase
+
+The Postgres development platform. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications.
+
+
+ ⭐ 97.4k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+## What You'll Get
+
+A fully working Supabase instance running on your server. This isn't just a database; it's a full backend-as-a-service including:
+
+- **PostgreSQL:** The world's most advanced relational database.
+- **GoTrue:** User management and JWT-based authentication.
+- **PostgREST:** Turns your database into a RESTful API automatically.
+- **Realtime:** Listen to database changes via WebSockets.
+- **Storage:** S3-compatible file storage.
+
+> ⚠️ **Critical Security Note:** The default configuration uses "postgres" as the password and a temporary JWT secret. You MUST change these in your `.env` file before exposing this to the internet.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Supabase and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Supabase Production-Ready Docker Compose
+# Note: Supabase is a collection of services. Official images are the standard.
+# This setup includes the core services: PostgREST, GoTrue, Realtime, Storage, and PostgreSQL.
+
+version: '3.8'
+
+services:
+ db:
+ container_name: supabase-db
+ image: supabase/postgres:15.1.1.78
+ command: postgres -c config_file=/etc/postgresql/postgresql.conf -c log_min_messages=fatal
+ healthcheck:
+ test: ["CMD", "pg_isready", "-U", "postgres"]
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ environment:
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
+ PGDATA: /var/lib/postgresql/data/pgdata
+ volumes:
+ - supabase_db_data:/var/lib/postgresql/data
+ networks:
+ - supabase_net
+
+ auth:
+ container_name: supabase-auth
+ image: supabase/gotrue:v2.143.0
+ depends_on:
+ db:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:9999/health"]
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ environment:
+ GOTRUE_DB_DRIVER: postgres
+ GOTRUE_DB_DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres?sslmode=disable
+ GOTRUE_SITE_URL: ${SITE_URL:-http://localhost:3000}
+ GOTRUE_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-don-not-use-in-prod}
+ networks:
+ - supabase_net
+
+ rest:
+ container_name: supabase-rest
+ image: postgrest/postgrest:v11.2.2
+ depends_on:
+ db:
+ condition: service_healthy
+ environment:
+ PGRST_DB_URI: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres
+ PGRST_DB_SCHEMA: public
+ PGRST_DB_ANON_ROLE: anon
+ networks:
+ - supabase_net
+
+ realtime:
+ container_name: supabase-realtime
+ image: supabase/realtime:v2.25.56
+ depends_on:
+ db:
+ condition: service_healthy
+ environment:
+ DB_HOST: db
+ DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
+ JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-don-not-use-in-prod}
+ networks:
+ - supabase_net
+
+ storage:
+ container_name: supabase-storage
+ image: supabase/storage-api:v0.43.12
+ depends_on:
+ db:
+ condition: service_healthy
+ environment:
+ ANON_KEY: ${ANON_KEY}
+ SERVICE_KEY: ${SERVICE_KEY}
+ PGRST_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-don-not-use-in-prod}
+ DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres
+ networks:
+ - supabase_net
+
+networks:
+ supabase_net:
+ driver: bridge
+
+volumes:
+ supabase_db_data:
+ name: supabase_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/supabase && cd /opt/supabase
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `POSTGRES_PASSWORD` | `postgres` | No |
+| `SITE_URL` | `http://localhost:3000` | No |
+| `JWT_SECRET` | `super-secret-jwt-token-don-not-use-in-prod` | No |
+| `ANON_KEY` | `—` | ✅ Yes |
+| `SERVICE_KEY` | `—` | ✅ Yes |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Supabase on AltStack Directory](https://thealtstack.com/alternative-to/supabase)
+- [Supabase Self-Hosted Guide](https://thealtstack.com/self-hosted/supabase)
+- [Official Documentation](https://supabase.com)
+- [GitHub Repository](https://github.com/supabase/supabase)
diff --git a/docs/app/deploy/superset/page.mdx b/docs/app/deploy/superset/page.mdx
new file mode 100644
index 0000000..c678499
--- /dev/null
+++ b/docs/app/deploy/superset/page.mdx
@@ -0,0 +1,171 @@
+---
+title: "Deploy Apache Superset Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Apache Superset with Docker Compose."
+---
+
+# Deploy Apache Superset
+
+Enterprise-ready business intelligence web application.
+
+
+ ⭐ 59.0k stars
+ 📜 Apache 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Apache Superset instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Apache Superset and add this `docker-compose.yml`. Note that the `superset` service builds from a local `Dockerfile`, so either provide one or replace the `build:` section with `image: apache/superset:latest`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for Apache Superset
+version: '3.8'
+
+services:
+ superset:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ container_name: superset
+ ports:
+ - "8088:8088"
+ environment:
+ - DATABASE_URL=postgresql://superset:superset@db:5432/superset
+ - REDIS_URL=redis://redis:6379
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ networks:
+ - superset_net
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:8088/health" ]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+
+ db:
+ image: postgres:15-alpine
+ container_name: superset-db
+ environment:
+ POSTGRES_USER: superset
+ POSTGRES_PASSWORD: superset
+ POSTGRES_DB: superset
+ volumes:
+ - superset_db_data:/var/lib/postgresql/data
+ networks:
+ - superset_net
+ healthcheck:
+ test: [ "CMD-SHELL", "pg_isready -U superset" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+ redis:
+ image: redis:7-alpine
+ container_name: superset-redis
+ networks:
+ - superset_net
+ healthcheck:
+ test: [ "CMD", "redis-cli", "ping" ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+networks:
+ superset_net:
+ driver: bridge
+
+volumes:
+ superset_db_data:
+ name: superset_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/superset && cd /opt/superset
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `DATABASE_URL` | `postgresql://superset:superset@db:5432/superset` | No |
+| `REDIS_URL` | `redis://redis:6379` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs superset | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Apache Superset on AltStack Directory](https://thealtstack.com/alternative-to/superset)
+- [Apache Superset Self-Hosted Guide](https://thealtstack.com/self-hosted/superset)
+- [Official Documentation](https://superset.apache.org)
+- [GitHub Repository](https://github.com/apache/superset)
diff --git a/docs/app/deploy/tabby/page.mdx b/docs/app/deploy/tabby/page.mdx
new file mode 100644
index 0000000..596cc51
--- /dev/null
+++ b/docs/app/deploy/tabby/page.mdx
@@ -0,0 +1,117 @@
+---
+title: "Deploy TabbyML Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting TabbyML with Docker Compose."
+---
+
+# Deploy TabbyML
+
+Self-hosted AI coding assistant. An open-source, self-hosted alternative to GitHub Copilot.
+
+
+ ⭐ 25.0k stars
+ 📜 Apache License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working TabbyML instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for TabbyML and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ tabby:
+ image: tabbyml/tabby:latest
+ container_name: tabby
+ restart: unless-stopped
+ ports:
+ - "8080:8080"
+ volumes:
+ - tabby-data:/data
+
+volumes:
+ tabby-data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/tabby && cd /opt/tabby
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs tabby | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [TabbyML on AltStack Directory](https://thealtstack.com/alternative-to/tabby)
+- [TabbyML Self-Hosted Guide](https://thealtstack.com/self-hosted/tabby)
+- [Official Documentation](https://tabby.tabbyml.com)
+- [GitHub Repository](https://github.com/TabbyML/tabby)
diff --git a/docs/app/deploy/taiga/page.mdx b/docs/app/deploy/taiga/page.mdx
new file mode 100644
index 0000000..38e41af
--- /dev/null
+++ b/docs/app/deploy/taiga/page.mdx
@@ -0,0 +1,172 @@
+---
+title: "Deploy Taiga Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Taiga with Docker Compose."
+---
+
+# Deploy Taiga
+
+Taiga is a free and open-source project management platform for agile teams, supporting both Scrum and Kanban workflows.
+
+
+ ⭐ 0.8k stars
+ 📜 Mozilla Public License 2.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Taiga instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Taiga and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ taiga-front:
+ image: taigaio/taiga-front:latest
+ container_name: taiga-front
+ restart: unless-stopped
+ depends_on:
+ - taiga-back
+ ports:
+ - "9000:80"
+ environment:
+ - TAIGA_URL=http://localhost:9000
+ - TAIGA_WEBSOCKETS_URL=ws://localhost:9000
+
+ taiga-back:
+ image: taigaio/taiga-back:latest
+ container_name: taiga-back
+ restart: unless-stopped
+ depends_on:
+ - taiga-db
+ - taiga-redis
+ - taiga-async-rabbitmq
+ environment:
+ - POSTGRES_DB=taiga
+ - POSTGRES_USER=taiga
+ - POSTGRES_PASSWORD=taiga
+ - TAIGA_SECRET_KEY=exe3quu8Su2wohx0uNgo0eif4wohphah
+
+ taiga-db:
+ image: postgres:13-alpine
+ container_name: taiga-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_DB=taiga
+ - POSTGRES_USER=taiga
+ - POSTGRES_PASSWORD=taiga
+ volumes:
+ - taiga_db_data:/var/lib/postgresql/data
+
+ taiga-async-rabbitmq:
+ image: rabbitmq:3.8-management-alpine
+ container_name: taiga-rabbitmq
+ restart: unless-stopped
+ environment:
+ - RABBITMQ_ERLANG_COOKIE=secret-cookie
+ - RABBITMQ_DEFAULT_USER=taiga
+ - RABBITMQ_DEFAULT_PASS=taiga
+
+ taiga-redis:
+ image: redis:6-alpine
+ container_name: taiga-redis
+ restart: unless-stopped
+
+volumes:
+ taiga_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/taiga && cd /opt/taiga
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `TAIGA_URL` | `http://localhost:9000` | No |
+| `TAIGA_WEBSOCKETS_URL` | `ws://localhost:9000` | No |
+| `POSTGRES_DB` | `taiga` | No |
+| `POSTGRES_USER` | `taiga` | No |
+| `POSTGRES_PASSWORD` | `taiga` | No |
+| `TAIGA_SECRET_KEY` | `exe3quu8Su2wohx0uNgo0eif4wohphah` | No |
+| `RABBITMQ_ERLANG_COOKIE` | `secret-cookie` | No |
+| `RABBITMQ_DEFAULT_USER` | `taiga` | No |
+| `RABBITMQ_DEFAULT_PASS` | `taiga` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs taiga-back | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Taiga on AltStack Directory](https://thealtstack.com/alternative-to/taiga)
+- [Taiga Self-Hosted Guide](https://thealtstack.com/self-hosted/taiga)
+- [Official Documentation](https://taiga.io)
+- [GitHub Repository](https://github.com/taigaio/taiga-back)
diff --git a/docs/app/deploy/twenty/page.mdx b/docs/app/deploy/twenty/page.mdx
new file mode 100644
index 0000000..510ced1
--- /dev/null
+++ b/docs/app/deploy/twenty/page.mdx
@@ -0,0 +1,140 @@
+---
+title: "Deploy Twenty Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Twenty with Docker Compose. "
+---
+
+# Deploy Twenty
+
+A modern open-source CRM alternative to Salesforce and Pipedrive.
+
+
+ ⭐ 15.0k stars
+ 📜 AGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Twenty instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Twenty and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ twenty:
+ image: twentyhq/twenty:latest
+ container_name: twenty
+ restart: unless-stopped
+ depends_on:
+ - db
+ ports:
+ - "3000:3000"
+ environment:
+ - PG_DATABASE_URL=postgres://twenty:twenty@db:5432/twenty
+ - FRONTEND_URL=http://localhost:3000
+
+ db:
+ image: postgres:15-alpine
+ container_name: twenty-db
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=twenty
+ - POSTGRES_PASSWORD=twenty
+ - POSTGRES_DB=twenty
+ volumes:
+ - twenty_db_data:/var/lib/postgresql/data
+
+volumes:
+ twenty_db_data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/twenty && cd /opt/twenty
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `PG_DATABASE_URL` | `postgres://twenty:twenty@db:5432/twenty` | No |
+| `FRONTEND_URL` | `http://localhost:3000` | No |
+| `POSTGRES_USER` | `twenty` | No |
+| `POSTGRES_PASSWORD` | `twenty` | No |
+| `POSTGRES_DB` | `twenty` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs twenty | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Twenty on AltStack Directory](https://thealtstack.com/alternative-to/twenty)
+- [Twenty Self-Hosted Guide](https://thealtstack.com/self-hosted/twenty)
+- [Official Documentation](https://twenty.com)
+- [GitHub Repository](https://github.com/twentyhq/twenty)
diff --git a/docs/app/deploy/uptime-kuma/page.mdx b/docs/app/deploy/uptime-kuma/page.mdx
new file mode 100644
index 0000000..5dd6061
--- /dev/null
+++ b/docs/app/deploy/uptime-kuma/page.mdx
@@ -0,0 +1,130 @@
+---
+title: "Deploy Uptime Kuma Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Uptime Kuma with Docker Compose. "
+---
+
+# Deploy Uptime Kuma
+
+A fancy self-hosted monitoring tool.
+
+
+ ⭐ 55.0k stars
+ 📜 MIT
+ 🟢 Beginner
+ ⏱ ~5 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Uptime Kuma instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Uptime Kuma and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+# Docker Compose for Uptime Kuma
+version: '3.8'
+
+services:
+ uptime-kuma:
+ image: louislam/uptime-kuma:1 # Official image is standard
+ container_name: uptime-kuma
+ ports:
+ - "3001:3001"
+ volumes:
+ - uptime_kuma_data:/app/data
+ networks:
+ - uptime_net
+ healthcheck:
+ test: [ "CMD", "extra/healthcheck" ]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ restart: unless-stopped
+
+networks:
+ uptime_net:
+ driver: bridge
+
+volumes:
+ uptime_kuma_data:
+ name: uptime_kuma_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/uptime-kuma && cd /opt/uptime-kuma
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs uptime-kuma | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Uptime Kuma on AltStack Directory](https://thealtstack.com/alternative-to/uptime-kuma)
+- [Uptime Kuma Self-Hosted Guide](https://thealtstack.com/self-hosted/uptime-kuma)
+- [Official Documentation](https://uptime.kuma.pet)
+- [GitHub Repository](https://github.com/louislam/uptime-kuma)
diff --git a/docs/app/deploy/vaultwarden/page.mdx b/docs/app/deploy/vaultwarden/page.mdx
new file mode 100644
index 0000000..8dce328
--- /dev/null
+++ b/docs/app/deploy/vaultwarden/page.mdx
@@ -0,0 +1,126 @@
+---
+title: "Deploy Vaultwarden Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Vaultwarden with Docker Compose. "
+---
+
+# Deploy Vaultwarden
+
+Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs.
+
+
+ ⭐ 32.0k stars
+ 📜 AGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Vaultwarden instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Vaultwarden and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ vaultwarden:
+ image: vaultwarden/server:latest
+ container_name: vaultwarden
+ restart: unless-stopped
+ ports:
+ - "8080:80"
+ volumes:
+ - vw-data:/data
+ environment:
+ - WEBSOCKET_ENABLED=true
+ - SIGNUPS_ALLOWED=true
+
+volumes:
+ vw-data:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/vaultwarden && cd /opt/vaultwarden
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `WEBSOCKET_ENABLED` | `true` | No |
+| `SIGNUPS_ALLOWED` | `true` | No |
+
+> 💡 **Tip:** `SIGNUPS_ALLOWED=true` lets anyone who can reach the instance register an account. Once you've created your own account, set it to `false` and restart the container.
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs vaultwarden | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Vaultwarden on AltStack Directory](https://thealtstack.com/alternative-to/vaultwarden)
+- [Vaultwarden Self-Hosted Guide](https://thealtstack.com/self-hosted/vaultwarden)
+- [Official Documentation](https://github.com/dani-garcia/vaultwarden)
+- [GitHub Repository](https://github.com/dani-garcia/vaultwarden)
diff --git a/docs/app/deploy/zammad/page.mdx b/docs/app/deploy/zammad/page.mdx
new file mode 100644
index 0000000..a09735b
--- /dev/null
+++ b/docs/app/deploy/zammad/page.mdx
@@ -0,0 +1,142 @@
+---
+title: "Deploy Zammad Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Zammad with Docker Compose. "
+---
+
+# Deploy Zammad
+
+A web-based, open source helpdesk/customer support system with many features.
+
+
+ ⭐ 5.0k stars
+ 📜 AGPL-3.0
+ 🔴 Advanced
+ ⏱ ~20 minutes
+
+
+
+
+
+
+## What You'll Get
+
+A fully working Zammad instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Zammad and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+ zammad:
+ image: zammad/zammad-docker-compose:zammad-6.3.1-23
+ container_name: zammad
+ restart: unless-stopped
+ depends_on:
+ - zammad-postgresql
+ - zammad-elasticsearch
+ - zammad-redis
+ ports:
+ - "8080:8080"
+
+ zammad-elasticsearch:
+ image: bitnami/elasticsearch:8.12.2
+ container_name: zammad-elasticsearch
+ restart: unless-stopped
+ environment:
+ - discovery.type=single-node
+
+ zammad-postgresql:
+ image: postgres:15-alpine
+ container_name: zammad-postgresql
+ restart: unless-stopped
+ environment:
+ - POSTGRES_USER=zammad
+ - POSTGRES_PASSWORD=zammad
+
+ zammad-redis:
+ image: redis:7.2-alpine
+ container_name: zammad-redis
+ restart: unless-stopped
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/zammad && cd /opt/zammad
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `POSTGRES_USER` | `zammad` | No |
+| `POSTGRES_PASSWORD` | `zammad` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs zammad | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Zammad on AltStack Directory](https://thealtstack.com/alternative-to/zammad)
+- [Zammad Self-Hosted Guide](https://thealtstack.com/self-hosted/zammad)
+- [Official Documentation](https://zammad.org)
+- [GitHub Repository](https://github.com/zammad/zammad)
diff --git a/docs/app/globals.css b/docs/app/globals.css
new file mode 100644
index 0000000..15458fe
--- /dev/null
+++ b/docs/app/globals.css
@@ -0,0 +1,378 @@
+@import "tailwindcss";
+
+/* ============================================
+ AltStack Docs — Custom Theme Overrides
+ ============================================ */
+
+/* ---- Font Stack ---- */
+:root {
+ --font-sans: var(--font-outfit), 'Outfit', system-ui, -apple-system, sans-serif;
+ --font-mono: var(--font-mono), 'JetBrains Mono', 'Fira Code', monospace;
+}
+
+/* ---- AltStack Brand Colors ---- */
+:root {
+ --altstack-red: #ef4444;
+ --altstack-orange: #f97316;
+ --altstack-bg: #050505;
+ --altstack-surface: #0a0a0a;
+ --altstack-surface-elevated: #111111;
+ --altstack-border: rgba(255, 255, 255, 0.08);
+ --altstack-glass: rgba(10, 10, 10, 0.7);
+ --altstack-text-dim: rgba(255, 255, 255, 0.5);
+ --hero-from: #ffffff;
+ --hero-to: #94a3b8;
+}
+
+/* ---- Dark mode as default feel ---- */
+html {
+ color-scheme: dark;
+}
+
+:root {
+ --nextra-primary-hue: 10deg;
+}
+
+/* Light mode overrides for high contrast */
+html[class~="light"] {
+ --nextra-bg: #ffffff;
+ --altstack-bg: #ffffff;
+ --altstack-surface: #f8fafc;
+ --altstack-surface-elevated: #f1f5f9;
+ --altstack-border: rgba(0, 0, 0, 0.08);
+ --altstack-text-dim: #64748b;
+ --altstack-glass: rgba(255, 255, 255, 0.8);
+ --hero-from: #0f172a;
+ --hero-to: #334155;
+}
+
+html[class~="dark"] {
+ --nextra-bg: var(--altstack-bg);
+}
+
+/* ---- Logo Styling & Animations ---- */
+@keyframes float-red {
+
+ 0%,
+ 100% {
+ transform: translateY(0);
+ }
+
+ 50% {
+ transform: translateY(-8px);
+ }
+}
+
+@keyframes float-glass {
+
+ 0%,
+ 100% {
+ transform: translateY(0);
+ }
+
+ 50% {
+ transform: translateY(-5px);
+ }
+}
+
+@keyframes float-slate {
+
+ 0%,
+ 100% {
+ transform: translateY(0);
+ }
+
+ 50% {
+ transform: translateY(-2px);
+ }
+}
+
+.animate-float-red {
+ animation: float-red 3s ease-in-out infinite;
+}
+
+.animate-float-glass {
+ animation: float-glass 3.5s ease-in-out infinite;
+}
+
+.animate-float-slate {
+ animation: float-slate 4s ease-in-out infinite;
+}
+
+.altstack-logo {
+ display: flex;
+ align-items: center;
+ gap: 0.75rem;
+ font-weight: 900;
+ font-size: 1.3rem;
+ letter-spacing: -0.03em;
+ color: white;
+}
+
+html[class~="light"] .altstack-logo {
+ color: #0f172a;
+}
+
+/* ---- Navbar & Sidebar Glassmorphism ---- */
+.nextra-nav-container {
+ background-color: var(--altstack-glass) !important;
+ backdrop-filter: blur(12px) !important;
+ -webkit-backdrop-filter: blur(12px) !important;
+ border-bottom: 1px solid var(--altstack-border);
+}
+
+.nextra-sidebar-container {
+ background-color: transparent !important;
+}
+
+/* ---- Home Page Card Overrides (Legacy for now) ---- */
+.nextra-cards {
+ margin-top: 2rem !important;
+}
+
+/* ---- Custom Grid Classes ---- */
+.premium-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
+ gap: 1.5rem;
+ margin-top: 2.5rem;
+}
+
+.premium-card {
+ position: relative;
+ padding: 1.75rem;
+ background: var(--altstack-surface);
+ border: 1px solid var(--altstack-border);
+ border-radius: 20px;
+ transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+ text-decoration: none !important;
+ overflow: hidden;
+}
+
+.premium-card:hover {
+ background: var(--altstack-surface-elevated);
+ border-color: rgba(239, 68, 68, 0.3);
+ transform: translateY(-4px);
+ box-shadow: 0 12px 24px -12px rgba(0, 0, 0, 0.5);
+}
+
+.premium-card::before {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ height: 2px;
+ background: linear-gradient(90deg, var(--altstack-red), var(--altstack-orange));
+ opacity: 0;
+ transition: opacity 0.3s ease;
+}
+
+.premium-card:hover::before {
+ opacity: 1;
+}
+
+.premium-card-title {
+ display: flex;
+ align-items: center;
+ gap: 0.75rem;
+ font-size: 1.15rem;
+ font-weight: 700;
+ color: white;
+ margin-bottom: 0.75rem;
+}
+
+html[class~="light"] .premium-card-title {
+ color: #0f172a;
+}
+
+.premium-card-description {
+ font-size: 0.95rem;
+ line-height: 1.6;
+ color: var(--altstack-text-dim);
+}
+
+/* ---- Footer ---- */
+.altstack-footer {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ padding: 3rem 0 2rem;
+ font-size: 0.85rem;
+ color: var(--altstack-text-dim);
+ border-top: 1px solid var(--altstack-border);
+ margin-top: 4rem;
+}
+
+.altstack-footer a {
+ color: var(--altstack-red);
+ text-decoration: none;
+ font-weight: 600;
+}
+
+.altstack-footer a:hover {
+ text-decoration: underline;
+}
+
+.footer-header {
+ color: #0f172a;
+ /* slate-900 */
+}
+
+html[class~="dark"] .footer-header {
+ color: #ffffff !important;
+}
+
+html[class~="light"] .footer-header {
+ color: #0f172a !important;
+}
+
+/* ---- Difficulty badges ---- */
+.badge-beginner {
+ display: inline-flex;
+ align-items: center;
+ gap: 0.3rem;
+ padding: 0.15rem 0.6rem;
+ font-size: 0.75rem;
+ font-weight: 700;
+ color: #22c55e;
+ background: rgba(34, 197, 94, 0.1);
+ border: 1px solid rgba(34, 197, 94, 0.2);
+ border-radius: 9999px;
+}
+
+.badge-intermediate {
+ display: inline-flex;
+ align-items: center;
+ gap: 0.3rem;
+ padding: 0.15rem 0.6rem;
+ font-size: 0.75rem;
+ font-weight: 700;
+ color: #eab308;
+ background: rgba(234, 179, 8, 0.1);
+ border: 1px solid rgba(234, 179, 8, 0.2);
+ border-radius: 9999px;
+}
+
+.badge-advanced {
+ display: inline-flex;
+ align-items: center;
+ gap: 0.3rem;
+ padding: 0.15rem 0.6rem;
+ font-size: 0.75rem;
+ font-weight: 700;
+ color: #ef4444;
+ background: rgba(239, 68, 68, 0.1);
+ border: 1px solid rgba(239, 68, 68, 0.2);
+ border-radius: 9999px;
+}
+
+/* ---- Hero info bar for deploy guides ---- */
+.deploy-hero {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 0.75rem;
+ margin: 1rem 0 1.5rem;
+ padding: 1rem 1.25rem;
+ background: var(--altstack-surface);
+ border: 1px solid var(--altstack-border);
+ border-radius: 16px;
+ font-size: 0.85rem;
+ color: rgba(255, 255, 255, 0.6);
+}
+
+.deploy-hero-item {
+ display: flex;
+ align-items: center;
+ gap: 0.35rem;
+}
+
+/* ---- Manual Logo Fix (Robust Override) ---- */
+html[class~="dark"] .manual-logo-text {
+ color: #ffffff !important;
+}
+
+html[class~="light"] .manual-logo-text {
+ color: #0f172a !important;
+ /* slate-900 */
+}
+
+/* Fill overrides */
+html[class~="dark"] .manual-logo-fill {
+ fill: rgba(255, 255, 255, 0.1) !important;
+}
+
+html[class~="light"] .manual-logo-fill {
+ fill: rgba(15, 23, 42, 0.1) !important;
+}
+
+html[class~="dark"] .manual-logo-fill-secondary {
+ fill: rgba(255, 255, 255, 0.2) !important;
+}
+
+html[class~="light"] .manual-logo-fill-secondary {
+ fill: rgba(15, 23, 42, 0.2) !important;
+}
+
+/* Stroke overrides */
+html[class~="dark"] .manual-logo-stroke {
+ stroke: rgba(255, 255, 255, 0.2) !important;
+}
+
+html[class~="light"] .manual-logo-stroke {
+ stroke: rgba(15, 23, 42, 0.2) !important;
+}
+
+html[class~="dark"] .manual-logo-stroke-secondary {
+ stroke: rgba(255, 255, 255, 0.3) !important;
+}
+
+html[class~="light"] .manual-logo-stroke-secondary {
+ stroke: rgba(15, 23, 42, 0.3) !important;
+}
+
+html[class~="dark"] .manual-logo-stroke-tertiary {
+ stroke: rgba(255, 255, 255, 0.4) !important;
+}
+
+html[class~="light"] .manual-logo-stroke-tertiary {
+ stroke: rgba(15, 23, 42, 0.4) !important;
+}
+
+/* ============================================
+ Mobile UI Fixes
+ ============================================ */
+
+/* Fix mobile menu z-index issues to ensure it sits above content */
+.nextra-nav-container,
+.nextra-navbar {
+ z-index: 60 !important;
+}
+
+/* Ensure search and other elements don't overlap the menu */
+.nextra-search-container {
+ z-index: 40;
+}
+
+/* Adjust mobile menu spacing to prevent overlap with navbar */
+@media (max-width: 768px) {
+ .nextra-menu-mobile {
+ padding-top: 4rem;
+ z-index: 45;
+ }
+
+ /* Ensure the mobile menu content is scrollable and visible */
+ .nextra-menu-mobile .nextra-scrollbar {
+ padding-bottom: 5rem;
+ }
+}
+
+/* Force solid background on mobile menu */
+@media (max-width: 768px) {
+
+ .nextra-menu-mobile,
+ .nextra-mobile-nav {
+ background-color: var(--altstack-bg) !important;
+ z-index: 50 !important;
+ }
+}
\ No newline at end of file
diff --git a/docs/app/icon.tsx b/docs/app/icon.tsx
new file mode 100644
index 0000000..efedb85
--- /dev/null
+++ b/docs/app/icon.tsx
@@ -0,0 +1,60 @@
+import { ImageResponse } from 'next/og';
+
+
+
+export const size = {
+ width: 32,
+ height: 32,
+};
+export const contentType = 'image/png';
+
+export default function Icon() {
+ return new ImageResponse(
+ (
+
+ ),
+ {
+ ...size,
+ }
+ );
+}
diff --git a/docs/app/layout.tsx b/docs/app/layout.tsx
new file mode 100644
index 0000000..036b19c
--- /dev/null
+++ b/docs/app/layout.tsx
@@ -0,0 +1,292 @@
+import { Footer, Layout, Navbar } from 'nextra-theme-docs'
+import Link from 'next/link'
+import Script from 'next/script'
+import { Head } from 'nextra/components'
+import { getPageMap } from 'nextra/page-map'
+import { Outfit, JetBrains_Mono } from 'next/font/google'
+import type { Metadata, Viewport } from 'next'
+import type { ReactNode } from 'react'
+import 'nextra-theme-docs/style.css'
+import './globals.css'
+
+const outfit = Outfit({
+ subsets: ['latin'],
+ variable: '--font-outfit',
+})
+
+const jetbrainsMono = JetBrains_Mono({
+ subsets: ['latin'],
+ variable: '--font-mono',
+})
+
+
+> ⚠️ **Security:** These defaults — especially `TAIGA_SECRET_KEY` and the database/RabbitMQ passwords — are public example values. Change them before exposing this instance to the internet.
+
+ title: {
+ default: 'AltStack Docs — Self-Hosting Guides & Deploy Recipes',
+ template: '%s — AltStack Docs',
+ },
+ description:
+ 'Step-by-step guides to self-host open source software. Docker Compose configs, deployment recipes, and stack-building guides for developers and teams.',
+ openGraph: {
+ title: 'AltStack Docs',
+ description:
+ 'Self-hosting guides, deploy configs, and stack-building recipes for open source software.',
+ url: 'https://docs.thealtstack.com',
+ siteName: 'AltStack Docs',
+ locale: 'en_US',
+ type: 'website',
+ },
+ twitter: {
+ card: 'summary_large_image',
+ title: 'AltStack Docs',
+ description:
+ 'Self-hosting guides, deploy configs, and stack recipes.',
+ },
+}
+
+export const viewport: Viewport = {
+ themeColor: '#050505',
+}
+
+import AIChatLinks from '../components/AIChatLinks'
+
+function Logo() {
+ return (
+
+
+
+ {/* Bottom Layer (Slate) */}
+
+
+
+
+
+ {/* Middle Layer (Glass) */}
+
+
+
+
+
+ {/* Top Layer (Red) */}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ AltStack docs
+
+
+ )
+}
+
+const navbar = (
+ }
+ >
+
+
+)
+
+const footer = (
+
+
+
+
+
+
+
+
+
+ Step-by-step guides to self-host open source software with real configs and zero filler.
+
+
+
+
+
+
Documentation
+
+ Quick Start
+ Deploy Guides
+ Curated Stacks
+
+
+
+
+
+
+
+
+
+
+ © {new Date().getFullYear()} The AltStack. Empowering through Open Source.
+
+
+
+
+ Systems Operational
+
+
+
+
+
+
+)
+
+export default async function RootLayout({
+ children,
+}: {
+ children: ReactNode
+}) {
+ return (
+
+
+
+ {/* Google Analytics */}
+
+
+
+
+ {children}
+
+
+
+ )
+}
diff --git a/docs/app/page.mdx b/docs/app/page.mdx
new file mode 100644
index 0000000..e869f5d
--- /dev/null
+++ b/docs/app/page.mdx
@@ -0,0 +1,127 @@
+---
+title: AltStack Docs
+searchable: false
+---
+
+import { Rocket, Box, Flame, Brain, ArrowRight, ScrollText, Globe } from 'lucide-react'
+
+
+
+ The AltStack Docs
+
+
+ The World's First Sovereign Infrastructure Engine. Self-hosting guides that actually work.
+
+
+
+
+
+
+
+ New to self-hosting? Start here. 5 minutes to your first deploy.
+
+
+
+
+
+
+
+ 65+ tools with Docker Compose configs. Find yours, ship it, own it.
+
+
+
+
+
+
+
+ Pre-built tool bundles for bootstrappers, designers, DevOps, privacy, and AI.
+
+
+
+
+
+
+
+ Docker, reverse proxies, SSL, backups — explained like you're a human.
+
+
+
+
+
+
+
+
+
+
Why These Docs Exist
+
+
+ Our philosophy, editorial rules, and what makes these docs different.
+
+
+
+
+
+
+
+
+
+
AltStack Directory
+
+
+ Browse 400+ open source tools, compare alternatives, and build your stack.
+
+
+
+
+
+
+
+
+
Why These Docs Exist
+
+ Every tool in the
AltStack directory can be self-hosted. These docs show you
how — with real configs, honest trade-offs, and none of the hand-waving.
+
+
+
+
The Rules
+
+ 1. Every guide ends with a working deployment
+ 2. Every config is tested and copy-pasteable
+ 3. Every tool gets an honest verdict
+ 4. We don't waste your time with "Introduction" filler
+
+
+
+
diff --git a/docs/app/quick-start/_meta.ts b/docs/app/quick-start/_meta.ts
new file mode 100644
index 0000000..ac377fb
--- /dev/null
+++ b/docs/app/quick-start/_meta.ts
@@ -0,0 +1,21 @@
+import type { MetaRecord } from 'nextra'
+
+const meta: MetaRecord = {
+ 'what-is-self-hosting': {
+ title: 'What is Self-Hosting?',
+ },
+ 'first-deployment': {
+ title: 'Your First Deployment',
+ },
+ 'choosing-a-server': {
+ title: 'Choosing a Server',
+ },
+ 'reverse-proxy': {
+ title: 'Setting Up a Reverse Proxy',
+ },
+ 'starter-kit': {
+ title: 'The Starter Kit',
+ },
+}
+
+export default meta
diff --git a/docs/app/quick-start/choosing-a-server/page.mdx b/docs/app/quick-start/choosing-a-server/page.mdx
new file mode 100644
index 0000000..23ec26c
--- /dev/null
+++ b/docs/app/quick-start/choosing-a-server/page.mdx
@@ -0,0 +1,113 @@
+---
+title: Choosing a Server
+description: "A no-nonsense comparison of VPS providers for self-hosting. Hetzner, DigitalOcean, Linode, and more — which one to pick and why."
+---
+
+# Choosing a Server
+
+You need a place to run your tools. That place is a **VPS** (Virtual Private Server) — basically a computer in a data center that you rent by the month.
+
+> ⚠️ **Heads Up:** You *can* self-host on a Raspberry Pi or old laptop at home. But a VPS gives you a static IP, proper uptime, and you don't need to worry about your ISP blocking ports. Start with a VPS. Go homelab later.
+
+## The Short Answer
+
+**Just get a Hetzner CX22.** €3.79/mo, 2 vCPUs, 4GB RAM, 40GB SSD, 20TB traffic. It'll run 5–10 Docker containers comfortably.
+
+If Hetzner isn't available in your region, get a [DigitalOcean $6/mo Droplet](https://m.do.co/c/2ed27757a361).
+
+That's the recommendation. Below is the reasoning.
+
+## The Comparison
+
+| Provider | Cheapest Plan | CPU | RAM | Storage | Best For |
+|---|---|---|---|---|---|
+| **Hetzner** | €3.79/mo | 2 shared | 4 GB | 40 GB | Best value overall, EU & US |
+| [**DigitalOcean**](https://m.do.co/c/2ed27757a361) | $6/mo | 1 vCPU | 1 GB | 25 GB | Beginners, great docs |
+| **Linode (Akamai)** | $5/mo | 1 vCPU | 1 GB | 25 GB | Solid alternative to DO |
+| **Vultr** | $5/mo | 1 vCPU | 1 GB | 25 GB | Global edge locations |
+| **OVH** | €3.50/mo | 1 vCPU | 2 GB | 20 GB | Budget EU hosting |
+| **Oracle Cloud** | Free tier | 4 ARM | 24 GB | 200 GB | Can't beat free (if you qualify) |
+
+## What Specs Do You Need?
+
+Here's a rough guide based on what you want to run:
+
+| Use Case | RAM | CPU | Storage | Monthly Cost |
+|---|---|---|---|---|
+| 1–3 lightweight tools (Plausible, Uptime Kuma, Listmonk) | 2 GB | 1 vCPU | 20 GB | ~$5 |
+| An entire Bootstrapper Stack (Supabase, Coolify, Plausible, etc.) | 4 GB | 2 vCPU | 40 GB | ~$6 |
+| AI models (Ollama, Stable Diffusion) | 8+ GB | 4+ vCPU | 80+ GB | ~$15+ |
+| "I self-host everything" | 16 GB | 4 vCPU | 160 GB | ~$25 |
+
+> 🔥 **Pro Tip:** Start small. You can upgrade a VPS in about 30 seconds. It's way harder to downgrade. Get a 4GB plan and upgrade when you actually feel it.
+
+Once you have a VPS, the setup is the same everywhere. Don't just install Docker and leave it open; follow these steps to secure your investment.
+
+### 1. Hardening SSH (Don't skip this)
+Root password login is a magnet for brute-force attacks. Use SSH keys.
+
+```bash
+# On your local machine:
+ssh-keygen -t ed25519 -C "your_email@example.com"
+ssh-copy-id root@your-server-ip
+
+# Now SSH back into the server:
+ssh root@your-server-ip
+```
+
+Disable password login:
+```bash
+nano /etc/ssh/sshd_config
+# Find and set: PasswordAuthentication no
+# Restart SSH: systemctl restart ssh
+```
+
+### 2. Configure the Firewall (UFW)
+Only open the ports you actually need.
+
+```bash
+# Allow SSH, HTTP, and HTTPS
+ufw allow ssh
+ufw allow 80/tcp
+ufw allow 443/tcp
+
+# Enable firewall
+ufw enable
+```
+
+### 3. Install Docker & Compose
+The easiest way is the official convenience script.
+
+```bash
+# Update the system
+apt update && apt upgrade -y
+
+# Install Docker
+curl -fsSL https://get.docker.com | sh
+
+# Install Docker Compose plugin
+apt install docker-compose-plugin -y
+
+# Verify
+docker --version
+docker compose version
+```
+
+### 4. Create a Non-Root User (Optional but Good)
+Running everything as root is risky. Create a user with sudo privileges.
+
+```bash
+adduser dev
+usermod -aG sudo dev
+usermod -aG docker dev
+# Now log in as 'dev' for future work
+```
+
+That's your server ready and secured. Every deploy guide in these docs assumes you've done this.
+
+## Next Steps
+
+Your server is ready. Time to deploy something real:
+
+→ [The AltStack Starter Kit](/quick-start/starter-kit) — Our recommended first set of tools
+→ [Deploy Guides](/deploy) — Pick any tool and deploy it
diff --git a/docs/app/quick-start/first-deployment/page.mdx b/docs/app/quick-start/first-deployment/page.mdx
new file mode 100644
index 0000000..be865b9
--- /dev/null
+++ b/docs/app/quick-start/first-deployment/page.mdx
@@ -0,0 +1,69 @@
+---
+title: Your First Deployment
+description: "Deploy Uptime Kuma in under 5 minutes with a single Docker command. Your first taste of self-hosting freedom."
+---
+
+# Your First Deployment
+
+**By the end of this page**, you'll have [Uptime Kuma](https://thealtstack.com/alternative-to/uptime-kuma) — a beautiful uptime monitor — running on your machine. It takes about 5 minutes.
+
+We're starting with Uptime Kuma because:
+- It's a single Docker command (no compose file needed)
+- It has a gorgeous UI you'll actually enjoy using
+- It's immediately useful — it monitors your other self-hosted tools
+- It proves that self-hosting isn't scary
+
+## Prerequisites
+
+- Docker installed on your machine ([install guide](https://docs.docker.com/get-docker/))
+- A terminal (Terminal on Mac, PowerShell on Windows, or any Linux terminal)
+
+> 💡 **Why?** Docker is how we package and run software without dependency hell. If you haven't installed it yet, the link above takes 3 minutes.
+
+## Let's Ship It
+
+Open your terminal and run this single command:
+
+```bash
+docker run -d \
+ --restart=unless-stopped \
+ -p 3001:3001 \
+ -v uptime-kuma:/app/data \
+ --name uptime-kuma \
+ louislam/uptime-kuma:1
+```
+
+That's it. Not kidding.
+
+### What just happened?
+
+| Flag | What it does |
+|---|---|
+| `-d` | Runs in the background (detached) |
+| `--restart=unless-stopped` | Auto-restarts if your server reboots |
+| `-p 3001:3001` | Makes it accessible on port 3001 |
+| `-v uptime-kuma:/app/data` | Saves your data in a Docker volume (survives restarts) |
+| `--name uptime-kuma` | Gives the container a human-readable name |
+
+## See It Live
+
+Open your browser and go to:
+
+```
+http://localhost:3001
+```
+
+You should see the Uptime Kuma setup screen. Create an admin account, add a monitor for `https://google.com` to test it, and watch the green dots roll in.
+
+**Congratulations. You just self-hosted your first tool.** 🎉
+
+## You Did It. Now What?
+
+You just proved to yourself that self-hosting works. Here's where to go next:
+
+1. **[Choosing a Server](/quick-start/choosing-a-server)** — Move from localhost to a real VPS so others can access your tools
+2. **[Docker Basics](/concepts/docker-basics)** — Understand what Docker Compose is (spoiler: it's the next level)
+3. **[Deploy Plausible](/deploy/plausible)** — Replace Google Analytics entirely
+4. **[The Bootstrapper Stack](/stacks/bootstrapper)** — Deploy an entire SaaS toolkit for $0/mo
+
+> 🏆 **The Verdict:** If this felt easy, that's because it *is* easy. The rest of our guides follow the same pattern: copy a config, run a command, own your software.
diff --git a/docs/app/quick-start/page.mdx b/docs/app/quick-start/page.mdx
new file mode 100644
index 0000000..a7e461e
--- /dev/null
+++ b/docs/app/quick-start/page.mdx
@@ -0,0 +1,88 @@
+---
+title: Quick Start
+description: "Your roadmap to self-hosting independence. Go from zero to a running infrastructure in under an hour."
+---
+
+import { Clock, Server, Shield, Rocket, ArrowRight, CheckCircle } from 'lucide-react'
+
+# Quick Start
+
+**Go from "I've never self-hosted anything" to "I'm running my own infrastructure" in under an hour.** This guide is your roadmap — follow it in order.
+
+
+
+
{"1"}
+
{"Understand the Basics"}
+
+ {" 5 min read"}
+
+
{"What self-hosting actually means, why you'd do it, and the three things you need. No jargon, no gatekeeping."}
+
+ {"What is Self-Hosting?"}
+
+
+
+
{"2"}
+
{"Deploy Your First Tool"}
+
+ {" 5 min hands-on"}
+
+
{"One Docker command. One running tool. Prove to yourself that self-hosting works. We start with Uptime Kuma — it's beautiful, useful, and takes 30 seconds."}
+
+ {"Your First Deployment"}
+
+
+
+
{"3"}
+
{"Get a Real Server"}
+
+ {" 15 min hands-on"}
+
+
{"Move from localhost to a VPS. We compare providers, recommend the best value, and walk you through SSH hardening, firewalls, and Docker installation."}
+
+ {"Choosing a Server"}
+
+
+
+
{"4"}
+
{"Set Up Domains & SSL"}
+
+ {" 10 min hands-on"}
+
+
{"Give your tools proper domains like "}{"uptime.yourdomain.com"}{" with automatic HTTPS. We cover Caddy, Nginx Proxy Manager, and Traefik."}
+
+ {"Setting Up a Reverse Proxy"}
+
+
+
+
{"✓"}
+
{"Deploy Your Starter Kit"}
+
+ {" 20 min hands-on"}
+
+
{"Your first real stack: Uptime Kuma + Plausible + Coolify. Three tools that replace ~$35/mo in SaaS, all running on a $6/mo server."}
+
+ {"The AltStack Starter Kit"}
+
+
+
+
+---
+
+## Prerequisites
+
+Before you start, you need:
+
+- **A computer with a terminal** — Mac Terminal, Windows PowerShell, or any Linux shell
+- **Docker installed** — [Get Docker](https://docs.docker.com/get-docker/) (3 minutes)
+- **$0–6/mo budget** — Free for localhost experiments, ~$6/mo for a real VPS
+
+That's genuinely it. No Kubernetes. No cloud certifications. No weekend-long setup marathons.
+
+## Fast Track (Experienced Users)
+
+Already comfortable with Docker and have a VPS? Skip straight to the good stuff:
+
+1. **[The Starter Kit](/quick-start/starter-kit)** — Deploy Uptime Kuma + Plausible + Coolify in 20 minutes
+2. **[Deploy Guides](/deploy)** — Pick any of our 65+ tools and ship it
+3. **[Curated Stacks](/stacks)** — Grab a complete toolkit for your use case
diff --git a/docs/app/quick-start/reverse-proxy/page.mdx b/docs/app/quick-start/reverse-proxy/page.mdx
new file mode 100644
index 0000000..c110fc8
--- /dev/null
+++ b/docs/app/quick-start/reverse-proxy/page.mdx
@@ -0,0 +1,145 @@
+---
+title: Setting Up a Reverse Proxy
+description: "How to use Caddy, Nginx Proxy Manager, or Traefik to give your self-hosted tools proper domains and automatic SSL."
+---
+
+import { Tabs } from 'nextra/components'
+
+# Setting Up a Reverse Proxy
+
+Right now your tools are running on ports like `:3001` or `:8080`. A **reverse proxy** is the traffic cop that maps a domain like `uptime.yourdomain.com` to your server's local port and handles SSL automatically.
+
+## Which one should I pick?
+
+- **Caddy:** Best for 99% of people. Zero-config SSL, human-readable config, extremely fast.
+- **Nginx Proxy Manager:** Best if you want a web UI to click and manage your domains.
+- **Traefik:** Best if you want a "hands-off" approach that auto-discovers new containers as you spin them up (complex but powerful).
+
+---
+
+
+
+ ### Caddy Setup
+ Caddy is the "batteries included" proxy. It just works.
+
+ #### 1. The Docker Compose
+ Create a folder for Caddy and add this `docker-compose.yml`:
+
+ ```yaml
+ version: '3.8'
+ services:
+ caddy:
+ image: caddy:2-alpine
+ restart: unless-stopped
+ ports:
+ - "80:80"
+ - "443:443"
+ - "443:443/udp"
+ volumes:
+ - ./Caddyfile:/etc/caddy/Caddyfile
+ - caddy_data:/data
+ - caddy_config:/config
+
+ volumes:
+ caddy_data:
+ caddy_config:
+ ```
+
+ #### 2. The Caddyfile
+ In the same folder, create a file named `Caddyfile`:
+
+ ```caddy
+ uptime.yourdomain.com {
+ reverse_proxy localhost:3001
+ }
+
+ plausible.yourdomain.com {
+ reverse_proxy localhost:8000
+ }
+ ```
+
+ #### 3. Start it
+ ```bash
+ docker compose up -d
+ ```
+
+
+
+ ### Nginx Proxy Manager (NPM) Setup
+ NPM gives you a beautiful web interface to manage your SSL and proxy hosts.
+
+ #### 1. The Docker Compose
+ ```yaml
+ version: '3.8'
+ services:
+ app:
+ image: 'jc21/nginx-proxy-manager:latest'
+ restart: unless-stopped
+ ports:
+ - '80:80'
+ - '81:81'
+ - '443:443'
+ volumes:
+ - ./data:/data
+ - ./letsencrypt:/etc/letsencrypt
+ ```
+
+ #### 2. Access the UI
+ 1. Visit `http://your-server-ip:81`
+ 2. Default credentials:
+ - Email: `admin@example.com`
+ - Password: `changeme`
+ 3. Change your login info immediately.
+ 4. Click **Proxy Hosts** -> **Add Proxy Host** to point your domain to your tool's port.
+
+
+
+ ### Traefik Setup
+ Traefik uses "labels" on your other containers to automatically route traffic.
+
+ #### 1. The Traefik Container
+ ```yaml
+ version: '3.8'
+ services:
+ traefik:
+ image: traefik:v3.0
+ command:
+ - "--api.insecure=true"
+ - "--providers.docker=true"
+ - "--entrypoints.web.address=:80"
+ - "--entrypoints.websecure.address=:443"
+ - "--certificatesresolvers.myresolver.acme.tlschallenge=true"
+ - "--certificatesresolvers.myresolver.acme.email=your-email@example.com"
+ - "--certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json"
+ ports:
+ - "80:80"
+ - "443:443"
+ - "8080:8080"
+ volumes:
+ - "/var/run/docker.sock:/var/run/docker.sock:ro"
+ - "./letsencrypt:/letsencrypt"
+ ```
+
+ #### 2. Routing a container
+ Add these labels to any *other* container you want to proxy:
+
+ ```yaml
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.mytool.rule=Host(`mytool.yourdomain.com`)"
+ - "traefik.http.routers.mytool.entrypoints=websecure"
+ - "traefik.http.routers.mytool.tls.certresolver=myresolver"
+ ```
+
+
+
+---
+
+## 🔒 A Note on SSL/TLS
+Every proxy above handles SSL certificates via **Let's Encrypt** automatically.
+
+**Crucial Step:** Before you start your proxy, your domain's **DNS A Record** must point to your server's public IP address. If the DNS isn't pointing correctly, Let's Encrypt will fail to issue a certificate.
+
+## Next Steps
+→ [Your First Deployment](/quick-start/first-deployment) — Connect your tools to your new proxy.
+→ [SSL/TLS Deep Dive](/concepts/ssl-tls) — How it works under the hood.
diff --git a/docs/app/quick-start/starter-kit/page.mdx b/docs/app/quick-start/starter-kit/page.mdx
new file mode 100644
index 0000000..53366c4
--- /dev/null
+++ b/docs/app/quick-start/starter-kit/page.mdx
@@ -0,0 +1,46 @@
+---
+title: The AltStack Starter Kit
+description: "The recommended first 3 tools to self-host: Uptime Kuma, Plausible, and Coolify. Your foundation for software independence."
+---
+
+# The AltStack Starter Kit
+
+If you're new to self-hosting, don't try to replace your entire SaaS stack on day one. Start with three tools that are immediately useful, easy to deploy, and will teach you how everything works.
+
+## The Stack
+
+| Tool | Replaces | Why It's First |
+|---|---|---|
+| [**Uptime Kuma**](/deploy/uptime-kuma) | Pingdom, UptimeRobot ($15/mo) | Monitors everything else you deploy. Set it up first. |
+| [**Plausible**](/deploy/plausible) | Google Analytics (free but creepy) | Privacy-respecting analytics. You'll see results immediately. |
+| [**Coolify**](/deploy/coolify) | Vercel, Heroku ($20+/mo) | Deploy future apps with git push. Your own PaaS. |
+
+**Total monthly cost:** ~$6 (one small VPS)
+**Total SaaS cost replaced:** ~$35+/mo
+
+## Deploy Order
+
+This order matters:
+
+### 1. Uptime Kuma (5 min)
+Your monitoring dashboard. Once it's running, add monitors for everything else you deploy. You'll always know if something goes down.
+
+→ [Deploy Uptime Kuma](/deploy/uptime-kuma)
+
+### 2. Plausible Analytics (10 min)
+Lightweight, cookie-free analytics. Add the tracking script to your site and immediately start seeing real visitor data — without the privacy guilt.
+
+→ [Deploy Plausible](/deploy/plausible)
+
+### 3. Coolify (15 min)
+This is the game-changer. Coolify turns your server into a self-hosted Vercel/Netlify. Git push → auto deploy. It'll become the foundation for everything you build.
+
+→ [Deploy Coolify](/deploy/coolify)
+
+## After the Starter Kit
+
+Once you're comfortable, explore the [Curated Stacks](/stacks) — pre-built bundles of tools designed for specific use cases:
+
+- 🚀 [The Bootstrapper Stack](/stacks/bootstrapper) — Full SaaS toolkit for $0/mo
+- 🔒 [The Privacy Stack](/stacks/privacy) — Maximum data sovereignty
+- 🤖 [The AI-First Stack](/stacks/ai-first) — Run LLMs and image gen locally
diff --git a/docs/app/quick-start/what-is-self-hosting/page.mdx b/docs/app/quick-start/what-is-self-hosting/page.mdx
new file mode 100644
index 0000000..8c5f80a
--- /dev/null
+++ b/docs/app/quick-start/what-is-self-hosting/page.mdx
@@ -0,0 +1,62 @@
+---
+title: What is Self-Hosting?
+description: "Self-hosting explained without the gatekeeping. What it is, why you'd do it, and what you actually need to get started."
+---
+
+# What is Self-Hosting?
+
+**In one sentence:** Self-hosting means running software on a server *you* control, instead of paying someone else to run it for you.
+
+That's it. That's the whole concept.
+
+When you use Gmail, Google runs the email server. When you use Notion, Notion Inc runs the database. When you self-host, **you** run these things — on a cheap VPS, a spare laptop, or even a Raspberry Pi.
+
+## Why Would You Do This?
+
+| Reason | What It Means |
+|---|---|
+| **💰 Cost** | Most open source tools are free. A $5/mo server can replace $200+/mo in SaaS subscriptions. |
+| **🔒 Privacy** | Your data lives on your server. No one mines it, sells it, or trains AI on it. |
+| **🎛️ Control** | No surprise feature removals. No price hikes. No "we're sunsetting this product" emails. |
+| **🧠 Learning** | You'll actually understand how software works. This makes you a better developer, designer, or founder. |
+
+## What You Actually Need
+
+Here's the brutal truth: **self-hosting requires effort**. Not *genius-level* effort, but *following-a-recipe* effort.
+
+You need three things:
+
+1. **A server** — A $4–6/mo VPS from Hetzner, [DigitalOcean](https://m.do.co/c/2ed27757a361), or similar. [We compare them →](/quick-start/choosing-a-server)
+2. **Docker** — The tool that packages software into neat, runnable containers. [Docker basics →](/concepts/docker-basics)
+3. **20 minutes of patience** — Most tools deploy in under 20 minutes if you follow our guides.
+
+> 🔥 **Pro Tip:** You don't need to know Linux commands by heart. You need to know how to copy-paste. Seriously.
+
+## The Self-Hosting Spectrum
+
+Not all self-hosting is equal. Here's where most people land:
+
+```
+Easy ─────────────────────────────────────── Hard
+
+Docker run Docker Compose Kubernetes
+(1 command) (1 YAML file) (for masochists)
+```
+
+**Good news:** 95% of the tools in our directory work with Docker Compose — a single file that describes your entire setup. Our guides always give you that file, ready to go.
+
+## Common Fears (Debunked)
+
+**"I'll get hacked"** → You're more likely to get breached through a SaaS provider's data leak than through a properly configured VPS with a firewall and SSH keys.
+
+**"I can't handle maintenance"** → Docker makes updates a two-command affair: `docker compose pull && docker compose up -d`. That's it.
+
+**"What if it breaks at 3 AM?"** → Set up [Uptime Kuma](/deploy/uptime-kuma) (takes 5 minutes) and you'll know before your users do.
+
+**"I don't have time"** → You don't have time to submit GDPR data requests to 47 SaaS vendors either. Pick your battles.
+
+## Next Steps
+
+Ready? Let's deploy something real.
+
+→ [Your First Deployment](/quick-start/first-deployment) — Get a tool running in 5 minutes flat.
diff --git a/docs/app/sitemap.ts b/docs/app/sitemap.ts
new file mode 100644
index 0000000..779852d
--- /dev/null
+++ b/docs/app/sitemap.ts
@@ -0,0 +1,60 @@
+import { MetadataRoute } from 'next'
+import path from 'path'
+import fs from 'fs'
+
+const BASE_URL = 'https://docs.thealtstack.com'
+
+/**
+ * Dynamically generates the sitemap for the docs site by scanning
+ * the app directory for all page.mdx files. This replaces the old
+ * static public/sitemap.xml that would drift out of sync.
+ */
+export default function sitemap(): MetadataRoute.Sitemap {
+  const appDir = path.join(process.cwd(), 'app')
+  const pages: string[] = []
+
+  // Recursively find all page.mdx files under app/.
+  function scan(dir: string, prefix: string) {
+    const entries = fs.readdirSync(dir, { withFileTypes: true })
+    for (const entry of entries) {
+      // Skip hidden dirs, node_modules, test dirs
+      if (entry.name.startsWith('.') || entry.name === 'node_modules' || entry.name === 'test') continue
+
+      if (entry.isDirectory()) {
+        scan(path.join(dir, entry.name), `${prefix}/${entry.name}`)
+      } else if (entry.name === 'page.mdx') {
+        // Root page.mdx → '/', nested → '/concepts/docker-basics'
+        pages.push(prefix || '/')
+      }
+    }
+  }
+
+  scan(appDir, '')
+
+  // Priority map for top-level sections (Record needs both type arguments)
+  const priorityMap: Record<string, number> = {
+    '/': 1.0,
+    '/quick-start': 0.9,
+    '/deploy': 0.8,
+    '/concepts': 0.8,
+    '/stacks': 0.7,
+  }
+
+  return pages.map(pagePath => {
+    // Priority: section first, then an exact-path entry, then the default.
+    const section = '/' + (pagePath.split('/')[1] || '')
+    const priority = priorityMap[section] ?? priorityMap[pagePath] ?? 0.6
+
+    // Determine change frequency
+    let changeFrequency: 'daily' | 'weekly' | 'monthly' | 'yearly' = 'monthly'
+    if (pagePath === '/') changeFrequency = 'weekly'
+    if (pagePath.startsWith('/deploy')) changeFrequency = 'weekly'
+
+    return {
+      url: `${BASE_URL}${pagePath}`,
+      lastModified: new Date(),
+      changeFrequency,
+      priority,
+    }
+  })
+}
diff --git a/docs/app/stacks/_meta.ts b/docs/app/stacks/_meta.ts
new file mode 100644
index 0000000..e44f495
--- /dev/null
+++ b/docs/app/stacks/_meta.ts
@@ -0,0 +1,21 @@
+import type { MetaRecord } from 'nextra'
+
+const meta: MetaRecord = {  // Nextra sidebar config for /stacks: key = route segment, insertion order = display order
+  bootstrapper: {
+    title: '🚀 The Bootstrapper Stack',
+  },
+  designer: {
+    title: '🎨 The Designer Stack',
+  },
+  'ai-first': {
+    title: '🤖 The AI-First Stack',
+  },
+  devops: {
+    title: '⚙️ The DevOps Stack',
+  },
+  privacy: {
+    title: '🔒 The Privacy Stack',
+  },
+}
+
+export default meta
diff --git a/docs/app/stacks/ai-first/page.mdx b/docs/app/stacks/ai-first/page.mdx
new file mode 100644
index 0000000..67ecd87
--- /dev/null
+++ b/docs/app/stacks/ai-first/page.mdx
@@ -0,0 +1,38 @@
+---
+title: "The AI-First Stack"
+description: "Own your AI. Run LLMs, image generation, and code assistants locally with zero API keys, zero usage limits, and zero data leaving your machine."
+---
+
+# 🤖 The AI-First Stack
+
+**Own your AI.** Run powerful AI locally. No API keys, no usage limits, no data leaving your machine.
+
+| What | Tool | Replaces |
+|---|---|---|
+| LLM Inference | [Llama](/deploy/llama) | ChatGPT ($20/mo) |
+| Coding Model | [DeepSeek](/deploy/deepseek) | GitHub Copilot ($10/mo) |
+| Image Generation | [Stable Diffusion](/deploy/stable-diffusion) | Midjourney ($10/mo) |
+| IDE Assistant | [Continue.dev](/deploy/continue-dev) | Copilot extension ($10/mo) |
+| Code Autocomplete | [Tabby](/deploy/tabby) | Tabnine ($12/mo) |
+
+**Total saved: ~$69/mo** (nice)
+
+## Hardware Requirements
+
+Running AI locally requires GPU horsepower. Here's what you need:
+
+| Model Type | Minimum VRAM | Recommended GPU |
+|---|---|---|
+| Small LLMs (7B params) | 6 GB | RTX 3060, RTX 4060 |
+| Large LLMs (70B params) | 48 GB | 2× RTX 3090, A6000 |
+| Image Generation (SDXL) | 8 GB | RTX 3070+ |
+| Code Models (DeepSeek) | 8 GB | RTX 4060+ |
+
+> 🔥 **Pro Tip:** Start with Ollama + Llama 3. It runs well on an 8GB GPU and gives you a local ChatGPT replacement in under 5 minutes.
+
+## Deploy Guides
+
+→ [Deploy Ollama (LLM Runner)](/deploy/ollama)
+→ [Deploy Stable Diffusion](/deploy/stable-diffusion)
+→ [Deploy Tabby (Code AI)](/deploy/tabby)
+→ [Deploy Continue.dev](/deploy/continue-dev)
diff --git a/docs/app/stacks/bootstrapper/page.mdx b/docs/app/stacks/bootstrapper/page.mdx
new file mode 100644
index 0000000..c54e566
--- /dev/null
+++ b/docs/app/stacks/bootstrapper/page.mdx
@@ -0,0 +1,70 @@
+---
+title: "The Bootstrapper Stack"
+description: "Launch your SaaS for $0/mo. The complete open source stack with database, auth, deployment, analytics, project management, and design."
+---
+
+# 🚀 The Bootstrapper Stack
+
+**Launch for $0/mo.** Everything you need to build, ship, and manage a SaaS product without spending a dime on software.
+
+| What | Tool | Monthly SaaS Cost Replaced |
+|---|---|---|
+| Database & Auth | [Supabase](/deploy/supabase) | ~$25/mo (Firebase) |
+| Project Management | [Plane](/deploy/plane) | ~$10/mo (Jira) |
+| Team Chat | [Rocket.Chat](/deploy/rocketchat) | ~$7/mo (Slack) |
+| Deployment PaaS | [Coolify](/deploy/coolify) | ~$20/mo (Vercel Pro) |
+| Web Analytics | [Plausible](/deploy/plausible) | ~$9/mo (Plausible Cloud) |
+| UI/UX Design | [Penpot](/deploy/penpot) | ~$15/mo (Figma) |
+
+**Total saved: ~$310/mo** · **Your cost: ~$6/mo (one VPS)**
+
+## Deploy Order
+
+Deploy in this order — each tool builds on the previous:
+
+### 1. Coolify (your PaaS)
+Coolify turns your VPS into a self-hosted Vercel. Once it's running, you can deploy everything else *through it*.
+
+→ [Deploy Coolify](/deploy/coolify)
+
+### 2. Supabase (your backend)
+Database, authentication, storage, and realtime — all in one. This is your app's backbone.
+
+→ [Deploy Supabase](/deploy/supabase)
+
+### 3. Plausible (your analytics)
+Drop Google Analytics. Plausible is lightweight, cookie-free, and respects your users' privacy.
+
+→ [Deploy Plausible](/deploy/plausible)
+
+### 4. Plane (your project board)
+Jira without the Jira experience. Clean, fast, issue tracking that doesn't make you want to quit.
+
+→ [Deploy Plane](/deploy/plane)
+
+### 5. Rocket.Chat (your team chat)
+Slack without the $7/user/mo. Self-hosted, full-featured, and it doesn't sell your conversations.
+
+→ [Deploy Rocket.Chat](/deploy/rocketchat)
+
+### 6. Penpot (your design tool)
+Open source Figma alternative. Real-time collaboration, SVG-based, and free forever.
+
+→ [Deploy Penpot](/deploy/penpot)
+
+## Server Requirements
+
+| Spec | Recommended |
+|---|---|
+| RAM | 4 GB minimum (8 GB ideal) |
+| CPU | 2 vCPU |
+| Storage | 40 GB SSD |
+| OS | Ubuntu 22.04+ or Debian 12+ |
+| Monthly cost | ~$6 (Hetzner CX22) |
+
+## Who This Is For
+
+- Solo founders building a SaaS MVP
+- Early-stage startups that refuse to burn cash on software
+- Developers who want to control their entire stack
+- Anyone tired of the "free tier" → "pay us now" bait-and-switch
diff --git a/docs/app/stacks/designer/page.mdx b/docs/app/stacks/designer/page.mdx
new file mode 100644
index 0000000..971ace1
--- /dev/null
+++ b/docs/app/stacks/designer/page.mdx
@@ -0,0 +1,37 @@
+---
+title: "The Designer Stack"
+description: "Ditch Creative Cloud. Professional open source design tools for UI/UX, photo editing, digital art, and documentation."
+---
+
+# 🎨 The Designer Stack
+
+**Ditch Creative Cloud.** Professional design tools that rival Adobe — from UI/UX prototyping to photo editing and digital art.
+
+| What | Tool | Replaces |
+|---|---|---|
+| UI/UX Design | [Penpot](/deploy/penpot) | Figma ($15/mo) |
+| Photo Editing | [GIMP](/deploy/gimp) | Photoshop ($22/mo) |
+| Digital Art | [Krita](/deploy/krita) | Procreate / Illustrator ($22/mo) |
+| Knowledge Base | [AppFlowy](/deploy/appflowy) | Notion ($10/mo) |
+
+**Total saved: ~$110/mo**
+
+## The Honest Take
+
+Let's be real: these tools don't have feature parity with Adobe. But for 90% of design work — UI mockups, web design, photo editing, illustrations — they're more than enough. And the gap closes every month.
+
+**Where they shine:**
+- Penpot is genuinely better than Figma for developers (SVG-native, CSS grid support)
+- GIMP handles 95% of what Photoshop does
+- Krita is beloved by digital artists — many prefer it over paid alternatives
+
+**Where they struggle:**
+- Video editing (no open source Premiere replacement yet)
+- Print design workflows (InDesign still wins here)
+
+## Deploy Guides
+
+→ [Deploy Penpot](/deploy/penpot)
+→ [Deploy GIMP](/deploy/gimp)
+→ [Deploy Krita](/deploy/krita)
+→ [Deploy AppFlowy](/deploy/appflowy)
diff --git a/docs/app/stacks/devops/page.mdx b/docs/app/stacks/devops/page.mdx
new file mode 100644
index 0000000..ca9b43e
--- /dev/null
+++ b/docs/app/stacks/devops/page.mdx
@@ -0,0 +1,46 @@
+---
+title: "The DevOps Stack"
+description: "Self-host your entire infrastructure. Backend, hosting, deployment, analytics, and monitoring — zero vendor lock-in."
+---
+
+# ⚙️ The DevOps Stack
+
+**Self-host everything.** From backend to hosting to monitoring — deploy and manage your entire infrastructure on your own terms.
+
+| What | Tool | Replaces |
+|---|---|---|
+| Backend as a Service | [Supabase](/deploy/supabase) | Firebase ($25+/mo) |
+| PaaS (Deployment) | [Coolify](/deploy/coolify) | Vercel Pro ($20/mo) |
+| Git Deployment | [Dokku](/deploy/dokku) | Heroku ($25/mo) |
+| Web Analytics | [Plausible](/deploy/plausible) | Google Analytics (free, but your data) |
+| Product Analytics | [PostHog](/deploy/posthog) | Amplitude ($49/mo) |
+
+**Total saved: ~$375/mo**
+
+## The Philosophy
+
+The DevOps Stack is for teams that refuse to be dependent on any single vendor. Every tool here:
+
+- **Runs on standard Docker** — migrate anywhere in minutes
+- **Uses Postgres** — your data is in an open format
+- **Has an active community** — you're never truly on your own
+- **Scales horizontally** — grows with you without pricing tiers
+
+## Server Requirements
+
+This is the most resource-intensive stack. You need a proper server:
+
+| Spec | Recommended |
+|---|---|
+| RAM | 8 GB minimum (16 GB ideal) |
+| CPU | 4 vCPU |
+| Storage | 80 GB SSD |
+| Cost | ~$15/mo (Hetzner CX32) |
+
+## Deploy Guides
+
+→ [Deploy Supabase](/deploy/supabase)
+→ [Deploy Coolify](/deploy/coolify)
+→ [Deploy Dokku](/deploy/dokku)
+→ [Deploy Plausible](/deploy/plausible)
+→ [Deploy PostHog](/deploy/posthog)
diff --git a/docs/app/stacks/page.mdx b/docs/app/stacks/page.mdx
new file mode 100644
index 0000000..79cc0d2
--- /dev/null
+++ b/docs/app/stacks/page.mdx
@@ -0,0 +1,152 @@
+---
+title: Curated Stacks
+description: "Pre-tested bundles of open source tools designed for specific use cases. Save hundreds per month by self-hosting an entire toolkit."
+---
+
+import { Rocket, Palette, Bot, Settings, Lock, ArrowRight, DollarSign, Layers } from 'lucide-react'
+
+# Curated Stacks
+
+**A Curated Stack is a pre-tested bundle of open source tools designed for a specific use case.** Instead of researching, comparing, and testing 50 tools yourself, we've done it for you.
+
+Each stack tells you exactly which tools to deploy, in what order, on what hardware — and how much SaaS money you'll save.
+
+## The 5 Stacks
+
+
+
+
+
+
+
+
+
The Bootstrapper Stack
+
+ Full SaaS toolkit for solo founders. Database, auth, deployment, analytics, project management, and design.
+
+ Saves ~$310/mo
+ 6 tools
+ 4 GB RAM
+
+
+
+
+
+
+ Professional design tools that rival Adobe. UI/UX prototyping, photo editing, and digital art — all free.
+
+ Saves ~$110/mo
+ 4 tools
+ 4 GB RAM
+
+
+
+
+
+
+
+
+
+
The AI-First Stack
+
+ Run powerful AI locally. LLMs, image generation, and code completion — no API keys, no usage limits.
+
+ Saves ~$69/mo
+ 5 tools
+ 8+ GB RAM
+
+
+
+
+
+
+
+
+
+
The DevOps Stack
+
+ From backend to hosting to monitoring — deploy and manage your entire infrastructure with zero vendor lock-in.
+
+ Saves ~$375/mo
+ 5 tools
+ 4 GB RAM
+
+
+
+
+
+
+
+
+
+
The Privacy Stack
+
+ Every tool runs on your infrastructure. Your data never touches a third-party server. Maximum data sovereignty.
+
+ Saves ~$185/mo
+ 6 tools
+ 4 GB RAM
+
+
+
+
+
+
+---
+
+## How We Pick Tools
+
+Every tool in a Curated Stack must pass five filters:
+
+| Criteria | What It Means |
+|---|---|
+| **Actively maintained** | Regular commits, responsive maintainers, not abandonware |
+| **Docker-native** | Ships with official or well-maintained Docker images |
+| **Production-proven** | Used by real teams and individuals, not just demo projects |
+| **Honest quality** | We've actually deployed it. If the UX is rough, we say so |
+| **No vendor lock-in** | Standard data formats, exportable data, no proprietary traps |
+
+We don't include tools because they're popular. We include them because they actually work when you deploy them on a $6/mo VPS.
+
+## Which Stack Is Right for You?
+
+| If you are... | Start with |
+|---|---|
+| A solo founder building a SaaS MVP | [The Bootstrapper Stack](/stacks/bootstrapper) |
+| A designer ditching Adobe subscriptions | [The Designer Stack](/stacks/designer) |
+| An AI enthusiast who wants to run models locally | [The AI-First Stack](/stacks/ai-first) |
+| A developer managing your own infrastructure | [The DevOps Stack](/stacks/devops) |
+| Anyone who cares deeply about data privacy | [The Privacy Stack](/stacks/privacy) |
+
+## Mixing & Matching
+
+Stacks aren't exclusive — tools overlap by design. Many teams run a combination:
+
+- **Bootstrapper + AI-First** → SaaS toolkit with local AI capabilities
+- **DevOps + Privacy** → Full infrastructure with maximum data sovereignty
+- **Designer + Bootstrapper** → Creative team with SaaS backbone
+
+The only constraint is hardware. Each stack page lists specific RAM, CPU, and storage requirements. If you're running multiple stacks, add the requirements together and size your VPS accordingly.
+
+> 🔥 **Pro Tip:** Start with one stack. Get comfortable. Then layer on tools from other stacks as you need them. Trying to deploy everything at once is how people burn out.
+
+## Build Your Own Stack
+
+Don't see your perfect combination? Browse the [Deploy Guides](/deploy) — every tool has an independent deployment page. Pick the ones that fit your workflow and build a custom stack.
+
+If you want to suggest a new Curated Stack, [open an issue on GitHub](https://github.com/AltStackHQ/docs/issues) and we'll consider adding it.
diff --git a/docs/app/stacks/privacy/page.mdx b/docs/app/stacks/privacy/page.mdx
new file mode 100644
index 0000000..3df7744
--- /dev/null
+++ b/docs/app/stacks/privacy/page.mdx
@@ -0,0 +1,47 @@
+---
+title: "The Privacy Stack"
+description: "Zero data leaks. Every tool runs on your infrastructure. Your data never touches a third-party server."
+---
+
+# 🔒 The Privacy Stack
+
+**Zero data leaks.** Every tool runs on your infrastructure. Your data never touches a third-party server. For teams and individuals who take privacy seriously.
+
+| What | Tool | Replaces |
+|---|---|---|
+| Password Manager | [Bitwarden](/deploy/bitwarden) | 1Password ($3/mo) |
+| Team Chat | [Mattermost](/deploy/mattermost) | Slack ($7/mo) |
+| Video Calls | [Jitsi Meet](/deploy/jitsi-meet) | Zoom ($14/mo) |
+| Analytics | [Matomo](/deploy/matomo) | Google Analytics (free, sells your data) |
+| Notes & Docs | [AppFlowy](/deploy/appflowy) | Notion ($10/mo) |
+| Knowledge Base | [Affine](/deploy/affine) | Confluence ($6/mo) |
+
+**Total saved: ~$185/mo**
+
+## Why Privacy Matters
+
+It's not about having "something to hide." It's about:
+
+- **Compliance**: GDPR, HIPAA, SOC 2 — self-hosting makes audits simpler
+- **IP Protection**: Your internal docs and conversations stay *internal*
+- **Trust**: Your users' data is on your servers, not in someone else's quarterly report
+- **Sovereignty**: No foreign government can compel access to your data on a third-party server
+
+## The Privacy Audit
+
+For each tool in this stack, we verify:
+
+- ✅ **No telemetry** (or can be disabled)
+- ✅ **No external API calls** after deployment
+- ✅ **Data stored locally** in your Postgres / filesystem
+- ✅ **End-to-end encryption** available where applicable
+- ✅ **Self-contained** — works offline
+
+## Deploy Guides
+
+→ [Deploy Bitwarden](/deploy/bitwarden)
+→ [Deploy Mattermost](/deploy/mattermost)
+→ [Deploy Jitsi Meet](/deploy/jitsi-meet)
+→ [Deploy Matomo](/deploy/matomo)
+→ [Deploy AppFlowy](/deploy/appflowy)
+→ [Deploy Affine](/deploy/affine)
diff --git a/docs/app/why/page.mdx b/docs/app/why/page.mdx
new file mode 100644
index 0000000..ce20318
--- /dev/null
+++ b/docs/app/why/page.mdx
@@ -0,0 +1,79 @@
+---
+title: Why These Docs Exist
+description: "The AltStack Docs manifesto. Why we built these guides, what makes them different, and the rules we follow."
+---
+
+# Why These Docs Exist
+
+**Most self-hosting documentation is terrible.** Not because the tools are bad — because the docs are written by developers who already understand everything, for developers who already understand everything.
+
+We built these docs to fix that.
+
+## The Problem
+
+Go try to self-host any popular open-source tool right now. Here's what you'll find:
+
+- **Incomplete `docker-compose.yml` files** that reference environment variables nobody explains
+- **"Getting Started" guides** that skip the 3 steps where you actually get stuck
+- **Documentation written in 2019** for a codebase that's been rewritten twice since
+- **The dreaded "see the wiki"** link that leads to 47 half-finished pages
+
+You're not stupid for finding this confusing. The documentation is genuinely bad.
+
+## Our Philosophy
+
+Every guide in these docs follows a simple principle:
+
+> **If you can't go from zero to a working deployment by following this page alone, the page is broken.**
+
+We don't write theoretical explanations of how Docker networking works and then wish you luck. We give you the config file. We explain what each line does. We tell you where it'll probably break and how to fix it.
+
+## The 4 Rules
+
+These aren't suggestions — they're the editorial standard every page must meet.
+
+### 1. Every guide ends with a working deployment
+
+Not "and then configure it to your needs." Not "refer to the upstream docs for advanced configuration." You will have a running tool by the end of the page. Period.
+
+### 2. Every config is tested and copy-pasteable
+
+We don't write configs from memory. Every `docker-compose.yml` in these docs has been deployed, broken, fixed, and deployed again. You can copy-paste them and they will work.
+
+### 3. Every tool gets an honest verdict
+
+We'll tell you when a tool is incredible. We'll also tell you when it's buggy, when the mobile app is unusable, or when you should just pay for the SaaS version. We don't have sponsors. We don't have affiliate deals. We have opinions.
+
+### 4. We don't waste your time with filler
+
+No "Introduction to What This Tool Is" sections that restate the tool's homepage copy. No "Prerequisites: a computer with an internet connection." If you're reading this, you know what a terminal is. Let's deploy something.
+
+## What Makes Us Different
+
+| Typical Docs | AltStack Docs |
+|---|---|
+| "Configure the environment variables as needed" | Here's every variable, what it does, and the sane default |
+| "Deploy using Docker" (no compose file provided) | Full `docker-compose.yml` ready to copy |
+| Written by the tool's maintainer (biased) | Written by users who deploy these tools (honest) |
+| Assumes you've read 12 other pages first | Self-contained. One page = one working deployment |
+| Last updated 2 years ago | Actively maintained with version-specific notes |
+
+## Who's Behind This
+
+These docs are part of [The AltStack](https://thealtstack.com) — **The World's First Sovereign Infrastructure Engine**. We're a curated directory that helps you find, compare, and deploy alternatives to proprietary tools.
+
+We believe in software independence: the right to run your own tools, on your own servers, under your own terms. These docs are how we make that practical, not just philosophical.
+
+## Where to Start
+
+If you're brand new to self-hosting:
+
+→ **[Quick Start](/quick-start)** — From zero to your first deployment in under 20 minutes
+
+If you know what you're doing and want a config:
+
+→ **[Deploy Guides](/deploy)** — 65+ tools with tested Docker Compose files
+
+If you want a complete toolkit:
+
+→ **[Curated Stacks](/stacks)** — Pre-built bundles for bootstrappers, designers, DevOps, AI, and privacy
diff --git a/docs/components/AIChatLinks.tsx b/docs/components/AIChatLinks.tsx
new file mode 100644
index 0000000..f0e2a2b
--- /dev/null
+++ b/docs/components/AIChatLinks.tsx
@@ -0,0 +1,39 @@
+'use client'
+
+import { usePathname } from 'next/navigation'
+
+export default function AIChatLinks() {
+ const pathname = usePathname()
+ const currentUrl = `https://docs.thealtstack.com${pathname === '/' ? '' : pathname}`
+
+ const prompt = `Read the documentation at ${currentUrl} and use the context from https://docs.thealtstack.com/llms.txt to help me understand or address my problem regarding this page.`
+ const encodedPrompt = encodeURIComponent(prompt)
+
+ const chatGptUrl = `https://chatgpt.com/?q=${encodedPrompt}&hints=search`
+ const claudeUrl = `https://claude.ai/new?q=${encodedPrompt}`
+
+ return (
+
+ )
+}
diff --git a/docs/components/ContactForm.tsx b/docs/components/ContactForm.tsx
new file mode 100644
index 0000000..178d661
--- /dev/null
+++ b/docs/components/ContactForm.tsx
@@ -0,0 +1,127 @@
+'use client'
+
+import { useState } from 'react'
+import { Loader2, Send } from 'lucide-react'
+import { motion } from 'framer-motion'
+
+export default function ContactForm() {
+ const [isLoading, setIsLoading] = useState(false)
+ const [status, setStatus] = useState<'idle' | 'success' | 'error'>('idle')
+
+ // Handles contact-form submission: simulates a short delay, then opens the
+ // user's mail client via a mailto: link (no backend exists for this form).
+ async function handleSubmit(e: React.FormEvent) {
+ e.preventDefault()
+ setIsLoading(true)
+
+ // Simulate network request
+ await new Promise(resolve => setTimeout(resolve, 1000))
+
+ // NOTE(review): `e.currentTarget` is read *after* the await above. Per the DOM
+ // spec, Event.currentTarget is only set while the handler is being dispatched
+ // and is null once the synchronous phase ends, so this FormData construction
+ // will likely throw at runtime. Capture `const form = e.currentTarget` before
+ // the first await and use that instead — confirm against a live run.
+ const formData = new FormData(e.currentTarget)
+ const data = Object.fromEntries(formData.entries())
+
+ // Construct mailto link as fallback since we don't have a backend
+ const subject = encodeURIComponent(`Contact Form: ${data.subject}`)
+ const body = encodeURIComponent(`Name: ${data.name}\nEmail: ${data.email}\n\nMessage:\n${data.message}`)
+
+ // Hand off to the user's default mail client; nothing is sent by this app.
+ window.location.href = `mailto:hello@thealtstack.com?subject=${subject}&body=${body}`
+
+ // NOTE(review): status is set to 'success' unconditionally — there is no way
+ // to detect whether the mailto handler actually opened; the 'error' state in
+ // the status union is never reachable from this handler.
+ setIsLoading(false)
+ setStatus('success')
+ }
+
+ return (
+
+
+
+
+ Get in Touch
+
+
+ Have a question regarding self-hosting or the AltStack? We're here to help.
+
+
+
+
+
+
+ )
+}
diff --git a/docs/mdx-components.tsx b/docs/mdx-components.tsx
new file mode 100644
index 0000000..39e51ba
--- /dev/null
+++ b/docs/mdx-components.tsx
@@ -0,0 +1 @@
+export { useMDXComponents } from 'nextra-theme-docs'
diff --git a/docs/next-env.d.ts b/docs/next-env.d.ts
new file mode 100644
index 0000000..830fb59
--- /dev/null
+++ b/docs/next-env.d.ts
@@ -0,0 +1,6 @@
+///
+///
+///
+
+// NOTE: This file should not be edited
+// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
diff --git a/docs/next.config.mjs b/docs/next.config.mjs
new file mode 100644
index 0000000..95381d5
--- /dev/null
+++ b/docs/next.config.mjs
@@ -0,0 +1,18 @@
+import nextra from 'nextra'
+
+const withNextra = nextra({
+ defaultShowCopyCode: true,
+ search: {
+ codeblocks: true,
+ },
+})
+
+export default withNextra({
+ reactStrictMode: true,
+ images: {
+ unoptimized: true,
+ },
+})
+
+
+
diff --git a/docs/package-lock.json b/docs/package-lock.json
new file mode 100644
index 0000000..6b78a45
--- /dev/null
+++ b/docs/package-lock.json
@@ -0,0 +1,6919 @@
+{
+ "name": "altstack-docs",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "altstack-docs",
+ "version": "1.0.0",
+ "dependencies": {
+ "@next/mdx": "^15.1.0",
+ "@tailwindcss/postcss": "^4.2.0",
+ "@types/mdx": "^2.0.13",
+ "framer-motion": "^12.34.2",
+ "lucide-react": "^0.574.0",
+ "next": "^15.1.0",
+ "next-themes": "^0.4.6",
+ "nextra": "^4.0.0",
+ "nextra-theme-docs": "^4.0.0",
+ "react": "^19.0.0",
+ "react-dom": "^19.0.0"
+ },
+ "devDependencies": {
+ "@types/node": "^25.3.0",
+ "@types/react": "^19.0.0",
+ "autoprefixer": "^10.4.24",
+ "pagefind": "^1.4.0",
+ "postcss": "^8.5.6",
+ "tailwindcss": "^4.2.0",
+ "typescript": "^5.7.0"
+ }
+ },
+ "node_modules/@alloc/quick-lru": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
+ "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@antfu/install-pkg": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@antfu/install-pkg/-/install-pkg-1.1.0.tgz",
+ "integrity": "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==",
+ "dependencies": {
+ "package-manager-detector": "^1.3.0",
+ "tinyexec": "^1.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ }
+ },
+ "node_modules/@braintree/sanitize-url": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.1.2.tgz",
+ "integrity": "sha512-jigsZK+sMF/cuiB7sERuo9V7N9jx+dhmHHnQyDSVdpZwVutaBu7WvNYqMDLSgFgfB30n452TP3vjDAvFC973mA=="
+ },
+ "node_modules/@chevrotain/cst-dts-gen": {
+ "version": "11.1.1",
+ "resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.1.1.tgz",
+ "integrity": "sha512-fRHyv6/f542qQqiRGalrfJl/evD39mAvbJLCekPazhiextEatq1Jx1K/i9gSd5NNO0ds03ek0Cbo/4uVKmOBcw==",
+ "dependencies": {
+ "@chevrotain/gast": "11.1.1",
+ "@chevrotain/types": "11.1.1",
+ "lodash-es": "4.17.23"
+ }
+ },
+ "node_modules/@chevrotain/gast": {
+ "version": "11.1.1",
+ "resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.1.1.tgz",
+ "integrity": "sha512-Ko/5vPEYy1vn5CbCjjvnSO4U7GgxyGm+dfUZZJIWTlQFkXkyym0jFYrWEU10hyCjrA7rQtiHtBr0EaZqvHFZvg==",
+ "dependencies": {
+ "@chevrotain/types": "11.1.1",
+ "lodash-es": "4.17.23"
+ }
+ },
+ "node_modules/@chevrotain/regexp-to-ast": {
+ "version": "11.1.1",
+ "resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.1.1.tgz",
+ "integrity": "sha512-ctRw1OKSXkOrR8VTvOxrQ5USEc4sNrfwXHa1NuTcR7wre4YbjPcKw+82C2uylg/TEwFRgwLmbhlln4qkmDyteg=="
+ },
+ "node_modules/@chevrotain/types": {
+ "version": "11.1.1",
+ "resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.1.1.tgz",
+ "integrity": "sha512-wb2ToxG8LkgPYnKe9FH8oGn3TMCBdnwiuNC5l5y+CtlaVRbCytU0kbVsk6CGrqTL4ZN4ksJa0TXOYbxpbthtqw=="
+ },
+ "node_modules/@chevrotain/utils": {
+ "version": "11.1.1",
+ "resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.1.1.tgz",
+ "integrity": "sha512-71eTYMzYXYSFPrbg/ZwftSaSDld7UYlS8OQa3lNnn9jzNtpFbaReRRyghzqS7rI3CDaorqpPJJcXGHK+FE1TVQ=="
+ },
+ "node_modules/@emnapi/runtime": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.8.1.tgz",
+ "integrity": "sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==",
+ "optional": true,
+ "dependencies": {
+ "tslib": "^2.4.0"
+ }
+ },
+ "node_modules/@floating-ui/core": {
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.4.tgz",
+ "integrity": "sha512-C3HlIdsBxszvm5McXlB8PeOEWfBhcGBTZGkGlWc2U0KFY5IwG5OQEuQ8rq52DZmcHDlPLd+YFBK+cZcytwIFWg==",
+ "dependencies": {
+ "@floating-ui/utils": "^0.2.10"
+ }
+ },
+ "node_modules/@floating-ui/dom": {
+ "version": "1.7.5",
+ "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.5.tgz",
+ "integrity": "sha512-N0bD2kIPInNHUHehXhMke1rBGs1dwqvC9O9KYMyyjK7iXt7GAhnro7UlcuYcGdS/yYOlq0MAVgrow8IbWJwyqg==",
+ "dependencies": {
+ "@floating-ui/core": "^1.7.4",
+ "@floating-ui/utils": "^0.2.10"
+ }
+ },
+ "node_modules/@floating-ui/react": {
+ "version": "0.26.28",
+ "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.26.28.tgz",
+ "integrity": "sha512-yORQuuAtVpiRjpMhdc0wJj06b9JFjrYF4qp96j++v2NBpbi6SEGF7donUJ3TMieerQ6qVkAv1tgr7L4r5roTqw==",
+ "dependencies": {
+ "@floating-ui/react-dom": "^2.1.2",
+ "@floating-ui/utils": "^0.2.8",
+ "tabbable": "^6.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=16.8.0",
+ "react-dom": ">=16.8.0"
+ }
+ },
+ "node_modules/@floating-ui/react-dom": {
+ "version": "2.1.7",
+ "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.7.tgz",
+ "integrity": "sha512-0tLRojf/1Go2JgEVm+3Frg9A3IW8bJgKgdO0BN5RkF//ufuz2joZM63Npau2ff3J6lUVYgDSNzNkR+aH3IVfjg==",
+ "dependencies": {
+ "@floating-ui/dom": "^1.7.5"
+ },
+ "peerDependencies": {
+ "react": ">=16.8.0",
+ "react-dom": ">=16.8.0"
+ }
+ },
+ "node_modules/@floating-ui/utils": {
+ "version": "0.2.10",
+ "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz",
+ "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ=="
+ },
+ "node_modules/@formatjs/intl-localematcher": {
+ "version": "0.5.10",
+ "resolved": "https://registry.npmjs.org/@formatjs/intl-localematcher/-/intl-localematcher-0.5.10.tgz",
+ "integrity": "sha512-af3qATX+m4Rnd9+wHcjJ4w2ijq+rAVP3CCinJQvFv1kgSu1W6jypUmvleJxcewdxmutM8dmIRZFxO/IQBZmP2Q==",
+ "dependencies": {
+ "tslib": "2"
+ }
+ },
+ "node_modules/@headlessui/react": {
+ "version": "2.2.9",
+ "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-2.2.9.tgz",
+ "integrity": "sha512-Mb+Un58gwBn0/yWZfyrCh0TJyurtT+dETj7YHleylHk5od3dv2XqETPGWMyQ5/7sYN7oWdyM1u9MvC0OC8UmzQ==",
+ "dependencies": {
+ "@floating-ui/react": "^0.26.16",
+ "@react-aria/focus": "^3.20.2",
+ "@react-aria/interactions": "^3.25.0",
+ "@tanstack/react-virtual": "^3.13.9",
+ "use-sync-external-store": "^1.5.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "react": "^18 || ^19 || ^19.0.0-rc",
+ "react-dom": "^18 || ^19 || ^19.0.0-rc"
+ }
+ },
+ "node_modules/@iconify/types": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@iconify/types/-/types-2.0.0.tgz",
+ "integrity": "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg=="
+ },
+ "node_modules/@iconify/utils": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@iconify/utils/-/utils-3.1.0.tgz",
+ "integrity": "sha512-Zlzem1ZXhI1iHeeERabLNzBHdOa4VhQbqAcOQaMKuTuyZCpwKbC2R4Dd0Zo3g9EAc+Y4fiarO8HIHRAth7+skw==",
+ "dependencies": {
+ "@antfu/install-pkg": "^1.1.0",
+ "@iconify/types": "^2.0.0",
+ "mlly": "^1.8.0"
+ }
+ },
+ "node_modules/@img/colour": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/@img/colour/-/colour-1.0.0.tgz",
+ "integrity": "sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==",
+ "optional": true,
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@img/sharp-darwin-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz",
+ "integrity": "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-arm64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-darwin-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz",
+ "integrity": "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-x64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-libvips-darwin-arm64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz",
+ "integrity": "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-darwin-x64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz",
+ "integrity": "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-arm": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz",
+ "integrity": "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==",
+ "cpu": [
+ "arm"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-arm64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz",
+ "integrity": "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-ppc64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.4.tgz",
+ "integrity": "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-riscv64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-riscv64/-/sharp-libvips-linux-riscv64-1.2.4.tgz",
+ "integrity": "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-s390x": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.4.tgz",
+ "integrity": "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-x64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz",
+ "integrity": "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linuxmusl-arm64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz",
+ "integrity": "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linuxmusl-x64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz",
+ "integrity": "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-linux-arm": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz",
+ "integrity": "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==",
+ "cpu": [
+ "arm"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz",
+ "integrity": "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-ppc64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.5.tgz",
+ "integrity": "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-ppc64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-riscv64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-riscv64/-/sharp-linux-riscv64-0.34.5.tgz",
+ "integrity": "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==",
+ "cpu": [
+ "riscv64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-riscv64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-s390x": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.5.tgz",
+ "integrity": "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==",
+ "cpu": [
+ "s390x"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-s390x": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz",
+ "integrity": "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-x64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linuxmusl-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz",
+ "integrity": "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linuxmusl-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz",
+ "integrity": "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-wasm32": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.5.tgz",
+ "integrity": "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==",
+ "cpu": [
+ "wasm32"
+ ],
+ "optional": true,
+ "dependencies": {
+ "@emnapi/runtime": "^1.7.0"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-win32-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz",
+ "integrity": "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-win32-ia32": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.5.tgz",
+ "integrity": "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==",
+ "cpu": [
+ "ia32"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-win32-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz",
+ "integrity": "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@mdx-js/mdx": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.1.tgz",
+ "integrity": "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdx": "^2.0.0",
+ "acorn": "^8.0.0",
+ "collapse-white-space": "^2.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "estree-util-scope": "^1.0.0",
+ "estree-walker": "^3.0.0",
+ "hast-util-to-jsx-runtime": "^2.0.0",
+ "markdown-extensions": "^2.0.0",
+ "recma-build-jsx": "^1.0.0",
+ "recma-jsx": "^1.0.0",
+ "recma-stringify": "^1.0.0",
+ "rehype-recma": "^1.0.0",
+ "remark-mdx": "^3.0.0",
+ "remark-parse": "^11.0.0",
+ "remark-rehype": "^11.0.0",
+ "source-map": "^0.7.0",
+ "unified": "^11.0.0",
+ "unist-util-position-from-estree": "^2.0.0",
+ "unist-util-stringify-position": "^4.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/@mermaid-js/parser": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-1.0.0.tgz",
+ "integrity": "sha512-vvK0Hi/VWndxoh03Mmz6wa1KDriSPjS2XMZL/1l19HFwygiObEEoEwSDxOqyLzzAI6J2PU3261JjTMTO7x+BPw==",
+ "dependencies": {
+ "langium": "^4.0.0"
+ }
+ },
+ "node_modules/@napi-rs/simple-git": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git/-/simple-git-0.1.22.tgz",
+ "integrity": "sha512-bMVoAKhpjTOPHkW/lprDPwv5aD4R4C3Irt8vn+SKA9wudLe9COLxOhurrKRsxmZccUbWXRF7vukNeGUAj5P8kA==",
+ "engines": {
+ "node": ">= 10"
+ },
+ "optionalDependencies": {
+ "@napi-rs/simple-git-android-arm-eabi": "0.1.22",
+ "@napi-rs/simple-git-android-arm64": "0.1.22",
+ "@napi-rs/simple-git-darwin-arm64": "0.1.22",
+ "@napi-rs/simple-git-darwin-x64": "0.1.22",
+ "@napi-rs/simple-git-freebsd-x64": "0.1.22",
+ "@napi-rs/simple-git-linux-arm-gnueabihf": "0.1.22",
+ "@napi-rs/simple-git-linux-arm64-gnu": "0.1.22",
+ "@napi-rs/simple-git-linux-arm64-musl": "0.1.22",
+ "@napi-rs/simple-git-linux-ppc64-gnu": "0.1.22",
+ "@napi-rs/simple-git-linux-s390x-gnu": "0.1.22",
+ "@napi-rs/simple-git-linux-x64-gnu": "0.1.22",
+ "@napi-rs/simple-git-linux-x64-musl": "0.1.22",
+ "@napi-rs/simple-git-win32-arm64-msvc": "0.1.22",
+ "@napi-rs/simple-git-win32-ia32-msvc": "0.1.22",
+ "@napi-rs/simple-git-win32-x64-msvc": "0.1.22"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-android-arm-eabi": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-android-arm-eabi/-/simple-git-android-arm-eabi-0.1.22.tgz",
+ "integrity": "sha512-JQZdnDNm8o43A5GOzwN/0Tz3CDBQtBUNqzVwEopm32uayjdjxev1Csp1JeaqF3v9djLDIvsSE39ecsN2LhCKKQ==",
+ "cpu": [
+ "arm"
+ ],
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-android-arm64": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-android-arm64/-/simple-git-android-arm64-0.1.22.tgz",
+ "integrity": "sha512-46OZ0SkhnvM+fapWjzg/eqbJvClxynUpWYyYBn4jAj7GQs1/Yyc8431spzDmkA8mL0M7Xo8SmbkzTDE7WwYAfg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-darwin-arm64": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-darwin-arm64/-/simple-git-darwin-arm64-0.1.22.tgz",
+ "integrity": "sha512-zH3h0C8Mkn9//MajPI6kHnttywjsBmZ37fhLX/Fiw5XKu84eHA6dRyVtMzoZxj6s+bjNTgaMgMUucxPn9ktxTQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-darwin-x64": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-darwin-x64/-/simple-git-darwin-x64-0.1.22.tgz",
+ "integrity": "sha512-GZN7lRAkGKB6PJxWsoyeYJhh85oOOjVNyl+/uipNX8bR+mFDCqRsCE3rRCFGV9WrZUHXkcuRL2laIRn7lLi3ag==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-freebsd-x64": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-freebsd-x64/-/simple-git-freebsd-x64-0.1.22.tgz",
+ "integrity": "sha512-xyqX1C5I0WBrUgZONxHjZH5a4LqQ9oki3SKFAVpercVYAcx3pq6BkZy1YUOP4qx78WxU1CCNfHBN7V+XO7D99A==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-arm-gnueabihf": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-arm-gnueabihf/-/simple-git-linux-arm-gnueabihf-0.1.22.tgz",
+ "integrity": "sha512-4LOtbp9ll93B9fxRvXiUJd1/RM3uafMJE7dGBZGKWBMGM76+BAcCEUv2BY85EfsU/IgopXI6n09TycRfPWOjxA==",
+ "cpu": [
+ "arm"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-arm64-gnu": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-arm64-gnu/-/simple-git-linux-arm64-gnu-0.1.22.tgz",
+ "integrity": "sha512-GVOjP/JjCzbQ0kSqao7ctC/1sodVtv5VF57rW9BFpo2y6tEYPCqHnkQkTpieuwMNe+TVOhBUC1+wH0d9/knIHg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-arm64-musl": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-arm64-musl/-/simple-git-linux-arm64-musl-0.1.22.tgz",
+ "integrity": "sha512-MOs7fPyJiU/wqOpKzAOmOpxJ/TZfP4JwmvPad/cXTOWYwwyppMlXFRms3i98EU3HOazI/wMU2Ksfda3+TBluWA==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-ppc64-gnu": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-ppc64-gnu/-/simple-git-linux-ppc64-gnu-0.1.22.tgz",
+ "integrity": "sha512-L59dR30VBShRUIZ5/cQHU25upNgKS0AMQ7537J6LCIUEFwwXrKORZKJ8ceR+s3Sr/4jempWVvMdjEpFDE4HYww==",
+ "cpu": [
+ "ppc64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-s390x-gnu": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-s390x-gnu/-/simple-git-linux-s390x-gnu-0.1.22.tgz",
+ "integrity": "sha512-4FHkPlCSIZUGC6HiADffbe6NVoTBMd65pIwcd40IDbtFKOgFMBA+pWRqKiQ21FERGH16Zed7XHJJoY3jpOqtmQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-x64-gnu": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-x64-gnu/-/simple-git-linux-x64-gnu-0.1.22.tgz",
+ "integrity": "sha512-Ei1tM5Ho/dwknF3pOzqkNW9Iv8oFzRxE8uOhrITcdlpxRxVrBVptUF6/0WPdvd7R9747D/q61QG/AVyWsWLFKw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-x64-musl": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-x64-musl/-/simple-git-linux-x64-musl-0.1.22.tgz",
+ "integrity": "sha512-zRYxg7it0p3rLyEJYoCoL2PQJNgArVLyNavHW03TFUAYkYi5bxQ/UFNVpgxMaXohr5yu7qCBqeo9j4DWeysalg==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-win32-arm64-msvc": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-win32-arm64-msvc/-/simple-git-win32-arm64-msvc-0.1.22.tgz",
+ "integrity": "sha512-XGFR1fj+Y9cWACcovV2Ey/R2xQOZKs8t+7KHPerYdJ4PtjVzGznI4c2EBHXtdOIYvkw7tL5rZ7FN1HJKdD5Quw==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-win32-ia32-msvc": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-win32-ia32-msvc/-/simple-git-win32-ia32-msvc-0.1.22.tgz",
+ "integrity": "sha512-Gqr9Y0gs6hcNBA1IXBpoqTFnnIoHuZGhrYqaZzEvGMLrTrpbXrXVEtX3DAAD2RLc1b87CPcJ49a7sre3PU3Rfw==",
+ "cpu": [
+ "ia32"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-win32-x64-msvc": {
+ "version": "0.1.22",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-win32-x64-msvc/-/simple-git-win32-x64-msvc-0.1.22.tgz",
+ "integrity": "sha512-hQjcreHmUcpw4UrtkOron1/TQObfe484lxiXFLLUj7aWnnnOVs1mnXq5/Bo9+3NYZldFpFRJPdPBeHCisXkKJg==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/env": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.12.tgz",
+ "integrity": "sha512-pUvdJN1on574wQHjaBfNGDt9Mz5utDSZFsIIQkMzPgNS8ZvT4H2mwOrOIClwsQOb6EGx5M76/CZr6G8i6pSpLg=="
+ },
+ "node_modules/@next/mdx": {
+ "version": "15.1.0",
+ "resolved": "https://registry.npmjs.org/@next/mdx/-/mdx-15.1.0.tgz",
+ "integrity": "sha512-1USYedy2yRmPdIvQC1b2MBVwiJYrcZnCSHHZZETEuV1rAxjjXedbHmo43kwAv6DL3f9AgDHnl1/s1cqI7xhXdA==",
+ "dependencies": {
+ "source-map": "^0.7.0"
+ },
+ "peerDependencies": {
+ "@mdx-js/loader": ">=0.15.0",
+ "@mdx-js/react": ">=0.15.0"
+ },
+ "peerDependenciesMeta": {
+ "@mdx-js/loader": {
+ "optional": true
+ },
+ "@mdx-js/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@next/swc-darwin-arm64": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.12.tgz",
+ "integrity": "sha512-RnRjBtH8S8eXCpUNkQ+543DUc7ys8y15VxmFU9HRqlo9BG3CcBUiwNtF8SNoi2xvGCVJq1vl2yYq+3oISBS0Zg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-darwin-x64": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.12.tgz",
+ "integrity": "sha512-nqa9/7iQlboF1EFtNhWxQA0rQstmYRSBGxSM6g3GxvxHxcoeqVXfGNr9stJOme674m2V7r4E3+jEhhGvSQhJRA==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-arm64-gnu": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.12.tgz",
+ "integrity": "sha512-dCzAjqhDHwmoB2M4eYfVKqXs99QdQxNQVpftvP1eGVppamXh/OkDAwV737Zr0KPXEqRUMN4uCjh6mjO+XtF3Mw==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-arm64-musl": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.12.tgz",
+ "integrity": "sha512-+fpGWvQiITgf7PUtbWY1H7qUSnBZsPPLyyq03QuAKpVoTy/QUx1JptEDTQMVvQhvizCEuNLEeghrQUyXQOekuw==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-x64-gnu": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.12.tgz",
+ "integrity": "sha512-jSLvgdRRL/hrFAPqEjJf1fFguC719kmcptjNVDJl26BnJIpjL3KH5h6mzR4mAweociLQaqvt4UyzfbFjgAdDcw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-x64-musl": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.12.tgz",
+ "integrity": "sha512-/uaF0WfmYqQgLfPmN6BvULwxY0dufI2mlN2JbOKqqceZh1G4hjREyi7pg03zjfyS6eqNemHAZPSoP84x17vo6w==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-arm64-msvc": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.12.tgz",
+ "integrity": "sha512-xhsL1OvQSfGmlL5RbOmU+FV120urrgFpYLq+6U8C6KIym32gZT6XF/SDE92jKzzlPWskkbjOKCpqk5m4i8PEfg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-x64-msvc": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.12.tgz",
+ "integrity": "sha512-Z1Dh6lhFkxvBDH1FoW6OU/L6prYwPSlwjLiZkExIAh8fbP6iI/M7iGTQAJPYJ9YFlWobCZ1PHbchFhFYb2ADkw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dependencies": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dependencies": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@pagefind/darwin-arm64": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/@pagefind/darwin-arm64/-/darwin-arm64-1.4.0.tgz",
+ "integrity": "sha512-2vMqkbv3lbx1Awea90gTaBsvpzgRs7MuSgKDxW0m9oV1GPZCZbZBJg/qL83GIUEN2BFlY46dtUZi54pwH+/pTQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@pagefind/darwin-x64": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/@pagefind/darwin-x64/-/darwin-x64-1.4.0.tgz",
+ "integrity": "sha512-e7JPIS6L9/cJfow+/IAqknsGqEPjJnVXGjpGm25bnq+NPdoD3c/7fAwr1OXkG4Ocjx6ZGSCijXEV4ryMcH2E3A==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@pagefind/freebsd-x64": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/@pagefind/freebsd-x64/-/freebsd-x64-1.4.0.tgz",
+ "integrity": "sha512-WcJVypXSZ+9HpiqZjFXMUobfFfZZ6NzIYtkhQ9eOhZrQpeY5uQFqNWLCk7w9RkMUwBv1HAMDW3YJQl/8OqsV0Q==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@pagefind/linux-arm64": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/@pagefind/linux-arm64/-/linux-arm64-1.4.0.tgz",
+ "integrity": "sha512-PIt8dkqt4W06KGmQjONw7EZbhDF+uXI7i0XtRLN1vjCUxM9vGPdtJc2mUyVPevjomrGz5M86M8bqTr6cgDp1Uw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@pagefind/linux-x64": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/@pagefind/linux-x64/-/linux-x64-1.4.0.tgz",
+ "integrity": "sha512-z4oddcWwQ0UHrTHR8psLnVlz6USGJ/eOlDPTDYZ4cI8TK8PgwRUPQZp9D2iJPNIPcS6Qx/E4TebjuGJOyK8Mmg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@pagefind/windows-x64": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/@pagefind/windows-x64/-/windows-x64-1.4.0.tgz",
+ "integrity": "sha512-NkT+YAdgS2FPCn8mIA9bQhiBs+xmniMGq1LFPDhcFn0+2yIUEiIG06t7bsZlhdjknEQRTSdT7YitP6fC5qwP0g==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@react-aria/focus": {
+ "version": "3.21.4",
+ "resolved": "https://registry.npmjs.org/@react-aria/focus/-/focus-3.21.4.tgz",
+ "integrity": "sha512-6gz+j9ip0/vFRTKJMl3R30MHopn4i19HqqLfSQfElxJD+r9hBnYG1Q6Wd/kl/WRR1+CALn2F+rn06jUnf5sT8Q==",
+ "dependencies": {
+ "@react-aria/interactions": "^3.27.0",
+ "@react-aria/utils": "^3.33.0",
+ "@react-types/shared": "^3.33.0",
+ "@swc/helpers": "^0.5.0",
+ "clsx": "^2.0.0"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1",
+ "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
+ }
+ },
+ "node_modules/@react-aria/interactions": {
+ "version": "3.27.0",
+ "resolved": "https://registry.npmjs.org/@react-aria/interactions/-/interactions-3.27.0.tgz",
+ "integrity": "sha512-D27pOy+0jIfHK60BB26AgqjjRFOYdvVSkwC31b2LicIzRCSPOSP06V4gMHuGmkhNTF4+YWDi1HHYjxIvMeiSlA==",
+ "dependencies": {
+ "@react-aria/ssr": "^3.9.10",
+ "@react-aria/utils": "^3.33.0",
+ "@react-stately/flags": "^3.1.2",
+ "@react-types/shared": "^3.33.0",
+ "@swc/helpers": "^0.5.0"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1",
+ "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
+ }
+ },
+ "node_modules/@react-aria/ssr": {
+ "version": "3.9.10",
+ "resolved": "https://registry.npmjs.org/@react-aria/ssr/-/ssr-3.9.10.tgz",
+ "integrity": "sha512-hvTm77Pf+pMBhuBm760Li0BVIO38jv1IBws1xFm1NoL26PU+fe+FMW5+VZWyANR6nYL65joaJKZqOdTQMkO9IQ==",
+ "dependencies": {
+ "@swc/helpers": "^0.5.0"
+ },
+ "engines": {
+ "node": ">= 12"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
+ }
+ },
+ "node_modules/@react-aria/utils": {
+ "version": "3.33.0",
+ "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.33.0.tgz",
+ "integrity": "sha512-yvz7CMH8d2VjwbSa5nGXqjU031tYhD8ddax95VzJsHSPyqHDEGfxul8RkhGV6oO7bVqZxVs6xY66NIgae+FHjw==",
+ "dependencies": {
+ "@react-aria/ssr": "^3.9.10",
+ "@react-stately/flags": "^3.1.2",
+ "@react-stately/utils": "^3.11.0",
+ "@react-types/shared": "^3.33.0",
+ "@swc/helpers": "^0.5.0",
+ "clsx": "^2.0.0"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1",
+ "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
+ }
+ },
+ "node_modules/@react-stately/flags": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/flags/-/flags-3.1.2.tgz",
+ "integrity": "sha512-2HjFcZx1MyQXoPqcBGALwWWmgFVUk2TuKVIQxCbRq7fPyWXIl6VHcakCLurdtYC2Iks7zizvz0Idv48MQ38DWg==",
+ "dependencies": {
+ "@swc/helpers": "^0.5.0"
+ }
+ },
+ "node_modules/@react-stately/utils": {
+ "version": "3.11.0",
+ "resolved": "https://registry.npmjs.org/@react-stately/utils/-/utils-3.11.0.tgz",
+ "integrity": "sha512-8LZpYowJ9eZmmYLpudbo/eclIRnbhWIJZ994ncmlKlouNzKohtM8qTC6B1w1pwUbiwGdUoyzLuQbeaIor5Dvcw==",
+ "dependencies": {
+ "@swc/helpers": "^0.5.0"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
+ }
+ },
+ "node_modules/@react-types/shared": {
+ "version": "3.33.0",
+ "resolved": "https://registry.npmjs.org/@react-types/shared/-/shared-3.33.0.tgz",
+ "integrity": "sha512-xuUpP6MyuPmJtzNOqF5pzFUIHH2YogyOQfUQHag54PRmWB7AbjuGWBUv0l1UDmz6+AbzAYGmDVAzcRDOu2PFpw==",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
+ }
+ },
+ "node_modules/@shikijs/core": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-1.29.2.tgz",
+ "integrity": "sha512-vju0lY9r27jJfOY4Z7+Rt/nIOjzJpZ3y+nYpqtUZInVoXQ/TJZcfGnNOGnKjFdVZb8qexiCuSlZRKcGfhhTTZQ==",
+ "dependencies": {
+ "@shikijs/engine-javascript": "1.29.2",
+ "@shikijs/engine-oniguruma": "1.29.2",
+ "@shikijs/types": "1.29.2",
+ "@shikijs/vscode-textmate": "^10.0.1",
+ "@types/hast": "^3.0.4",
+ "hast-util-to-html": "^9.0.4"
+ }
+ },
+ "node_modules/@shikijs/engine-javascript": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-1.29.2.tgz",
+ "integrity": "sha512-iNEZv4IrLYPv64Q6k7EPpOCE/nuvGiKl7zxdq0WFuRPF5PAE9PRo2JGq/d8crLusM59BRemJ4eOqrFrC4wiQ+A==",
+ "dependencies": {
+ "@shikijs/types": "1.29.2",
+ "@shikijs/vscode-textmate": "^10.0.1",
+ "oniguruma-to-es": "^2.2.0"
+ }
+ },
+ "node_modules/@shikijs/engine-oniguruma": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-1.29.2.tgz",
+ "integrity": "sha512-7iiOx3SG8+g1MnlzZVDYiaeHe7Ez2Kf2HrJzdmGwkRisT7r4rak0e655AcM/tF9JG/kg5fMNYlLLKglbN7gBqA==",
+ "dependencies": {
+ "@shikijs/types": "1.29.2",
+ "@shikijs/vscode-textmate": "^10.0.1"
+ }
+ },
+ "node_modules/@shikijs/langs": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-1.29.2.tgz",
+ "integrity": "sha512-FIBA7N3LZ+223U7cJDUYd5shmciFQlYkFXlkKVaHsCPgfVLiO+e12FmQE6Tf9vuyEsFe3dIl8qGWKXgEHL9wmQ==",
+ "dependencies": {
+ "@shikijs/types": "1.29.2"
+ }
+ },
+ "node_modules/@shikijs/themes": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-1.29.2.tgz",
+ "integrity": "sha512-i9TNZlsq4uoyqSbluIcZkmPL9Bfi3djVxRnofUHwvx/h6SRW3cwgBC5SML7vsDcWyukY0eCzVN980rqP6qNl9g==",
+ "dependencies": {
+ "@shikijs/types": "1.29.2"
+ }
+ },
+ "node_modules/@shikijs/twoslash": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/twoslash/-/twoslash-1.29.2.tgz",
+ "integrity": "sha512-2S04ppAEa477tiaLfGEn1QJWbZUmbk8UoPbAEw4PifsrxkBXtAtOflIZJNtuCwz8ptc/TPxy7CO7gW4Uoi6o/g==",
+ "dependencies": {
+ "@shikijs/core": "1.29.2",
+ "@shikijs/types": "1.29.2",
+ "twoslash": "^0.2.12"
+ }
+ },
+ "node_modules/@shikijs/types": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-1.29.2.tgz",
+ "integrity": "sha512-VJjK0eIijTZf0QSTODEXCqinjBn0joAHQ+aPSBzrv4O2d/QSbsMw+ZeSRx03kV34Hy7NzUvV/7NqfYGRLrASmw==",
+ "dependencies": {
+ "@shikijs/vscode-textmate": "^10.0.1",
+ "@types/hast": "^3.0.4"
+ }
+ },
+ "node_modules/@shikijs/vscode-textmate": {
+ "version": "10.0.2",
+ "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-10.0.2.tgz",
+ "integrity": "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="
+ },
+ "node_modules/@swc/helpers": {
+ "version": "0.5.15",
+ "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz",
+ "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==",
+ "dependencies": {
+ "tslib": "^2.8.0"
+ }
+ },
+ "node_modules/@tailwindcss/node": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.2.0.tgz",
+ "integrity": "sha512-Yv+fn/o2OmL5fh/Ir62VXItdShnUxfpkMA4Y7jdeC8O81WPB8Kf6TT6GSHvnqgSwDzlB5iT7kDpeXxLsUS0T6Q==",
+ "dependencies": {
+ "@jridgewell/remapping": "^2.3.5",
+ "enhanced-resolve": "^5.19.0",
+ "jiti": "^2.6.1",
+ "lightningcss": "1.31.1",
+ "magic-string": "^0.30.21",
+ "source-map-js": "^1.2.1",
+ "tailwindcss": "4.2.0"
+ }
+ },
+ "node_modules/@tailwindcss/oxide": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.2.0.tgz",
+ "integrity": "sha512-AZqQzADaj742oqn2xjl5JbIOzZB/DGCYF/7bpvhA8KvjUj9HJkag6bBuwZvH1ps6dfgxNHyuJVlzSr2VpMgdTQ==",
+ "engines": {
+ "node": ">= 20"
+ },
+ "optionalDependencies": {
+ "@tailwindcss/oxide-android-arm64": "4.2.0",
+ "@tailwindcss/oxide-darwin-arm64": "4.2.0",
+ "@tailwindcss/oxide-darwin-x64": "4.2.0",
+ "@tailwindcss/oxide-freebsd-x64": "4.2.0",
+ "@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.0",
+ "@tailwindcss/oxide-linux-arm64-gnu": "4.2.0",
+ "@tailwindcss/oxide-linux-arm64-musl": "4.2.0",
+ "@tailwindcss/oxide-linux-x64-gnu": "4.2.0",
+ "@tailwindcss/oxide-linux-x64-musl": "4.2.0",
+ "@tailwindcss/oxide-wasm32-wasi": "4.2.0",
+ "@tailwindcss/oxide-win32-arm64-msvc": "4.2.0",
+ "@tailwindcss/oxide-win32-x64-msvc": "4.2.0"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-android-arm64": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.2.0.tgz",
+ "integrity": "sha512-F0QkHAVaW/JNBWl4CEKWdZ9PMb0khw5DCELAOnu+RtjAfx5Zgw+gqCHFvqg3AirU1IAd181fwOtJQ5I8Yx5wtw==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-darwin-arm64": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.2.0.tgz",
+ "integrity": "sha512-I0QylkXsBsJMZ4nkUNSR04p6+UptjcwhcVo3Zu828ikiEqHjVmQL9RuQ6uT/cVIiKpvtVA25msu/eRV97JeNSA==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-darwin-x64": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.2.0.tgz",
+ "integrity": "sha512-6TmQIn4p09PBrmnkvbYQ0wbZhLtbaksCDx7Y7R3FYYx0yxNA7xg5KP7dowmQ3d2JVdabIHvs3Hx4K3d5uCf8xg==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-freebsd-x64": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.2.0.tgz",
+ "integrity": "sha512-qBudxDvAa2QwGlq9y7VIzhTvp2mLJ6nD/G8/tI70DCDoneaUeLWBJaPcbfzqRIWraj+o969aDQKvKW9dvkUizw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.2.0.tgz",
+ "integrity": "sha512-7XKkitpy5NIjFZNUQPeUyNJNJn1CJeV7rmMR+exHfTuOsg8rxIO9eNV5TSEnqRcaOK77zQpsyUkBWmPy8FgdSg==",
+ "cpu": [
+ "arm"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-linux-arm64-gnu": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.2.0.tgz",
+ "integrity": "sha512-Mff5a5Q3WoQR01pGU1gr29hHM1N93xYrKkGXfPw/aRtK4bOc331Ho4Tgfsm5WDGvpevqMpdlkCojT3qlCQbCpA==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-linux-arm64-musl": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.2.0.tgz",
+ "integrity": "sha512-XKcSStleEVnbH6W/9DHzZv1YhjE4eSS6zOu2eRtYAIh7aV4o3vIBs+t/B15xlqoxt6ef/0uiqJVB6hkHjWD/0A==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-linux-x64-gnu": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.2.0.tgz",
+ "integrity": "sha512-/hlXCBqn9K6fi7eAM0RsobHwJYa5V/xzWspVTzxnX+Ft9v6n+30Pz8+RxCn7sQL/vRHHLS30iQPrHQunu6/vJA==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-linux-x64-musl": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.2.0.tgz",
+ "integrity": "sha512-lKUaygq4G7sWkhQbfdRRBkaq4LY39IriqBQ+Gk6l5nKq6Ay2M2ZZb1tlIyRNgZKS8cbErTwuYSor0IIULC0SHw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-wasm32-wasi": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.2.0.tgz",
+ "integrity": "sha512-xuDjhAsFdUuFP5W9Ze4k/o4AskUtI8bcAGU4puTYprr89QaYFmhYOPfP+d1pH+k9ets6RoE23BXZM1X1jJqoyw==",
+ "bundleDependencies": [
+ "@napi-rs/wasm-runtime",
+ "@emnapi/core",
+ "@emnapi/runtime",
+ "@tybys/wasm-util",
+ "@emnapi/wasi-threads",
+ "tslib"
+ ],
+ "cpu": [
+ "wasm32"
+ ],
+ "optional": true,
+ "dependencies": {
+ "@emnapi/core": "^1.8.1",
+ "@emnapi/runtime": "^1.8.1",
+ "@emnapi/wasi-threads": "^1.1.0",
+ "@napi-rs/wasm-runtime": "^1.1.1",
+ "@tybys/wasm-util": "^0.10.1",
+ "tslib": "^2.8.1"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.0.tgz",
+ "integrity": "sha512-2UU/15y1sWDEDNJXxEIrfWKC2Yb4YgIW5Xz2fKFqGzFWfoMHWFlfa1EJlGO2Xzjkq/tvSarh9ZTjvbxqWvLLXA==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/oxide-win32-x64-msvc": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.2.0.tgz",
+ "integrity": "sha512-CrFadmFoc+z76EV6LPG1jx6XceDsaCG3lFhyLNo/bV9ByPrE+FnBPckXQVP4XRkN76h3Fjt/a+5Er/oA/nCBvQ==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/@tailwindcss/postcss": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.2.0.tgz",
+ "integrity": "sha512-u6YBacGpOm/ixPfKqfgrJEjMfrYmPD7gEFRoygS/hnQaRtV0VCBdpkx5Ouw9pnaLRwwlgGCuJw8xLpaR0hOrQg==",
+ "dependencies": {
+ "@alloc/quick-lru": "^5.2.0",
+ "@tailwindcss/node": "4.2.0",
+ "@tailwindcss/oxide": "4.2.0",
+ "postcss": "^8.5.6",
+ "tailwindcss": "4.2.0"
+ }
+ },
+ "node_modules/@tanstack/react-virtual": {
+ "version": "3.13.18",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.18.tgz",
+ "integrity": "sha512-dZkhyfahpvlaV0rIKnvQiVoWPyURppl6w4m9IwMDpuIjcJ1sD9YGWrt0wISvgU7ewACXx2Ct46WPgI6qAD4v6A==",
+ "dependencies": {
+ "@tanstack/virtual-core": "3.13.18"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/@tanstack/virtual-core": {
+ "version": "3.13.18",
+ "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.18.tgz",
+ "integrity": "sha512-Mx86Hqu1k39icq2Zusq+Ey2J6dDWTjDvEv43PJtRCoEYTLyfaPnxIQ6iy7YAOK0NV/qOEmZQ/uCufrppZxTgcg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@theguild/remark-npm2yarn": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/@theguild/remark-npm2yarn/-/remark-npm2yarn-0.3.3.tgz",
+ "integrity": "sha512-ma6DvR03gdbvwqfKx1omqhg9May/VYGdMHvTzB4VuxkyS7KzfZ/lzrj43hmcsggpMje0x7SADA/pcMph0ejRnA==",
+ "dependencies": {
+ "npm-to-yarn": "^3.0.0",
+ "unist-util-visit": "^5.0.0"
+ }
+ },
+ "node_modules/@types/d3": {
+ "version": "7.4.3",
+ "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz",
+ "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/d3-axis": "*",
+ "@types/d3-brush": "*",
+ "@types/d3-chord": "*",
+ "@types/d3-color": "*",
+ "@types/d3-contour": "*",
+ "@types/d3-delaunay": "*",
+ "@types/d3-dispatch": "*",
+ "@types/d3-drag": "*",
+ "@types/d3-dsv": "*",
+ "@types/d3-ease": "*",
+ "@types/d3-fetch": "*",
+ "@types/d3-force": "*",
+ "@types/d3-format": "*",
+ "@types/d3-geo": "*",
+ "@types/d3-hierarchy": "*",
+ "@types/d3-interpolate": "*",
+ "@types/d3-path": "*",
+ "@types/d3-polygon": "*",
+ "@types/d3-quadtree": "*",
+ "@types/d3-random": "*",
+ "@types/d3-scale": "*",
+ "@types/d3-scale-chromatic": "*",
+ "@types/d3-selection": "*",
+ "@types/d3-shape": "*",
+ "@types/d3-time": "*",
+ "@types/d3-time-format": "*",
+ "@types/d3-timer": "*",
+ "@types/d3-transition": "*",
+ "@types/d3-zoom": "*"
+ }
+ },
+ "node_modules/@types/d3-array": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz",
+ "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw=="
+ },
+ "node_modules/@types/d3-axis": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz",
+ "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-brush": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz",
+ "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-chord": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz",
+ "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg=="
+ },
+ "node_modules/@types/d3-color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
+ "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A=="
+ },
+ "node_modules/@types/d3-contour": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz",
+ "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-delaunay": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
+ "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw=="
+ },
+ "node_modules/@types/d3-dispatch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz",
+ "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA=="
+ },
+ "node_modules/@types/d3-drag": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz",
+ "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-dsv": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz",
+ "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g=="
+ },
+ "node_modules/@types/d3-ease": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
+ "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA=="
+ },
+ "node_modules/@types/d3-fetch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz",
+ "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==",
+ "dependencies": {
+ "@types/d3-dsv": "*"
+ }
+ },
+ "node_modules/@types/d3-force": {
+ "version": "3.0.10",
+ "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz",
+ "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw=="
+ },
+ "node_modules/@types/d3-format": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz",
+ "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g=="
+ },
+ "node_modules/@types/d3-geo": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz",
+ "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==",
+ "dependencies": {
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-hierarchy": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz",
+ "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg=="
+ },
+ "node_modules/@types/d3-interpolate": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
+ "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
+ "dependencies": {
+ "@types/d3-color": "*"
+ }
+ },
+ "node_modules/@types/d3-path": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz",
+ "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg=="
+ },
+ "node_modules/@types/d3-polygon": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz",
+ "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA=="
+ },
+ "node_modules/@types/d3-quadtree": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz",
+ "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg=="
+ },
+ "node_modules/@types/d3-random": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz",
+ "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ=="
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
+ "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ=="
+ },
+ "node_modules/@types/d3-selection": {
+ "version": "3.0.11",
+ "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz",
+ "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w=="
+ },
+ "node_modules/@types/d3-shape": {
+ "version": "3.1.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.8.tgz",
+ "integrity": "sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==",
+ "dependencies": {
+ "@types/d3-path": "*"
+ }
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
+ "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g=="
+ },
+ "node_modules/@types/d3-time-format": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz",
+ "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg=="
+ },
+ "node_modules/@types/d3-timer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
+ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw=="
+ },
+ "node_modules/@types/d3-transition": {
+ "version": "3.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz",
+ "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-zoom": {
+ "version": "3.0.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz",
+ "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==",
+ "dependencies": {
+ "@types/d3-interpolate": "*",
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/debug": {
+ "version": "4.1.12",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
+ "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==",
+ "dependencies": {
+ "@types/ms": "*"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="
+ },
+ "node_modules/@types/estree-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz",
+ "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==",
+ "dependencies": {
+ "@types/estree": "*"
+ }
+ },
+ "node_modules/@types/geojson": {
+ "version": "7946.0.16",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz",
+ "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg=="
+ },
+ "node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/katex": {
+ "version": "0.16.8",
+ "resolved": "https://registry.npmjs.org/@types/katex/-/katex-0.16.8.tgz",
+ "integrity": "sha512-trgaNyfU+Xh2Tc+ABIb44a5AYUpicB3uwirOioeOkNPPbmgRNtcWyDeeFRzjPZENO9Vq8gvVqfhaaXWLlevVwg=="
+ },
+ "node_modules/@types/mdast": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz",
+ "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/mdx": {
+ "version": "2.0.13",
+ "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz",
+ "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw=="
+ },
+ "node_modules/@types/ms": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
+ "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="
+ },
+ "node_modules/@types/nlcst": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/@types/nlcst/-/nlcst-2.0.3.tgz",
+ "integrity": "sha512-vSYNSDe6Ix3q+6Z7ri9lyWqgGhJTmzRjZRqyq15N0Z/1/UnVsno9G/N40NBijoYx2seFDIl0+B2mgAb9mezUCA==",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "25.3.0",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-25.3.0.tgz",
+ "integrity": "sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~7.18.0"
+ }
+ },
+ "node_modules/@types/react": {
+ "version": "19.2.14",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz",
+ "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==",
+ "devOptional": true,
+ "dependencies": {
+ "csstype": "^3.2.2"
+ }
+ },
+ "node_modules/@types/trusted-types": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz",
+ "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
+ "optional": true
+ },
+ "node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="
+ },
+ "node_modules/@typescript/vfs": {
+ "version": "1.6.4",
+ "resolved": "https://registry.npmjs.org/@typescript/vfs/-/vfs-1.6.4.tgz",
+ "integrity": "sha512-PJFXFS4ZJKiJ9Qiuix6Dz/OwEIqHD7Dme1UwZhTK11vR+5dqW2ACbdndWQexBzCx+CPuMe5WBYQWCsFyGlQLlQ==",
+ "dependencies": {
+ "debug": "^4.4.3"
+ },
+ "peerDependencies": {
+ "typescript": "*"
+ }
+ },
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz",
+ "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="
+ },
+ "node_modules/@xmldom/xmldom": {
+ "version": "0.9.8",
+ "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.9.8.tgz",
+ "integrity": "sha512-p96FSY54r+WJ50FIOsCOjyj/wavs8921hG5+kVMmZgKcvIKxMXHTrjNJvRgWa/zuX3B6t2lijLNFaOyuxUH+2A==",
+ "engines": {
+ "node": ">=14.6"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "8.16.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz",
+ "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==",
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "peerDependencies": {
+ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/arg": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
+ "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="
+ },
+ "node_modules/array-iterate": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/array-iterate/-/array-iterate-2.0.1.tgz",
+ "integrity": "sha512-I1jXZMjAgCMmxT4qxXfPXa6SthSoE8h6gkSI9BGGNv8mP8G/v0blc+qFnZu6K42vTOiuME596QaLO0TP3Lk0xg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/astring": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz",
+ "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==",
+ "bin": {
+ "astring": "bin/astring"
+ }
+ },
+ "node_modules/autoprefixer": {
+ "version": "10.4.24",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.24.tgz",
+ "integrity": "sha512-uHZg7N9ULTVbutaIsDRoUkoS8/h3bdsmVJYZ5l3wv8Cp/6UIIoRDm90hZ+BwxUj/hGBEzLxdHNSKuFpn8WOyZw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/autoprefixer"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "browserslist": "^4.28.1",
+ "caniuse-lite": "^1.0.30001766",
+ "fraction.js": "^5.3.4",
+ "picocolors": "^1.1.1",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "bin": {
+ "autoprefixer": "bin/autoprefixer"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.10.0",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.0.tgz",
+ "integrity": "sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==",
+ "dev": true,
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.cjs"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/better-react-mathjax": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/better-react-mathjax/-/better-react-mathjax-2.3.0.tgz",
+ "integrity": "sha512-K0ceQC+jQmB+NLDogO5HCpqmYf18AU2FxDbLdduYgkHYWZApFggkHE4dIaXCV1NqeoscESYXXo1GSkY6fA295w==",
+ "dependencies": {
+ "mathjax-full": "^3.2.2"
+ },
+ "peerDependencies": {
+ "react": ">=16.8"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.1",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz",
+ "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "baseline-browser-mapping": "^2.9.0",
+ "caniuse-lite": "^1.0.30001759",
+ "electron-to-chromium": "^1.5.263",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.2.0"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001770",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001770.tgz",
+ "integrity": "sha512-x/2CLQ1jHENRbHg5PSId2sXq1CIO1CISvwWAj027ltMVG2UNgW+w9oH2+HzgEIRFembL8bUlXtfbBHR1fCg2xw==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ]
+ },
+ "node_modules/ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/chalk": {
+ "version": "5.6.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz",
+ "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==",
+ "engines": {
+ "node": "^12.17.0 || ^14.13 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-html4": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz",
+ "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-legacy": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz",
+ "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-reference-invalid": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz",
+ "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/chevrotain": {
+ "version": "11.1.1",
+ "resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.1.1.tgz",
+ "integrity": "sha512-f0yv5CPKaFxfsPTBzX7vGuim4oIC1/gcS7LUGdBSwl2dU6+FON6LVUksdOo1qJjoUvXNn45urgh8C+0a24pACQ==",
+ "dependencies": {
+ "@chevrotain/cst-dts-gen": "11.1.1",
+ "@chevrotain/gast": "11.1.1",
+ "@chevrotain/regexp-to-ast": "11.1.1",
+ "@chevrotain/types": "11.1.1",
+ "@chevrotain/utils": "11.1.1",
+ "lodash-es": "4.17.23"
+ }
+ },
+ "node_modules/chevrotain-allstar": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/chevrotain-allstar/-/chevrotain-allstar-0.3.1.tgz",
+ "integrity": "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==",
+ "dependencies": {
+ "lodash-es": "^4.17.21"
+ },
+ "peerDependencies": {
+ "chevrotain": "^11.0.0"
+ }
+ },
+ "node_modules/client-only": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz",
+ "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA=="
+ },
+ "node_modules/clipboardy": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/clipboardy/-/clipboardy-4.0.0.tgz",
+ "integrity": "sha512-5mOlNS0mhX0707P2I0aZ2V/cmHUEO/fL7VFLqszkhUsxt7RwnmrInf/eEQKlf5GzvYeHIjT+Ov1HRfNmymlG0w==",
+ "dependencies": {
+ "execa": "^8.0.1",
+ "is-wsl": "^3.1.0",
+ "is64bit": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/clsx": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
+ "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/collapse-white-space": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz",
+ "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/comma-separated-tokens": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz",
+ "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/commander": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz",
+ "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==",
+ "engines": {
+ "node": ">= 12"
+ }
+ },
+ "node_modules/compute-scroll-into-view": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.1.1.tgz",
+ "integrity": "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw=="
+ },
+ "node_modules/confbox": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz",
+ "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w=="
+ },
+ "node_modules/cose-base": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz",
+ "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==",
+ "dependencies": {
+ "layout-base": "^1.0.0"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
+ "devOptional": true
+ },
+ "node_modules/cytoscape": {
+ "version": "3.33.1",
+ "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.33.1.tgz",
+ "integrity": "sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==",
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/cytoscape-cose-bilkent": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz",
+ "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==",
+ "dependencies": {
+ "cose-base": "^1.0.0"
+ },
+ "peerDependencies": {
+ "cytoscape": "^3.2.0"
+ }
+ },
+ "node_modules/cytoscape-fcose": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz",
+ "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==",
+ "dependencies": {
+ "cose-base": "^2.2.0"
+ },
+ "peerDependencies": {
+ "cytoscape": "^3.2.0"
+ }
+ },
+ "node_modules/cytoscape-fcose/node_modules/cose-base": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz",
+ "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==",
+ "dependencies": {
+ "layout-base": "^2.0.0"
+ }
+ },
+ "node_modules/cytoscape-fcose/node_modules/layout-base": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz",
+ "integrity": "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg=="
+ },
+ "node_modules/d3": {
+ "version": "7.9.0",
+ "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz",
+ "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==",
+ "dependencies": {
+ "d3-array": "3",
+ "d3-axis": "3",
+ "d3-brush": "3",
+ "d3-chord": "3",
+ "d3-color": "3",
+ "d3-contour": "4",
+ "d3-delaunay": "6",
+ "d3-dispatch": "3",
+ "d3-drag": "3",
+ "d3-dsv": "3",
+ "d3-ease": "3",
+ "d3-fetch": "3",
+ "d3-force": "3",
+ "d3-format": "3",
+ "d3-geo": "3",
+ "d3-hierarchy": "3",
+ "d3-interpolate": "3",
+ "d3-path": "3",
+ "d3-polygon": "3",
+ "d3-quadtree": "3",
+ "d3-random": "3",
+ "d3-scale": "4",
+ "d3-scale-chromatic": "3",
+ "d3-selection": "3",
+ "d3-shape": "3",
+ "d3-time": "3",
+ "d3-time-format": "4",
+ "d3-timer": "3",
+ "d3-transition": "3",
+ "d3-zoom": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-array": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
+ "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
+ "dependencies": {
+ "internmap": "1 - 2"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-axis": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz",
+ "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-brush": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz",
+ "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "3",
+ "d3-transition": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-chord": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz",
+ "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==",
+ "dependencies": {
+ "d3-path": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-contour": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz",
+ "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==",
+ "dependencies": {
+ "d3-array": "^3.2.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-delaunay": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
+ "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==",
+ "dependencies": {
+ "delaunator": "5"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-drag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
+ "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-selection": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dsv": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz",
+ "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==",
+ "dependencies": {
+ "commander": "7",
+ "iconv-lite": "0.6",
+ "rw": "1"
+ },
+ "bin": {
+ "csv2json": "bin/dsv2json.js",
+ "csv2tsv": "bin/dsv2dsv.js",
+ "dsv2dsv": "bin/dsv2dsv.js",
+ "dsv2json": "bin/dsv2json.js",
+ "json2csv": "bin/json2dsv.js",
+ "json2dsv": "bin/json2dsv.js",
+ "json2tsv": "bin/json2dsv.js",
+ "tsv2csv": "bin/dsv2dsv.js",
+ "tsv2json": "bin/dsv2json.js"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dsv/node_modules/commander": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
+ "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-fetch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz",
+ "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==",
+ "dependencies": {
+ "d3-dsv": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-force": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz",
+ "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-quadtree": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-format": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.2.tgz",
+ "integrity": "sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-geo": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz",
+ "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==",
+ "dependencies": {
+ "d3-array": "2.5.0 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-hierarchy": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz",
+ "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-path": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
+ "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-polygon": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz",
+ "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-quadtree": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz",
+ "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-random": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz",
+ "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-sankey": {
+ "version": "0.12.3",
+ "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz",
+ "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==",
+ "dependencies": {
+ "d3-array": "1 - 2",
+ "d3-shape": "^1.2.0"
+ }
+ },
+ "node_modules/d3-sankey/node_modules/d3-array": {
+ "version": "2.12.1",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz",
+ "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==",
+ "dependencies": {
+ "internmap": "^1.0.0"
+ }
+ },
+ "node_modules/d3-sankey/node_modules/d3-path": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz",
+ "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg=="
+ },
+ "node_modules/d3-sankey/node_modules/d3-shape": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz",
+ "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==",
+ "dependencies": {
+ "d3-path": "1"
+ }
+ },
+ "node_modules/d3-sankey/node_modules/internmap": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz",
+ "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw=="
+ },
+ "node_modules/d3-scale": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
+ "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
+ "dependencies": {
+ "d3-array": "2.10.0 - 3",
+ "d3-format": "1 - 3",
+ "d3-interpolate": "1.2.0 - 3",
+ "d3-time": "2.1.1 - 3",
+ "d3-time-format": "2 - 4"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-interpolate": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-selection": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
+ "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-shape": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
+ "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
+ "dependencies": {
+ "d3-path": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
+ "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
+ "dependencies": {
+ "d3-array": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time-format": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
+ "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
+ "dependencies": {
+ "d3-time": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-transition": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
+ "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-dispatch": "1 - 3",
+ "d3-ease": "1 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "d3-selection": "2 - 3"
+ }
+ },
+ "node_modules/d3-zoom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
+ "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "2 - 3",
+ "d3-transition": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/dagre-d3-es": {
+ "version": "7.0.13",
+ "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.13.tgz",
+ "integrity": "sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==",
+ "dependencies": {
+ "d3": "^7.9.0",
+ "lodash-es": "^4.17.21"
+ }
+ },
+ "node_modules/dayjs": {
+ "version": "1.11.19",
+ "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.19.tgz",
+ "integrity": "sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw=="
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decode-named-character-reference": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz",
+ "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==",
+ "dependencies": {
+ "character-entities": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/delaunator": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz",
+ "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==",
+ "dependencies": {
+ "robust-predicates": "^3.0.2"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/detect-libc": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz",
+ "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/devlop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz",
+ "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==",
+ "dependencies": {
+ "dequal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/dompurify": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.1.tgz",
+ "integrity": "sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==",
+ "optionalDependencies": {
+ "@types/trusted-types": "^2.0.7"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.286",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.286.tgz",
+ "integrity": "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==",
+ "dev": true
+ },
+ "node_modules/emoji-regex-xs": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex-xs/-/emoji-regex-xs-1.0.0.tgz",
+ "integrity": "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg=="
+ },
+ "node_modules/enhanced-resolve": {
+ "version": "5.19.0",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.19.0.tgz",
+ "integrity": "sha512-phv3E1Xl4tQOShqSte26C7Fl84EwUdZsyOuSSk9qtAGyyQs2s3jJzComh+Abf4g187lUUAvH+H26omrqia2aGg==",
+ "dependencies": {
+ "graceful-fs": "^4.2.4",
+ "tapable": "^2.3.0"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/entities": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz",
+ "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==",
+ "engines": {
+ "node": ">=0.12"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/esast-util-from-estree": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz",
+ "integrity": "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-visit": "^2.0.0",
+ "unist-util-position-from-estree": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/esast-util-from-js": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz",
+ "integrity": "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "acorn": "^8.0.0",
+ "esast-util-from-estree": "^2.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
+ "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/esm": {
+ "version": "3.2.25",
+ "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz",
+ "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/estree-util-attach-comments": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz",
+ "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-build-jsx": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz",
+ "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "estree-walker": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-is-identifier-name": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz",
+ "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-scope": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz",
+ "integrity": "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "devlop": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-to-js": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz",
+ "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "astring": "^1.8.0",
+ "source-map": "^0.7.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-value-to-estree": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.5.0.tgz",
+ "integrity": "sha512-aMV56R27Gv3QmfmF1MY12GWkGzzeAezAX+UplqHVASfjc9wNzI/X6hC0S9oxq61WT4aQesLGslWP9tKk6ghRZQ==",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/remcohaszing"
+ }
+ },
+ "node_modules/estree-util-visit": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz",
+ "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-walker": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
+ "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ }
+ },
+ "node_modules/execa": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz",
+ "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==",
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^8.0.1",
+ "human-signals": "^5.0.0",
+ "is-stream": "^3.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^5.1.0",
+ "onetime": "^6.0.0",
+ "signal-exit": "^4.1.0",
+ "strip-final-newline": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=16.17"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ },
+ "node_modules/fast-glob": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
+ "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==",
+ "dependencies": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.8"
+ },
+ "engines": {
+ "node": ">=8.6.0"
+ }
+ },
+ "node_modules/fastq": {
+ "version": "1.20.1",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz",
+ "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==",
+ "dependencies": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "node_modules/fault": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz",
+ "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==",
+ "dependencies": {
+ "format": "^0.2.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/format": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz",
+ "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==",
+ "engines": {
+ "node": ">=0.4.x"
+ }
+ },
+ "node_modules/fraction.js": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz",
+ "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==",
+ "dev": true,
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/rawify"
+ }
+ },
+ "node_modules/framer-motion": {
+ "version": "12.34.2",
+ "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.34.2.tgz",
+ "integrity": "sha512-CcnYTzbRybm1/OE8QLXfXI8gR1cx5T4dF3D2kn5IyqsGNeLAKl2iFHb2BzFyXBGqESntDt6rPYl4Jhrb7tdB8g==",
+ "dependencies": {
+ "motion-dom": "^12.34.2",
+ "motion-utils": "^12.29.2",
+ "tslib": "^2.4.0"
+ },
+ "peerDependencies": {
+ "@emotion/is-prop-valid": "*",
+ "react": "^18.0.0 || ^19.0.0",
+ "react-dom": "^18.0.0 || ^19.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@emotion/is-prop-valid": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ },
+ "react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/get-stream": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz",
+ "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==",
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/github-slugger": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz",
+ "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
+ },
+ "node_modules/hachure-fill": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/hachure-fill/-/hachure-fill-0.5.2.tgz",
+ "integrity": "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg=="
+ },
+ "node_modules/hast-util-from-dom": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-from-dom/-/hast-util-from-dom-5.0.1.tgz",
+ "integrity": "sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "hastscript": "^9.0.0",
+ "web-namespaces": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-html": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/hast-util-from-html/-/hast-util-from-html-2.0.3.tgz",
+ "integrity": "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "devlop": "^1.1.0",
+ "hast-util-from-parse5": "^8.0.0",
+ "parse5": "^7.0.0",
+ "vfile": "^6.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-html-isomorphic": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-from-html-isomorphic/-/hast-util-from-html-isomorphic-2.0.0.tgz",
+ "integrity": "sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "hast-util-from-dom": "^5.0.0",
+ "hast-util-from-html": "^2.0.0",
+ "unist-util-remove-position": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-parse5": {
+ "version": "8.0.3",
+ "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz",
+ "integrity": "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "devlop": "^1.0.0",
+ "hastscript": "^9.0.0",
+ "property-information": "^7.0.0",
+ "vfile": "^6.0.0",
+ "vfile-location": "^5.0.0",
+ "web-namespaces": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-is-element": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz",
+ "integrity": "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-parse-selector": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz",
+ "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-raw": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz",
+ "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "hast-util-from-parse5": "^8.0.0",
+ "hast-util-to-parse5": "^8.0.0",
+ "html-void-elements": "^3.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "parse5": "^7.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0",
+ "web-namespaces": "^2.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-estree": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz",
+ "integrity": "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-attach-comments": "^3.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "hast-util-whitespace": "^3.0.0",
+ "mdast-util-mdx-expression": "^2.0.0",
+ "mdast-util-mdx-jsx": "^3.0.0",
+ "mdast-util-mdxjs-esm": "^2.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "style-to-js": "^1.0.0",
+ "unist-util-position": "^5.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-html": {
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.5.tgz",
+ "integrity": "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "ccount": "^2.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "hast-util-whitespace": "^3.0.0",
+ "html-void-elements": "^3.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "stringify-entities": "^4.0.0",
+ "zwitch": "^2.0.4"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-jsx-runtime": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz",
+ "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "hast-util-whitespace": "^3.0.0",
+ "mdast-util-mdx-expression": "^2.0.0",
+ "mdast-util-mdx-jsx": "^3.0.0",
+ "mdast-util-mdxjs-esm": "^2.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "style-to-js": "^1.0.0",
+ "unist-util-position": "^5.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-parse5": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.1.tgz",
+ "integrity": "sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "web-namespaces": "^2.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-string": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-to-string/-/hast-util-to-string-3.0.1.tgz",
+ "integrity": "sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-text": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz",
+ "integrity": "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "hast-util-is-element": "^3.0.0",
+ "unist-util-find-after": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-whitespace": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz",
+ "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hastscript": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz",
+ "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "hast-util-parse-selector": "^4.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/html-void-elements": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz",
+ "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/human-signals": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz",
+ "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==",
+ "engines": {
+ "node": ">=16.17.0"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/inline-style-parser": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz",
+ "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA=="
+ },
+ "node_modules/internmap": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
+ "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/is-alphabetical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz",
+ "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-alphanumerical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz",
+ "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==",
+ "dependencies": {
+ "is-alphabetical": "^2.0.0",
+ "is-decimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-decimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz",
+ "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-docker": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz",
+ "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==",
+ "bin": {
+ "is-docker": "cli.js"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-hexadecimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz",
+ "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-inside-container": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz",
+ "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==",
+ "dependencies": {
+ "is-docker": "^3.0.0"
+ },
+ "bin": {
+ "is-inside-container": "cli.js"
+ },
+ "engines": {
+ "node": ">=14.16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
+ "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-wsl": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.1.tgz",
+ "integrity": "sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==",
+ "dependencies": {
+ "is-inside-container": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is64bit": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is64bit/-/is64bit-2.0.0.tgz",
+ "integrity": "sha512-jv+8jaWCl0g2lSBkNSVXdzfBA0npK1HGC2KtWM9FumFRoGS94g3NbCCLVnCYHLjp4GrW2KZeeSTMo5ddtznmGw==",
+ "dependencies": {
+ "system-architecture": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
+ },
+ "node_modules/jiti": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz",
+ "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==",
+ "bin": {
+ "jiti": "lib/jiti-cli.mjs"
+ }
+ },
+ "node_modules/katex": {
+ "version": "0.16.28",
+ "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.28.tgz",
+ "integrity": "sha512-YHzO7721WbmAL6Ov1uzN/l5mY5WWWhJBSW+jq4tkfZfsxmo1hu6frS0EOswvjBUnWE6NtjEs48SFn5CQESRLZg==",
+ "funding": [
+ "https://opencollective.com/katex",
+ "https://github.com/sponsors/katex"
+ ],
+ "dependencies": {
+ "commander": "^8.3.0"
+ },
+ "bin": {
+ "katex": "cli.js"
+ }
+ },
+ "node_modules/khroma": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz",
+ "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw=="
+ },
+ "node_modules/langium": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/langium/-/langium-4.2.1.tgz",
+ "integrity": "sha512-zu9QWmjpzJcomzdJQAHgDVhLGq5bLosVak1KVa40NzQHXfqr4eAHupvnPOVXEoLkg6Ocefvf/93d//SB7du4YQ==",
+ "dependencies": {
+ "chevrotain": "~11.1.1",
+ "chevrotain-allstar": "~0.3.1",
+ "vscode-languageserver": "~9.0.1",
+ "vscode-languageserver-textdocument": "~1.0.11",
+ "vscode-uri": "~3.1.0"
+ },
+ "engines": {
+ "node": ">=20.10.0",
+ "npm": ">=10.2.3"
+ }
+ },
+ "node_modules/layout-base": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz",
+ "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg=="
+ },
+ "node_modules/lightningcss": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.31.1.tgz",
+ "integrity": "sha512-l51N2r93WmGUye3WuFoN5k10zyvrVs0qfKBhyC5ogUQ6Ew6JUSswh78mbSO+IU3nTWsyOArqPCcShdQSadghBQ==",
+ "dependencies": {
+ "detect-libc": "^2.0.3"
+ },
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ },
+ "optionalDependencies": {
+ "lightningcss-android-arm64": "1.31.1",
+ "lightningcss-darwin-arm64": "1.31.1",
+ "lightningcss-darwin-x64": "1.31.1",
+ "lightningcss-freebsd-x64": "1.31.1",
+ "lightningcss-linux-arm-gnueabihf": "1.31.1",
+ "lightningcss-linux-arm64-gnu": "1.31.1",
+ "lightningcss-linux-arm64-musl": "1.31.1",
+ "lightningcss-linux-x64-gnu": "1.31.1",
+ "lightningcss-linux-x64-musl": "1.31.1",
+ "lightningcss-win32-arm64-msvc": "1.31.1",
+ "lightningcss-win32-x64-msvc": "1.31.1"
+ }
+ },
+ "node_modules/lightningcss-android-arm64": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.31.1.tgz",
+ "integrity": "sha512-HXJF3x8w9nQ4jbXRiNppBCqeZPIAfUo8zE/kOEGbW5NZvGc/K7nMxbhIr+YlFlHW5mpbg/YFPdbnCh1wAXCKFg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-darwin-arm64": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.31.1.tgz",
+ "integrity": "sha512-02uTEqf3vIfNMq3h/z2cJfcOXnQ0GRwQrkmPafhueLb2h7mqEidiCzkE4gBMEH65abHRiQvhdcQ+aP0D0g67sg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-darwin-x64": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.31.1.tgz",
+ "integrity": "sha512-1ObhyoCY+tGxtsz1lSx5NXCj3nirk0Y0kB/g8B8DT+sSx4G9djitg9ejFnjb3gJNWo7qXH4DIy2SUHvpoFwfTA==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-freebsd-x64": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.31.1.tgz",
+ "integrity": "sha512-1RINmQKAItO6ISxYgPwszQE1BrsVU5aB45ho6O42mu96UiZBxEXsuQ7cJW4zs4CEodPUioj/QrXW1r9pLUM74A==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-linux-arm-gnueabihf": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.31.1.tgz",
+ "integrity": "sha512-OOCm2//MZJ87CdDK62rZIu+aw9gBv4azMJuA8/KB74wmfS3lnC4yoPHm0uXZ/dvNNHmnZnB8XLAZzObeG0nS1g==",
+ "cpu": [
+ "arm"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-linux-arm64-gnu": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.31.1.tgz",
+ "integrity": "sha512-WKyLWztD71rTnou4xAD5kQT+982wvca7E6QoLpoawZ1gP9JM0GJj4Tp5jMUh9B3AitHbRZ2/H3W5xQmdEOUlLg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-linux-arm64-musl": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.31.1.tgz",
+ "integrity": "sha512-mVZ7Pg2zIbe3XlNbZJdjs86YViQFoJSpc41CbVmKBPiGmC4YrfeOyz65ms2qpAobVd7WQsbW4PdsSJEMymyIMg==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-linux-x64-gnu": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.31.1.tgz",
+ "integrity": "sha512-xGlFWRMl+0KvUhgySdIaReQdB4FNudfUTARn7q0hh/V67PVGCs3ADFjw+6++kG1RNd0zdGRlEKa+T13/tQjPMA==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-linux-x64-musl": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.31.1.tgz",
+ "integrity": "sha512-eowF8PrKHw9LpoZii5tdZwnBcYDxRw2rRCyvAXLi34iyeYfqCQNA9rmUM0ce62NlPhCvof1+9ivRaTY6pSKDaA==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-win32-arm64-msvc": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.31.1.tgz",
+ "integrity": "sha512-aJReEbSEQzx1uBlQizAOBSjcmr9dCdL3XuC/6HLXAxmtErsj2ICo5yYggg1qOODQMtnjNQv2UHb9NpOuFtYe4w==",
+ "cpu": [
+ "arm64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lightningcss-win32-x64-msvc": {
+ "version": "1.31.1",
+ "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.31.1.tgz",
+ "integrity": "sha512-I9aiFrbd7oYHwlnQDqr1Roz+fTz61oDDJX7n9tYF9FJymH1cIN1DtKw3iYt6b8WZgEjoNwVSncwF4wx/ZedMhw==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
+ }
+ },
+ "node_modules/lodash-es": {
+ "version": "4.17.23",
+ "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.23.tgz",
+ "integrity": "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="
+ },
+ "node_modules/longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/lucide-react": {
+ "version": "0.574.0",
+ "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.574.0.tgz",
+ "integrity": "sha512-dJ8xb5juiZVIbdSn3HTyHsjjIwUwZ4FNwV0RtYDScOyySOeie1oXZTymST6YPJ4Qwt3Po8g4quhYl4OxtACiuQ==",
+ "peerDependencies": {
+ "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/magic-string": {
+ "version": "0.30.21",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz",
+ "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.5"
+ }
+ },
+ "node_modules/markdown-extensions": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz",
+ "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==",
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/markdown-table": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz",
+ "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/marked": {
+ "version": "16.4.2",
+ "resolved": "https://registry.npmjs.org/marked/-/marked-16.4.2.tgz",
+ "integrity": "sha512-TI3V8YYWvkVf3KJe1dRkpnjs68JUPyEa5vjKrp1XEEJUAOaQc+Qj+L1qWbPd0SJuAdQkFU0h73sXXqwDYxsiDA==",
+ "bin": {
+ "marked": "bin/marked.js"
+ },
+ "engines": {
+ "node": ">= 20"
+ }
+ },
+ "node_modules/mathjax-full": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/mathjax-full/-/mathjax-full-3.2.2.tgz",
+ "integrity": "sha512-+LfG9Fik+OuI8SLwsiR02IVdjcnRCy5MufYLi0C3TdMT56L/pjB0alMVGgoWJF8pN9Rc7FESycZB9BMNWIid5w==",
+ "deprecated": "Version 4 replaces this package with the scoped package @mathjax/src",
+ "dependencies": {
+ "esm": "^3.2.25",
+ "mhchemparser": "^4.1.0",
+ "mj-context-menu": "^0.6.1",
+ "speech-rule-engine": "^4.0.6"
+ }
+ },
+ "node_modules/mdast-util-find-and-replace": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz",
+ "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "escape-string-regexp": "^5.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-from-markdown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz",
+ "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark": "^4.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-frontmatter": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz",
+ "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "escape-string-regexp": "^5.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "micromark-extension-frontmatter": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz",
+ "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==",
+ "dependencies": {
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-gfm-autolink-literal": "^2.0.0",
+ "mdast-util-gfm-footnote": "^2.0.0",
+ "mdast-util-gfm-strikethrough": "^2.0.0",
+ "mdast-util-gfm-table": "^2.0.0",
+ "mdast-util-gfm-task-list-item": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-autolink-literal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz",
+ "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "ccount": "^2.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-find-and-replace": "^3.0.0",
+ "micromark-util-character": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-footnote": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz",
+ "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.1.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-strikethrough": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz",
+ "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-table": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz",
+ "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "markdown-table": "^3.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-task-list-item": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz",
+ "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-math": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-math/-/mdast-util-math-3.0.0.tgz",
+ "integrity": "sha512-Tl9GBNeG/AhJnQM221bJR2HPvLOSnLE/T9cJI9tlc6zwQk2nPk/4f0cHkOdEixQPC/j8UtKDdITswvLAy1OZ1w==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.1.0",
+ "unist-util-remove-position": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz",
+ "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==",
+ "dependencies": {
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-mdx-expression": "^2.0.0",
+ "mdast-util-mdx-jsx": "^3.0.0",
+ "mdast-util-mdxjs-esm": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-expression": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz",
+ "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz",
+ "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "ccount": "^2.0.0",
+ "devlop": "^1.1.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "parse-entities": "^4.0.0",
+ "stringify-entities": "^4.0.0",
+ "unist-util-stringify-position": "^4.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdxjs-esm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz",
+ "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-phrasing": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz",
+ "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast": {
+ "version": "13.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz",
+ "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "trim-lines": "^3.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz",
+ "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-phrasing": "^4.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "unist-util-visit": "^5.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz",
+ "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/mermaid": {
+ "version": "11.12.3",
+ "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.12.3.tgz",
+ "integrity": "sha512-wN5ZSgJQIC+CHJut9xaKWsknLxaFBwCPwPkGTSUYrTiHORWvpT8RxGk849HPnpUAQ+/9BPRqYb80jTpearrHzQ==",
+ "dependencies": {
+ "@braintree/sanitize-url": "^7.1.1",
+ "@iconify/utils": "^3.0.1",
+ "@mermaid-js/parser": "^1.0.0",
+ "@types/d3": "^7.4.3",
+ "cytoscape": "^3.29.3",
+ "cytoscape-cose-bilkent": "^4.1.0",
+ "cytoscape-fcose": "^2.2.0",
+ "d3": "^7.9.0",
+ "d3-sankey": "^0.12.3",
+ "dagre-d3-es": "7.0.13",
+ "dayjs": "^1.11.18",
+ "dompurify": "^3.2.5",
+ "katex": "^0.16.22",
+ "khroma": "^2.1.0",
+ "lodash-es": "^4.17.23",
+ "marked": "^16.2.1",
+ "roughjs": "^4.6.6",
+ "stylis": "^4.3.6",
+ "ts-dedent": "^2.2.0",
+ "uuid": "^11.1.0"
+ }
+ },
+ "node_modules/mhchemparser": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/mhchemparser/-/mhchemparser-4.2.1.tgz",
+ "integrity": "sha512-kYmyrCirqJf3zZ9t/0wGgRZ4/ZJw//VwaRVGA75C4nhE60vtnIzhl9J9ndkX/h6hxSN7pjg/cE0VxbnNM+bnDQ=="
+ },
+ "node_modules/micromark": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz",
+ "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-core-commonmark": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz",
+ "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-factory-destination": "^2.0.0",
+ "micromark-factory-label": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-factory-title": "^2.0.0",
+ "micromark-factory-whitespace": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-html-tag-name": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-extension-frontmatter": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz",
+ "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==",
+ "dependencies": {
+ "fault": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz",
+ "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==",
+ "dependencies": {
+ "micromark-extension-gfm-autolink-literal": "^2.0.0",
+ "micromark-extension-gfm-footnote": "^2.0.0",
+ "micromark-extension-gfm-strikethrough": "^2.0.0",
+ "micromark-extension-gfm-table": "^2.0.0",
+ "micromark-extension-gfm-tagfilter": "^2.0.0",
+ "micromark-extension-gfm-task-list-item": "^2.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-autolink-literal": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz",
+ "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-footnote": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz",
+ "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-strikethrough": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz",
+ "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-table": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz",
+ "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-tagfilter": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz",
+ "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==",
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-task-list-item": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz",
+ "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-math": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-math/-/micromark-extension-math-3.1.0.tgz",
+ "integrity": "sha512-lvEqd+fHjATVs+2v/8kg9i5Q0AP2k85H0WUOwpIVvUML8BapsMvh1XAogmQjOCsLpoKRCVQqEkQBB3NhVBcsOg==",
+ "dependencies": {
+ "@types/katex": "^0.16.0",
+ "devlop": "^1.0.0",
+ "katex": "^0.16.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdx-expression": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.1.tgz",
+ "integrity": "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-factory-mdx-expression": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-events-to-acorn": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-extension-mdx-jsx": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.2.tgz",
+ "integrity": "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "micromark-factory-mdx-expression": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-events-to-acorn": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdx-md": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz",
+ "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==",
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdxjs": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz",
+ "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==",
+ "dependencies": {
+ "acorn": "^8.0.0",
+ "acorn-jsx": "^5.0.0",
+ "micromark-extension-mdx-expression": "^3.0.0",
+ "micromark-extension-mdx-jsx": "^3.0.0",
+ "micromark-extension-mdx-md": "^2.0.0",
+ "micromark-extension-mdxjs-esm": "^3.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdxjs-esm": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz",
+ "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-events-to-acorn": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unist-util-position-from-estree": "^2.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-factory-destination": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz",
+ "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-label": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz",
+ "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-mdx-expression": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.3.tgz",
+ "integrity": "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-events-to-acorn": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unist-util-position-from-estree": "^2.0.0",
+ "vfile-message": "^4.0.0"
+ }
+ },
+ "node_modules/micromark-factory-space": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz",
+ "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-title": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz",
+ "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-whitespace": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz",
+ "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-character": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz",
+ "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-chunked": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz",
+ "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-classify-character": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz",
+ "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-combine-extensions": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz",
+ "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-numeric-character-reference": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz",
+ "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-string": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz",
+ "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-encode": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz",
+ "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/micromark-util-events-to-acorn": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.3.tgz",
+ "integrity": "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/unist": "^3.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-visit": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "vfile-message": "^4.0.0"
+ }
+ },
+ "node_modules/micromark-util-html-tag-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz",
+ "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/micromark-util-normalize-identifier": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz",
+ "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-resolve-all": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz",
+ "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-sanitize-uri": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz",
+ "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-subtokenize": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz",
+ "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-symbol": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz",
+ "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/micromark-util-types": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz",
+ "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "dependencies": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mimic-fn": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
+ "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/mj-context-menu": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/mj-context-menu/-/mj-context-menu-0.6.1.tgz",
+ "integrity": "sha512-7NO5s6n10TIV96d4g2uDpG7ZDpIhMh0QNfGdJw/W47JswFcosz457wqz/b5sAKvl12sxINGFCn80NZHKwxQEXA=="
+ },
+ "node_modules/mlly": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.8.0.tgz",
+ "integrity": "sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==",
+ "dependencies": {
+ "acorn": "^8.15.0",
+ "pathe": "^2.0.3",
+ "pkg-types": "^1.3.1",
+ "ufo": "^1.6.1"
+ }
+ },
+ "node_modules/motion-dom": {
+ "version": "12.34.2",
+ "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.34.2.tgz",
+ "integrity": "sha512-n7gknp7gHcW7DUcmet0JVPLVHmE3j9uWwDp5VbE3IkCNnW5qdu0mOhjNYzXMkrQjrgr+h6Db3EDM2QBhW2qNxQ==",
+ "dependencies": {
+ "motion-utils": "^12.29.2"
+ }
+ },
+ "node_modules/motion-utils": {
+ "version": "12.29.2",
+ "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-12.29.2.tgz",
+ "integrity": "sha512-G3kc34H2cX2gI63RqU+cZq+zWRRPSsNIOjpdl9TN4AQwC4sgwYPl/Q/Obf/d53nOm569T0fYK+tcoSV50BWx8A=="
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/negotiator": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz",
+ "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/next": {
+ "version": "15.5.12",
+ "resolved": "https://registry.npmjs.org/next/-/next-15.5.12.tgz",
+ "integrity": "sha512-Fi/wQ4Etlrn60rz78bebG1i1SR20QxvV8tVp6iJspjLUSHcZoeUXCt+vmWoEcza85ElZzExK/jJ/F6SvtGktjA==",
+ "dependencies": {
+ "@next/env": "15.5.12",
+ "@swc/helpers": "0.5.15",
+ "caniuse-lite": "^1.0.30001579",
+ "postcss": "8.4.31",
+ "styled-jsx": "5.1.6"
+ },
+ "bin": {
+ "next": "dist/bin/next"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^19.8.0 || >= 20.0.0"
+ },
+ "optionalDependencies": {
+ "@next/swc-darwin-arm64": "15.5.12",
+ "@next/swc-darwin-x64": "15.5.12",
+ "@next/swc-linux-arm64-gnu": "15.5.12",
+ "@next/swc-linux-arm64-musl": "15.5.12",
+ "@next/swc-linux-x64-gnu": "15.5.12",
+ "@next/swc-linux-x64-musl": "15.5.12",
+ "@next/swc-win32-arm64-msvc": "15.5.12",
+ "@next/swc-win32-x64-msvc": "15.5.12",
+ "sharp": "^0.34.3"
+ },
+ "peerDependencies": {
+ "@opentelemetry/api": "^1.1.0",
+ "@playwright/test": "^1.51.1",
+ "babel-plugin-react-compiler": "*",
+ "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
+ "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
+ "sass": "^1.3.0"
+ },
+ "peerDependenciesMeta": {
+ "@opentelemetry/api": {
+ "optional": true
+ },
+ "@playwright/test": {
+ "optional": true
+ },
+ "babel-plugin-react-compiler": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/next-themes": {
+ "version": "0.4.6",
+ "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.6.tgz",
+ "integrity": "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==",
+ "peerDependencies": {
+ "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc"
+ }
+ },
+ "node_modules/next/node_modules/postcss": {
+ "version": "8.4.31",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
+ "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "nanoid": "^3.3.6",
+ "picocolors": "^1.0.0",
+ "source-map-js": "^1.0.2"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/nextra": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/nextra/-/nextra-4.0.0.tgz",
+ "integrity": "sha512-qcJ4cudhl7ooMWaqCtrpfkq93/OqtLzkQn98MncuHTZbjhAuqXQQz4rQqc5AI0GUX97cF//OhDgYjSEVpSRWEg==",
+ "dependencies": {
+ "@formatjs/intl-localematcher": "^0.5.4",
+ "@headlessui/react": "^2.1.2",
+ "@mdx-js/mdx": "^3.0.0",
+ "@napi-rs/simple-git": "^0.1.9",
+ "@shikijs/twoslash": "^1.0.0",
+ "@theguild/remark-mermaid": "^0.2.0",
+ "@theguild/remark-npm2yarn": "^0.3.2",
+ "better-react-mathjax": "^2.0.3",
+ "clsx": "^2.1.0",
+ "estree-util-to-js": "^2.0.0",
+ "estree-util-value-to-estree": "^3.0.1",
+ "fast-glob": "^3.3.2",
+ "github-slugger": "^2.0.0",
+ "hast-util-to-estree": "^3.1.0",
+ "katex": "^0.16.9",
+ "mdast-util-from-markdown": "^2.0.1",
+ "mdast-util-gfm": "^3.0.0",
+ "mdast-util-to-hast": "^13.2.0",
+ "negotiator": "^1.0.0",
+ "react-compiler-runtime": "0.0.0-experimental-22c6e49-20241219",
+ "react-medium-image-zoom": "^5.2.12",
+ "rehype-katex": "^7.0.0",
+ "rehype-pretty-code": "0.14.0",
+ "rehype-raw": "^7.0.0",
+ "remark-frontmatter": "^5.0.0",
+ "remark-gfm": "^4.0.0",
+ "remark-math": "^6.0.0",
+ "remark-reading-time": "^2.0.1",
+ "remark-smartypants": "^3.0.0",
+ "shiki": "^1.0.0",
+ "slash": "^5.1.0",
+ "title": "^4.0.1",
+ "unist-util-remove": "^4.0.0",
+ "unist-util-visit": "^5.0.0",
+ "yaml": "^2.3.2",
+ "zod": "^3.22.3",
+ "zod-validation-error": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "next": ">=14",
+ "react": ">=18",
+ "react-dom": ">=18"
+ }
+ },
+ "node_modules/nextra-theme-docs": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/nextra-theme-docs/-/nextra-theme-docs-4.0.0.tgz",
+ "integrity": "sha512-IgX73GjkSLureZtP9gxanm3/N5hmznyXH4VCuDFZEk1DnX/PDyN77oXoaRYqDY8XaNwZ1EPGvYGd7RcMUiiGWw==",
+ "dependencies": {
+ "@headlessui/react": "^2.1.2",
+ "clsx": "^2.1.0",
+ "next-themes": "^0.4.0",
+ "react-compiler-runtime": "0.0.0-experimental-22c6e49-20241219",
+ "scroll-into-view-if-needed": "^3.1.0",
+ "zod": "^3.22.3",
+ "zod-validation-error": "^3.0.0",
+ "zustand": "^5.0.1"
+ },
+ "peerDependencies": {
+ "next": ">=14",
+ "nextra": "4.0.0",
+ "react": ">=18",
+ "react-dom": ">=18"
+ }
+ },
+ "node_modules/nextra/node_modules/@theguild/remark-mermaid": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/@theguild/remark-mermaid/-/remark-mermaid-0.2.0.tgz",
+ "integrity": "sha512-o8n57TJy0OI4PCrNw8z6S+vpHtrwoQZzTA5Y3fL0U1NDRIoMg/78duWgEBFsCZcWM1G6zjE91yg1aKCsDwgE2Q==",
+ "dependencies": {
+ "mermaid": "^11.0.0",
+ "unist-util-visit": "^5.0.0"
+ },
+ "peerDependencies": {
+ "react": "^18.2.0"
+ }
+ },
+ "node_modules/nlcst-to-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/nlcst-to-string/-/nlcst-to-string-4.0.0.tgz",
+ "integrity": "sha512-YKLBCcUYKAg0FNlOBT6aI91qFmSiFKiluk655WzPF+DDMA02qIyy8uiRqI8QXtcFpEvll12LpL5MXqEmAZ+dcA==",
+ "dependencies": {
+ "@types/nlcst": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true
+ },
+ "node_modules/npm-run-path": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz",
+ "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==",
+ "dependencies": {
+ "path-key": "^4.0.0"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/npm-run-path/node_modules/path-key": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
+ "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/npm-to-yarn": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/npm-to-yarn/-/npm-to-yarn-3.0.1.tgz",
+ "integrity": "sha512-tt6PvKu4WyzPwWUzy/hvPFqn+uwXO0K1ZHka8az3NnrhWJDmSqI8ncWq0fkL0k/lmmi5tAC11FXwXuh0rFbt1A==",
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/nebrelbug/npm-to-yarn?sponsor=1"
+ }
+ },
+ "node_modules/onetime": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
+ "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
+ "dependencies": {
+ "mimic-fn": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/oniguruma-to-es": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-2.3.0.tgz",
+ "integrity": "sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g==",
+ "dependencies": {
+ "emoji-regex-xs": "^1.0.0",
+ "regex": "^5.1.1",
+ "regex-recursion": "^5.1.1"
+ }
+ },
+ "node_modules/package-manager-detector": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-1.6.0.tgz",
+ "integrity": "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA=="
+ },
+ "node_modules/pagefind": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/pagefind/-/pagefind-1.4.0.tgz",
+ "integrity": "sha512-z2kY1mQlL4J8q5EIsQkLzQjilovKzfNVhX8De6oyE6uHpfFtyBaqUpcl/XzJC/4fjD8vBDyh1zolimIcVrCn9g==",
+ "dev": true,
+ "bin": {
+ "pagefind": "lib/runner/bin.cjs"
+ },
+ "optionalDependencies": {
+ "@pagefind/darwin-arm64": "1.4.0",
+ "@pagefind/darwin-x64": "1.4.0",
+ "@pagefind/freebsd-x64": "1.4.0",
+ "@pagefind/linux-arm64": "1.4.0",
+ "@pagefind/linux-x64": "1.4.0",
+ "@pagefind/windows-x64": "1.4.0"
+ }
+ },
+ "node_modules/parse-entities": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz",
+ "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "character-entities-legacy": "^3.0.0",
+ "character-reference-invalid": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "is-alphanumerical": "^2.0.0",
+ "is-decimal": "^2.0.0",
+ "is-hexadecimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-entities/node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="
+ },
+ "node_modules/parse-latin": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/parse-latin/-/parse-latin-7.0.0.tgz",
+ "integrity": "sha512-mhHgobPPua5kZ98EF4HWiH167JWBfl4pvAIXXdbaVohtK7a6YBOy56kvhCqduqyo/f3yrHFWmqmiMg/BkBkYYQ==",
+ "dependencies": {
+ "@types/nlcst": "^2.0.0",
+ "@types/unist": "^3.0.0",
+ "nlcst-to-string": "^4.0.0",
+ "unist-util-modify-children": "^4.0.0",
+ "unist-util-visit-children": "^3.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-numeric-range": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz",
+ "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ=="
+ },
+ "node_modules/parse5": {
+ "version": "7.3.0",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz",
+ "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==",
+ "dependencies": {
+ "entities": "^6.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/inikulin/parse5?sponsor=1"
+ }
+ },
+ "node_modules/path-data-parser": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/path-data-parser/-/path-data-parser-0.1.0.tgz",
+ "integrity": "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w=="
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pathe": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
+ "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pkg-types": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz",
+ "integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==",
+ "dependencies": {
+ "confbox": "^0.1.8",
+ "mlly": "^1.7.4",
+ "pathe": "^2.0.1"
+ }
+ },
+ "node_modules/points-on-curve": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/points-on-curve/-/points-on-curve-0.2.0.tgz",
+ "integrity": "sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A=="
+ },
+ "node_modules/points-on-path": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/points-on-path/-/points-on-path-0.2.1.tgz",
+ "integrity": "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==",
+ "dependencies": {
+ "path-data-parser": "0.1.0",
+ "points-on-curve": "0.2.0"
+ }
+ },
+ "node_modules/postcss": {
+ "version": "8.5.6",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "nanoid": "^3.3.11",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/postcss-value-parser": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
+ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==",
+ "dev": true
+ },
+ "node_modules/property-information": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
+ "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/react": {
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz",
+ "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-compiler-runtime": {
+ "version": "0.0.0-experimental-22c6e49-20241219",
+ "resolved": "https://registry.npmjs.org/react-compiler-runtime/-/react-compiler-runtime-0.0.0-experimental-22c6e49-20241219.tgz",
+ "integrity": "sha512-bOAGaRL1ldfIIpbDsl+uV025Ta6RS6/cOjvvh8r2Vo7KtqB+RSvihVYRsWQz7ECKNPWdq5MClS845acwAwieDw==",
+ "peerDependencies": {
+ "react": "^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz",
+ "integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==",
+ "dependencies": {
+ "scheduler": "^0.27.0"
+ },
+ "peerDependencies": {
+ "react": "^19.2.3"
+ }
+ },
+ "node_modules/react-medium-image-zoom": {
+ "version": "5.4.0",
+ "resolved": "https://registry.npmjs.org/react-medium-image-zoom/-/react-medium-image-zoom-5.4.0.tgz",
+ "integrity": "sha512-BsE+EnFVQzFIlyuuQrZ9iTwyKpKkqdFZV1ImEQN573QPqGrIUuNni7aF+sZwDcxlsuOMayCr6oO/PZR/yJnbRg==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/rpearce"
+ }
+ ],
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/reading-time": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz",
+ "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg=="
+ },
+ "node_modules/recma-build-jsx": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz",
+ "integrity": "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "estree-util-build-jsx": "^3.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/recma-jsx": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.1.tgz",
+ "integrity": "sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==",
+ "dependencies": {
+ "acorn-jsx": "^5.0.0",
+ "estree-util-to-js": "^2.0.0",
+ "recma-parse": "^1.0.0",
+ "recma-stringify": "^1.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/recma-parse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz",
+ "integrity": "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "esast-util-from-js": "^2.0.0",
+ "unified": "^11.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/recma-stringify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz",
+ "integrity": "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "estree-util-to-js": "^2.0.0",
+ "unified": "^11.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/regex": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/regex/-/regex-5.1.1.tgz",
+ "integrity": "sha512-dN5I359AVGPnwzJm2jN1k0W9LPZ+ePvoOeVMMfqIMFz53sSwXkxaJoxr50ptnsC771lK95BnTrVSZxq0b9yCGw==",
+ "dependencies": {
+ "regex-utilities": "^2.3.0"
+ }
+ },
+ "node_modules/regex-recursion": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-5.1.1.tgz",
+ "integrity": "sha512-ae7SBCbzVNrIjgSbh7wMznPcQel1DNlDtzensnFxpiNpXt1U2ju/bHugH422r+4LAVS1FpW1YCwilmnNsjum9w==",
+ "dependencies": {
+ "regex": "^5.1.1",
+ "regex-utilities": "^2.3.0"
+ }
+ },
+ "node_modules/regex-utilities": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/regex-utilities/-/regex-utilities-2.3.0.tgz",
+ "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng=="
+ },
+ "node_modules/rehype-katex": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz",
+ "integrity": "sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/katex": "^0.16.0",
+ "hast-util-from-html-isomorphic": "^2.0.0",
+ "hast-util-to-text": "^4.0.0",
+ "katex": "^0.16.0",
+ "unist-util-visit-parents": "^6.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rehype-parse": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/rehype-parse/-/rehype-parse-9.0.1.tgz",
+ "integrity": "sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "hast-util-from-html": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rehype-pretty-code": {
+ "version": "0.14.0",
+ "resolved": "https://registry.npmjs.org/rehype-pretty-code/-/rehype-pretty-code-0.14.0.tgz",
+ "integrity": "sha512-hBeKF/Wkkf3zyUS8lal9RCUuhypDWLQc+h9UrP9Pav25FUm/AQAVh4m5gdvJxh4Oz+U+xKvdsV01p1LdvsZTiQ==",
+ "dependencies": {
+ "@types/hast": "^3.0.4",
+ "hast-util-to-string": "^3.0.0",
+ "parse-numeric-range": "^1.3.0",
+ "rehype-parse": "^9.0.0",
+ "unified": "^11.0.5",
+ "unist-util-visit": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "shiki": "^1.3.0"
+ }
+ },
+ "node_modules/rehype-raw": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz",
+ "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "hast-util-raw": "^9.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rehype-recma": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz",
+ "integrity": "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "hast-util-to-estree": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-frontmatter": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz",
+ "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-frontmatter": "^2.0.0",
+ "micromark-extension-frontmatter": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-gfm": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz",
+ "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-gfm": "^3.0.0",
+ "micromark-extension-gfm": "^3.0.0",
+ "remark-parse": "^11.0.0",
+ "remark-stringify": "^11.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-math": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/remark-math/-/remark-math-6.0.0.tgz",
+ "integrity": "sha512-MMqgnP74Igy+S3WwnhQ7kqGlEerTETXMvJhrUzDikVZ2/uogJCb+WHUg97hK9/jcfc0dkD73s3LN8zU49cTEtA==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-math": "^3.0.0",
+ "micromark-extension-math": "^3.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-mdx": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.1.tgz",
+ "integrity": "sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==",
+ "dependencies": {
+ "mdast-util-mdx": "^3.0.0",
+ "micromark-extension-mdxjs": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz",
+ "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-reading-time": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/remark-reading-time/-/remark-reading-time-2.0.2.tgz",
+ "integrity": "sha512-ILjIuR0dQQ8pELPgaFvz7ralcSN62rD/L1pTUJgWb4gfua3ZwYEI8mnKGxEQCbrXSUF/OvycTkcUbifGOtOn5A==",
+ "dependencies": {
+ "estree-util-is-identifier-name": "^2.0.0",
+ "estree-util-value-to-estree": "^3.3.3",
+ "reading-time": "^1.3.0",
+ "unist-util-visit": "^3.1.0"
+ }
+ },
+ "node_modules/remark-reading-time/node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="
+ },
+ "node_modules/remark-reading-time/node_modules/estree-util-is-identifier-name": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-2.1.0.tgz",
+ "integrity": "sha512-bEN9VHRyXAUOjkKVQVvArFym08BTWB0aJPppZZr0UNyAqWsLaVfAqP7hbaTJjzHifmB5ebnR8Wm7r7yGN/HonQ==",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-reading-time/node_modules/unist-util-is": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
+ "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
+ "dependencies": {
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-reading-time/node_modules/unist-util-visit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-3.1.0.tgz",
+ "integrity": "sha512-Szoh+R/Ll68QWAyQyZZpQzZQm2UPbxibDvaY8Xc9SUtYgPsDzx5AWSk++UUt2hJuow8mvwR+rG+LQLw+KsuAKA==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-reading-time/node_modules/unist-util-visit-parents": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-4.1.1.tgz",
+ "integrity": "sha512-1xAFJXAKpnnJl8G7K5KgU7FY55y3GcLIXqkzUj5QF/QVP7biUm0K0O2oqVkYsdjzJKifYeWn9+o6piAK2hGSHw==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype": {
+ "version": "11.1.2",
+ "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz",
+ "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "unified": "^11.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-smartypants": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/remark-smartypants/-/remark-smartypants-3.0.2.tgz",
+ "integrity": "sha512-ILTWeOriIluwEvPjv67v7Blgrcx+LZOkAUVtKI3putuhlZm84FnqDORNXPPm+HY3NdZOMhyDwZ1E+eZB/Df5dA==",
+ "dependencies": {
+ "retext": "^9.0.0",
+ "retext-smartypants": "^6.0.0",
+ "unified": "^11.0.4",
+ "unist-util-visit": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=16.0.0"
+ }
+ },
+ "node_modules/remark-stringify": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz",
+ "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/retext": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/retext/-/retext-9.0.0.tgz",
+ "integrity": "sha512-sbMDcpHCNjvlheSgMfEcVrZko3cDzdbe1x/e7G66dFp0Ff7Mldvi2uv6JkJQzdRcvLYE8CA8Oe8siQx8ZOgTcA==",
+ "dependencies": {
+ "@types/nlcst": "^2.0.0",
+ "retext-latin": "^4.0.0",
+ "retext-stringify": "^4.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/retext-latin": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/retext-latin/-/retext-latin-4.0.0.tgz",
+ "integrity": "sha512-hv9woG7Fy0M9IlRQloq/N6atV82NxLGveq+3H2WOi79dtIYWN8OaxogDm77f8YnVXJL2VD3bbqowu5E3EMhBYA==",
+ "dependencies": {
+ "@types/nlcst": "^2.0.0",
+ "parse-latin": "^7.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/retext-smartypants": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/retext-smartypants/-/retext-smartypants-6.2.0.tgz",
+ "integrity": "sha512-kk0jOU7+zGv//kfjXEBjdIryL1Acl4i9XNkHxtM7Tm5lFiCog576fjNC9hjoR7LTKQ0DsPWy09JummSsH1uqfQ==",
+ "dependencies": {
+ "@types/nlcst": "^2.0.0",
+ "nlcst-to-string": "^4.0.0",
+ "unist-util-visit": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/retext-stringify": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/retext-stringify/-/retext-stringify-4.0.0.tgz",
+ "integrity": "sha512-rtfN/0o8kL1e+78+uxPTqu1Klt0yPzKuQ2BfWwwfgIUSayyzxpM1PJzkKt4V8803uB9qSy32MvI7Xep9khTpiA==",
+ "dependencies": {
+ "@types/nlcst": "^2.0.0",
+ "nlcst-to-string": "^4.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/reusify": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
+ "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
+ "engines": {
+ "iojs": ">=1.0.0",
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/robust-predicates": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz",
+ "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg=="
+ },
+ "node_modules/roughjs": {
+ "version": "4.6.6",
+ "resolved": "https://registry.npmjs.org/roughjs/-/roughjs-4.6.6.tgz",
+ "integrity": "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==",
+ "dependencies": {
+ "hachure-fill": "^0.5.2",
+ "path-data-parser": "^0.1.0",
+ "points-on-curve": "^0.2.0",
+ "points-on-path": "^0.2.1"
+ }
+ },
+ "node_modules/run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "dependencies": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "node_modules/rw": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
+ "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ=="
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "node_modules/scheduler": {
+ "version": "0.27.0",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
+ "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="
+ },
+ "node_modules/scroll-into-view-if-needed": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/scroll-into-view-if-needed/-/scroll-into-view-if-needed-3.1.0.tgz",
+ "integrity": "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==",
+ "dependencies": {
+ "compute-scroll-into-view": "^3.0.2"
+ }
+ },
+ "node_modules/semver": {
+ "version": "7.7.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz",
+ "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==",
+ "optional": true,
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/sharp": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.5.tgz",
+ "integrity": "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==",
+ "hasInstallScript": true,
+ "optional": true,
+ "dependencies": {
+ "@img/colour": "^1.0.0",
+ "detect-libc": "^2.1.2",
+ "semver": "^7.7.3"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-darwin-arm64": "0.34.5",
+ "@img/sharp-darwin-x64": "0.34.5",
+ "@img/sharp-libvips-darwin-arm64": "1.2.4",
+ "@img/sharp-libvips-darwin-x64": "1.2.4",
+ "@img/sharp-libvips-linux-arm": "1.2.4",
+ "@img/sharp-libvips-linux-arm64": "1.2.4",
+ "@img/sharp-libvips-linux-ppc64": "1.2.4",
+ "@img/sharp-libvips-linux-riscv64": "1.2.4",
+ "@img/sharp-libvips-linux-s390x": "1.2.4",
+ "@img/sharp-libvips-linux-x64": "1.2.4",
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.4",
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.4",
+ "@img/sharp-linux-arm": "0.34.5",
+ "@img/sharp-linux-arm64": "0.34.5",
+ "@img/sharp-linux-ppc64": "0.34.5",
+ "@img/sharp-linux-riscv64": "0.34.5",
+ "@img/sharp-linux-s390x": "0.34.5",
+ "@img/sharp-linux-x64": "0.34.5",
+ "@img/sharp-linuxmusl-arm64": "0.34.5",
+ "@img/sharp-linuxmusl-x64": "0.34.5",
+ "@img/sharp-wasm32": "0.34.5",
+ "@img/sharp-win32-arm64": "0.34.5",
+ "@img/sharp-win32-ia32": "0.34.5",
+ "@img/sharp-win32-x64": "0.34.5"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shiki": {
+ "version": "1.29.2",
+ "resolved": "https://registry.npmjs.org/shiki/-/shiki-1.29.2.tgz",
+ "integrity": "sha512-njXuliz/cP+67jU2hukkxCNuH1yUi4QfdZZY+sMr5PPrIyXSu5iTb/qYC4BiWWB0vZ+7TbdvYUCeL23zpwCfbg==",
+ "dependencies": {
+ "@shikijs/core": "1.29.2",
+ "@shikijs/engine-javascript": "1.29.2",
+ "@shikijs/engine-oniguruma": "1.29.2",
+ "@shikijs/langs": "1.29.2",
+ "@shikijs/themes": "1.29.2",
+ "@shikijs/types": "1.29.2",
+ "@shikijs/vscode-textmate": "^10.0.1",
+ "@types/hast": "^3.0.4"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/slash": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz",
+ "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==",
+ "engines": {
+ "node": ">=14.16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.7.6",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz",
+ "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==",
+ "engines": {
+ "node": ">= 12"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/space-separated-tokens": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz",
+ "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/speech-rule-engine": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/speech-rule-engine/-/speech-rule-engine-4.1.2.tgz",
+ "integrity": "sha512-S6ji+flMEga+1QU79NDbwZ8Ivf0S/MpupQQiIC0rTpU/ZTKgcajijJJb1OcByBQDjrXCN1/DJtGz4ZJeBMPGJw==",
+ "dependencies": {
+ "@xmldom/xmldom": "0.9.8",
+ "commander": "13.1.0",
+ "wicked-good-xpath": "1.3.0"
+ },
+ "bin": {
+ "sre": "bin/sre"
+ }
+ },
+ "node_modules/speech-rule-engine/node_modules/commander": {
+ "version": "13.1.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-13.1.0.tgz",
+ "integrity": "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/stringify-entities": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
+ "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==",
+ "dependencies": {
+ "character-entities-html4": "^2.0.0",
+ "character-entities-legacy": "^3.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/strip-final-newline": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
+ "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/style-to-js": {
+ "version": "1.1.21",
+ "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz",
+ "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==",
+ "dependencies": {
+ "style-to-object": "1.0.14"
+ }
+ },
+ "node_modules/style-to-object": {
+ "version": "1.0.14",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz",
+ "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==",
+ "dependencies": {
+ "inline-style-parser": "0.2.7"
+ }
+ },
+ "node_modules/styled-jsx": {
+ "version": "5.1.6",
+ "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz",
+ "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==",
+ "dependencies": {
+ "client-only": "0.0.1"
+ },
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "peerDependencies": {
+ "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "@babel/core": {
+ "optional": true
+ },
+ "babel-plugin-macros": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/stylis": {
+ "version": "4.3.6",
+ "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz",
+ "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ=="
+ },
+ "node_modules/system-architecture": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/system-architecture/-/system-architecture-0.1.0.tgz",
+ "integrity": "sha512-ulAk51I9UVUyJgxlv9M6lFot2WP3e7t8Kz9+IS6D4rVba1tR9kON+Ey69f+1R4Q8cd45Lod6a4IcJIxnzGc/zA==",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/tabbable": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.4.0.tgz",
+ "integrity": "sha512-05PUHKSNE8ou2dwIxTngl4EzcnsCDZGJ/iCLtDflR/SHB/ny14rXc+qU5P4mG9JkusiV7EivzY9Mhm55AzAvCg=="
+ },
+ "node_modules/tailwindcss": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.0.tgz",
+ "integrity": "sha512-yYzTZ4++b7fNYxFfpnberEEKu43w44aqDMNM9MHMmcKuCH7lL8jJ4yJ7LGHv7rSwiqM0nkiobF9I6cLlpS2P7Q=="
+ },
+ "node_modules/tapable": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz",
+ "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==",
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/tinyexec": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz",
+ "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/title": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/title/-/title-4.0.1.tgz",
+ "integrity": "sha512-xRnPkJx9nvE5MF6LkB5e8QJjE2FW8269wTu/LQdf7zZqBgPly0QJPf/CWAo7srj5so4yXfoLEdCFgurlpi47zg==",
+ "dependencies": {
+ "arg": "^5.0.0",
+ "chalk": "^5.0.0",
+ "clipboardy": "^4.0.0"
+ },
+ "bin": {
+ "title": "dist/esm/bin.js"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/trim-lines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz",
+ "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trough": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz",
+ "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/ts-dedent": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz",
+ "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==",
+ "engines": {
+ "node": ">=6.10"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.8.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="
+ },
+ "node_modules/twoslash": {
+ "version": "0.2.12",
+ "resolved": "https://registry.npmjs.org/twoslash/-/twoslash-0.2.12.tgz",
+ "integrity": "sha512-tEHPASMqi7kqwfJbkk7hc/4EhlrKCSLcur+TcvYki3vhIfaRMXnXjaYFgXpoZRbT6GdprD4tGuVBEmTpUgLBsw==",
+ "dependencies": {
+ "@typescript/vfs": "^1.6.0",
+ "twoslash-protocol": "0.2.12"
+ },
+ "peerDependencies": {
+ "typescript": "*"
+ }
+ },
+ "node_modules/twoslash-protocol": {
+ "version": "0.2.12",
+ "resolved": "https://registry.npmjs.org/twoslash-protocol/-/twoslash-protocol-0.2.12.tgz",
+ "integrity": "sha512-5qZLXVYfZ9ABdjqbvPc4RWMr7PrpPaaDSeaYY55vl/w1j6H6kzsWK/urAEIXlzYlyrFmyz1UbwIt+AA0ck+wbg=="
+ },
+ "node_modules/typescript": {
+ "version": "5.9.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
+ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/ufo": {
+ "version": "1.6.3",
+ "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.3.tgz",
+ "integrity": "sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q=="
+ },
+ "node_modules/undici-types": {
+ "version": "7.18.2",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz",
+ "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==",
+ "dev": true
+ },
+ "node_modules/unified": {
+ "version": "11.0.5",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz",
+ "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "bail": "^2.0.0",
+ "devlop": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-find-after": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz",
+ "integrity": "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz",
+ "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-modify-children": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-modify-children/-/unist-util-modify-children-4.0.0.tgz",
+ "integrity": "sha512-+tdN5fGNddvsQdIzUF3Xx82CU9sMM+fA0dLgR9vOmT0oPT2jH+P1nd5lSqfCfXAw+93NhcXNY2qqvTUtE4cQkw==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "array-iterate": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz",
+ "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position-from-estree": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz",
+ "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-remove": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-4.0.0.tgz",
+ "integrity": "sha512-b4gokeGId57UVRX/eVKej5gXqGlc9+trkORhFJpu9raqZkZhU0zm8Doi05+HaiBsMEIJowL+2WtQ5ItjsngPXg==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-remove-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz",
+ "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-visit": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz",
+ "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz",
+ "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-children": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-children/-/unist-util-visit-children-3.0.0.tgz",
+ "integrity": "sha512-RgmdTfSBOg04sdPcpTSD1jzoNBjt9a80/ZCzp5cI9n1qPzLZWF9YdvWGN2zmTumP1HWhXKdUWexjy/Wy/lJ7tA==",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz",
+ "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz",
+ "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/use-sync-external-store": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz",
+ "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/uuid": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz",
+ "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==",
+ "funding": [
+ "https://github.com/sponsors/broofa",
+ "https://github.com/sponsors/ctavan"
+ ],
+ "bin": {
+ "uuid": "dist/esm/bin/uuid"
+ }
+ },
+ "node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-location": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz",
+ "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz",
+ "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vscode-jsonrpc": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz",
+ "integrity": "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==",
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/vscode-languageserver": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.1.tgz",
+ "integrity": "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==",
+ "dependencies": {
+ "vscode-languageserver-protocol": "3.17.5"
+ },
+ "bin": {
+ "installServerIntoExtension": "bin/installServerIntoExtension"
+ }
+ },
+ "node_modules/vscode-languageserver-protocol": {
+ "version": "3.17.5",
+ "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz",
+ "integrity": "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==",
+ "dependencies": {
+ "vscode-jsonrpc": "8.2.0",
+ "vscode-languageserver-types": "3.17.5"
+ }
+ },
+ "node_modules/vscode-languageserver-textdocument": {
+ "version": "1.0.12",
+ "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.12.tgz",
+ "integrity": "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA=="
+ },
+ "node_modules/vscode-languageserver-types": {
+ "version": "3.17.5",
+ "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz",
+ "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg=="
+ },
+ "node_modules/vscode-uri": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz",
+ "integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ=="
+ },
+ "node_modules/web-namespaces": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz",
+ "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/wicked-good-xpath": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/wicked-good-xpath/-/wicked-good-xpath-1.3.0.tgz",
+ "integrity": "sha512-Gd9+TUn5nXdwj/hFsPVx5cuHHiF5Bwuc30jZ4+ronF1qHK5O7HD0sgmXWSEgwKquT3ClLoKPVbO6qGwVwLzvAw=="
+ },
+ "node_modules/yaml": {
+ "version": "2.8.2",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz",
+ "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==",
+ "bin": {
+ "yaml": "bin.mjs"
+ },
+ "engines": {
+ "node": ">= 14.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/eemeli"
+ }
+ },
+ "node_modules/zod": {
+ "version": "3.25.76",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
+ "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
+ "node_modules/zod-validation-error": {
+ "version": "3.5.4",
+ "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-3.5.4.tgz",
+ "integrity": "sha512-+hEiRIiPobgyuFlEojnqjJnhFvg4r/i3cqgcm67eehZf/WBaK3g6cD02YU9mtdVxZjv8CzCA9n/Rhrs3yAAvAw==",
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "peerDependencies": {
+ "zod": "^3.24.4"
+ }
+ },
+ "node_modules/zustand": {
+ "version": "5.0.11",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.11.tgz",
+ "integrity": "sha512-fdZY+dk7zn/vbWNCYmzZULHRrss0jx5pPFiOuMZ/5HJN6Yv3u+1Wswy/4MpZEkEGhtNH+pwxZB8OKgUBPzYAGg==",
+ "engines": {
+ "node": ">=12.20.0"
+ },
+ "peerDependencies": {
+ "@types/react": ">=18.0.0",
+ "immer": ">=9.0.6",
+ "react": ">=18.0.0",
+ "use-sync-external-store": ">=1.2.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "immer": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ },
+ "use-sync-external-store": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ }
+ }
+}
diff --git a/docs/package.json b/docs/package.json
new file mode 100644
index 0000000..2025d02
--- /dev/null
+++ b/docs/package.json
@@ -0,0 +1,34 @@
+{
+ "name": "altstack-docs",
+ "version": "1.0.0",
+ "private": true,
+ "description": "The AltStack Documentation Hub",
+ "scripts": {
+ "dev": "next dev -p 3001",
+ "build": "next build",
+ "postbuild": "pagefind --site .next/server/app --output-path public/_pagefind",
+ "start": "next start -p 3001"
+ },
+ "dependencies": {
+ "@next/mdx": "^15.1.0",
+ "@tailwindcss/postcss": "^4.2.0",
+ "@types/mdx": "^2.0.13",
+ "framer-motion": "^12.34.2",
+ "lucide-react": "^0.574.0",
+ "next": "^15.1.0",
+ "next-themes": "^0.4.6",
+ "nextra": "^4.0.0",
+ "nextra-theme-docs": "^4.0.0",
+ "react": "^19.0.0",
+ "react-dom": "^19.0.0"
+ },
+ "devDependencies": {
+ "@types/node": "^25.3.0",
+ "@types/react": "^19.0.0",
+ "autoprefixer": "^10.4.24",
+ "pagefind": "^1.4.0",
+ "postcss": "^8.5.6",
+ "tailwindcss": "^4.2.0",
+ "typescript": "^5.7.0"
+ }
+}
\ No newline at end of file
diff --git a/docs/postcss.config.mjs b/docs/postcss.config.mjs
new file mode 100644
index 0000000..0bf7a0d
--- /dev/null
+++ b/docs/postcss.config.mjs
@@ -0,0 +1,7 @@
+export default {
+ plugins: {
+ '@tailwindcss/postcss': {
+ config: './tailwind.config.js',
+ },
+ },
+}
diff --git a/docs/public/_pagefind/fragment/en_09c8b74.pf_fragment b/docs/public/_pagefind/fragment/en_09c8b74.pf_fragment
new file mode 100644
index 0000000..7780ace
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_09c8b74.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_119336d.pf_fragment b/docs/public/_pagefind/fragment/en_119336d.pf_fragment
new file mode 100644
index 0000000..0cfa633
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_119336d.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_1390751.pf_fragment b/docs/public/_pagefind/fragment/en_1390751.pf_fragment
new file mode 100644
index 0000000..a308676
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_1390751.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_192c427.pf_fragment b/docs/public/_pagefind/fragment/en_192c427.pf_fragment
new file mode 100644
index 0000000..3e2af72
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_192c427.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_196ce1d.pf_fragment b/docs/public/_pagefind/fragment/en_196ce1d.pf_fragment
new file mode 100644
index 0000000..37533c6
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_196ce1d.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_1a6ad7b.pf_fragment b/docs/public/_pagefind/fragment/en_1a6ad7b.pf_fragment
new file mode 100644
index 0000000..03f2533
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_1a6ad7b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_1b49a31.pf_fragment b/docs/public/_pagefind/fragment/en_1b49a31.pf_fragment
new file mode 100644
index 0000000..d85ba23
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_1b49a31.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_1b4ddf4.pf_fragment b/docs/public/_pagefind/fragment/en_1b4ddf4.pf_fragment
new file mode 100644
index 0000000..3ac7c32
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_1b4ddf4.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_1e5bd9c.pf_fragment b/docs/public/_pagefind/fragment/en_1e5bd9c.pf_fragment
new file mode 100644
index 0000000..5e6c488
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_1e5bd9c.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_1ff0976.pf_fragment b/docs/public/_pagefind/fragment/en_1ff0976.pf_fragment
new file mode 100644
index 0000000..fb88745
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_1ff0976.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_2043924.pf_fragment b/docs/public/_pagefind/fragment/en_2043924.pf_fragment
new file mode 100644
index 0000000..7c4b0eb
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_2043924.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_22befb1.pf_fragment b/docs/public/_pagefind/fragment/en_22befb1.pf_fragment
new file mode 100644
index 0000000..a4ddc6a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_22befb1.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_268722a.pf_fragment b/docs/public/_pagefind/fragment/en_268722a.pf_fragment
new file mode 100644
index 0000000..327c0f0
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_268722a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_2864a02.pf_fragment b/docs/public/_pagefind/fragment/en_2864a02.pf_fragment
new file mode 100644
index 0000000..ef97937
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_2864a02.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_2b36d45.pf_fragment b/docs/public/_pagefind/fragment/en_2b36d45.pf_fragment
new file mode 100644
index 0000000..5396c36
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_2b36d45.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_2d7fd13.pf_fragment b/docs/public/_pagefind/fragment/en_2d7fd13.pf_fragment
new file mode 100644
index 0000000..6c72d72
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_2d7fd13.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_2fc9d98.pf_fragment b/docs/public/_pagefind/fragment/en_2fc9d98.pf_fragment
new file mode 100644
index 0000000..98b9d75
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_2fc9d98.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_30c8b08.pf_fragment b/docs/public/_pagefind/fragment/en_30c8b08.pf_fragment
new file mode 100644
index 0000000..f2a57c4
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_30c8b08.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3382e9b.pf_fragment b/docs/public/_pagefind/fragment/en_3382e9b.pf_fragment
new file mode 100644
index 0000000..97d4e32
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3382e9b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_34bb624.pf_fragment b/docs/public/_pagefind/fragment/en_34bb624.pf_fragment
new file mode 100644
index 0000000..6651239
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_34bb624.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_35a38de.pf_fragment b/docs/public/_pagefind/fragment/en_35a38de.pf_fragment
new file mode 100644
index 0000000..fba6c87
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_35a38de.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_35c269c.pf_fragment b/docs/public/_pagefind/fragment/en_35c269c.pf_fragment
new file mode 100644
index 0000000..d77731d
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_35c269c.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_35e4e42.pf_fragment b/docs/public/_pagefind/fragment/en_35e4e42.pf_fragment
new file mode 100644
index 0000000..906bef4
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_35e4e42.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3669a42.pf_fragment b/docs/public/_pagefind/fragment/en_3669a42.pf_fragment
new file mode 100644
index 0000000..3302956
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3669a42.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3878152.pf_fragment b/docs/public/_pagefind/fragment/en_3878152.pf_fragment
new file mode 100644
index 0000000..c2ea398
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3878152.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3983221.pf_fragment b/docs/public/_pagefind/fragment/en_3983221.pf_fragment
new file mode 100644
index 0000000..1693235
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3983221.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3b74b16.pf_fragment b/docs/public/_pagefind/fragment/en_3b74b16.pf_fragment
new file mode 100644
index 0000000..6fc86a2
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3b74b16.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3c28854.pf_fragment b/docs/public/_pagefind/fragment/en_3c28854.pf_fragment
new file mode 100644
index 0000000..bc66c13
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3c28854.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3cd24ec.pf_fragment b/docs/public/_pagefind/fragment/en_3cd24ec.pf_fragment
new file mode 100644
index 0000000..aace488
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3cd24ec.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_3ec63fc.pf_fragment b/docs/public/_pagefind/fragment/en_3ec63fc.pf_fragment
new file mode 100644
index 0000000..84bac51
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_3ec63fc.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_40197ac.pf_fragment b/docs/public/_pagefind/fragment/en_40197ac.pf_fragment
new file mode 100644
index 0000000..01751f4
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_40197ac.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_40b1fa1.pf_fragment b/docs/public/_pagefind/fragment/en_40b1fa1.pf_fragment
new file mode 100644
index 0000000..99fd516
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_40b1fa1.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_41345fb.pf_fragment b/docs/public/_pagefind/fragment/en_41345fb.pf_fragment
new file mode 100644
index 0000000..523faf3
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_41345fb.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_44e54f5.pf_fragment b/docs/public/_pagefind/fragment/en_44e54f5.pf_fragment
new file mode 100644
index 0000000..0ee68dd
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_44e54f5.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_46964b8.pf_fragment b/docs/public/_pagefind/fragment/en_46964b8.pf_fragment
new file mode 100644
index 0000000..a42fa9d
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_46964b8.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_475a571.pf_fragment b/docs/public/_pagefind/fragment/en_475a571.pf_fragment
new file mode 100644
index 0000000..fde0e58
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_475a571.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_4b6e181.pf_fragment b/docs/public/_pagefind/fragment/en_4b6e181.pf_fragment
new file mode 100644
index 0000000..53f66e2
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_4b6e181.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_511e705.pf_fragment b/docs/public/_pagefind/fragment/en_511e705.pf_fragment
new file mode 100644
index 0000000..103938f
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_511e705.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_545574f.pf_fragment b/docs/public/_pagefind/fragment/en_545574f.pf_fragment
new file mode 100644
index 0000000..a958518
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_545574f.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_560e091.pf_fragment b/docs/public/_pagefind/fragment/en_560e091.pf_fragment
new file mode 100644
index 0000000..fe7bc4a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_560e091.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_5748875.pf_fragment b/docs/public/_pagefind/fragment/en_5748875.pf_fragment
new file mode 100644
index 0000000..d8ff494
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_5748875.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_58bf853.pf_fragment b/docs/public/_pagefind/fragment/en_58bf853.pf_fragment
new file mode 100644
index 0000000..6ffc7ed
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_58bf853.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_5b203ae.pf_fragment b/docs/public/_pagefind/fragment/en_5b203ae.pf_fragment
new file mode 100644
index 0000000..6ebd77c
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_5b203ae.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_5b35e74.pf_fragment b/docs/public/_pagefind/fragment/en_5b35e74.pf_fragment
new file mode 100644
index 0000000..a1607ff
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_5b35e74.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_60ae1b3.pf_fragment b/docs/public/_pagefind/fragment/en_60ae1b3.pf_fragment
new file mode 100644
index 0000000..2855502
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_60ae1b3.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_616aeec.pf_fragment b/docs/public/_pagefind/fragment/en_616aeec.pf_fragment
new file mode 100644
index 0000000..421c70e
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_616aeec.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_62335a5.pf_fragment b/docs/public/_pagefind/fragment/en_62335a5.pf_fragment
new file mode 100644
index 0000000..a0af67f
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_62335a5.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_629b7f7.pf_fragment b/docs/public/_pagefind/fragment/en_629b7f7.pf_fragment
new file mode 100644
index 0000000..e5e4943
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_629b7f7.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_632dc9b.pf_fragment b/docs/public/_pagefind/fragment/en_632dc9b.pf_fragment
new file mode 100644
index 0000000..ad16b2e
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_632dc9b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_65f019c.pf_fragment b/docs/public/_pagefind/fragment/en_65f019c.pf_fragment
new file mode 100644
index 0000000..a8b19ee
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_65f019c.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_667bebc.pf_fragment b/docs/public/_pagefind/fragment/en_667bebc.pf_fragment
new file mode 100644
index 0000000..2dbc50f
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_667bebc.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_66dd891.pf_fragment b/docs/public/_pagefind/fragment/en_66dd891.pf_fragment
new file mode 100644
index 0000000..fddd27a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_66dd891.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_6790988.pf_fragment b/docs/public/_pagefind/fragment/en_6790988.pf_fragment
new file mode 100644
index 0000000..fadfcc1
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_6790988.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_6872134.pf_fragment b/docs/public/_pagefind/fragment/en_6872134.pf_fragment
new file mode 100644
index 0000000..a3a6ae2
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_6872134.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_6b1da0a.pf_fragment b/docs/public/_pagefind/fragment/en_6b1da0a.pf_fragment
new file mode 100644
index 0000000..21db965
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_6b1da0a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_6d379d5.pf_fragment b/docs/public/_pagefind/fragment/en_6d379d5.pf_fragment
new file mode 100644
index 0000000..7e3a2ca
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_6d379d5.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_6f46e0c.pf_fragment b/docs/public/_pagefind/fragment/en_6f46e0c.pf_fragment
new file mode 100644
index 0000000..14dafe7
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_6f46e0c.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_6fab986.pf_fragment b/docs/public/_pagefind/fragment/en_6fab986.pf_fragment
new file mode 100644
index 0000000..cd20aee
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_6fab986.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_6fe2f29.pf_fragment b/docs/public/_pagefind/fragment/en_6fe2f29.pf_fragment
new file mode 100644
index 0000000..7894fc3
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_6fe2f29.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_70cacd1.pf_fragment b/docs/public/_pagefind/fragment/en_70cacd1.pf_fragment
new file mode 100644
index 0000000..7506dd1
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_70cacd1.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_71b8658.pf_fragment b/docs/public/_pagefind/fragment/en_71b8658.pf_fragment
new file mode 100644
index 0000000..499c0d6
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_71b8658.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_72bf449.pf_fragment b/docs/public/_pagefind/fragment/en_72bf449.pf_fragment
new file mode 100644
index 0000000..18f83ed
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_72bf449.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_733de33.pf_fragment b/docs/public/_pagefind/fragment/en_733de33.pf_fragment
new file mode 100644
index 0000000..0ff685c
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_733de33.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_745e9b8.pf_fragment b/docs/public/_pagefind/fragment/en_745e9b8.pf_fragment
new file mode 100644
index 0000000..8914de7
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_745e9b8.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_749b696.pf_fragment b/docs/public/_pagefind/fragment/en_749b696.pf_fragment
new file mode 100644
index 0000000..eb9b7bb
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_749b696.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_74a1318.pf_fragment b/docs/public/_pagefind/fragment/en_74a1318.pf_fragment
new file mode 100644
index 0000000..306c5a3
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_74a1318.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_771d4c7.pf_fragment b/docs/public/_pagefind/fragment/en_771d4c7.pf_fragment
new file mode 100644
index 0000000..b417420
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_771d4c7.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_778956a.pf_fragment b/docs/public/_pagefind/fragment/en_778956a.pf_fragment
new file mode 100644
index 0000000..59e103d
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_778956a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_77c672b.pf_fragment b/docs/public/_pagefind/fragment/en_77c672b.pf_fragment
new file mode 100644
index 0000000..fdc9d26
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_77c672b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_7819412.pf_fragment b/docs/public/_pagefind/fragment/en_7819412.pf_fragment
new file mode 100644
index 0000000..1558e6a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_7819412.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_7972422.pf_fragment b/docs/public/_pagefind/fragment/en_7972422.pf_fragment
new file mode 100644
index 0000000..f490d31
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_7972422.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_7a12877.pf_fragment b/docs/public/_pagefind/fragment/en_7a12877.pf_fragment
new file mode 100644
index 0000000..70278e9
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_7a12877.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_7af9e4c.pf_fragment b/docs/public/_pagefind/fragment/en_7af9e4c.pf_fragment
new file mode 100644
index 0000000..a958ed0
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_7af9e4c.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_7dc8cc5.pf_fragment b/docs/public/_pagefind/fragment/en_7dc8cc5.pf_fragment
new file mode 100644
index 0000000..f491404
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_7dc8cc5.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_7dd3b2b.pf_fragment b/docs/public/_pagefind/fragment/en_7dd3b2b.pf_fragment
new file mode 100644
index 0000000..44d63f4
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_7dd3b2b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_7e36b33.pf_fragment b/docs/public/_pagefind/fragment/en_7e36b33.pf_fragment
new file mode 100644
index 0000000..6db6332
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_7e36b33.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_82ceb50.pf_fragment b/docs/public/_pagefind/fragment/en_82ceb50.pf_fragment
new file mode 100644
index 0000000..45335d9
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_82ceb50.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_83b0b8b.pf_fragment b/docs/public/_pagefind/fragment/en_83b0b8b.pf_fragment
new file mode 100644
index 0000000..2de6980
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_83b0b8b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_856d958.pf_fragment b/docs/public/_pagefind/fragment/en_856d958.pf_fragment
new file mode 100644
index 0000000..c094e71
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_856d958.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_86e7fbd.pf_fragment b/docs/public/_pagefind/fragment/en_86e7fbd.pf_fragment
new file mode 100644
index 0000000..789ba42
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_86e7fbd.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_8908cad.pf_fragment b/docs/public/_pagefind/fragment/en_8908cad.pf_fragment
new file mode 100644
index 0000000..e31a02c
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_8908cad.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_898cd6a.pf_fragment b/docs/public/_pagefind/fragment/en_898cd6a.pf_fragment
new file mode 100644
index 0000000..f135160
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_898cd6a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_89f14e3.pf_fragment b/docs/public/_pagefind/fragment/en_89f14e3.pf_fragment
new file mode 100644
index 0000000..38d76a3
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_89f14e3.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_8b62c1c.pf_fragment b/docs/public/_pagefind/fragment/en_8b62c1c.pf_fragment
new file mode 100644
index 0000000..27c2bcd
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_8b62c1c.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_8eacea5.pf_fragment b/docs/public/_pagefind/fragment/en_8eacea5.pf_fragment
new file mode 100644
index 0000000..97dd1f4
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_8eacea5.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_8ede16b.pf_fragment b/docs/public/_pagefind/fragment/en_8ede16b.pf_fragment
new file mode 100644
index 0000000..3bd6e20
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_8ede16b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_8fe8472.pf_fragment b/docs/public/_pagefind/fragment/en_8fe8472.pf_fragment
new file mode 100644
index 0000000..72feabb
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_8fe8472.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_919077f.pf_fragment b/docs/public/_pagefind/fragment/en_919077f.pf_fragment
new file mode 100644
index 0000000..d0ab95a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_919077f.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_9392c5e.pf_fragment b/docs/public/_pagefind/fragment/en_9392c5e.pf_fragment
new file mode 100644
index 0000000..4d1feb0
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_9392c5e.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_93c95ee.pf_fragment b/docs/public/_pagefind/fragment/en_93c95ee.pf_fragment
new file mode 100644
index 0000000..d25cf99
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_93c95ee.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_96f1ecf.pf_fragment b/docs/public/_pagefind/fragment/en_96f1ecf.pf_fragment
new file mode 100644
index 0000000..f6bb6af
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_96f1ecf.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_98fee77.pf_fragment b/docs/public/_pagefind/fragment/en_98fee77.pf_fragment
new file mode 100644
index 0000000..a19f2e9
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_98fee77.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_9ba9e4b.pf_fragment b/docs/public/_pagefind/fragment/en_9ba9e4b.pf_fragment
new file mode 100644
index 0000000..0d06e4d
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_9ba9e4b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_9c4b3c3.pf_fragment b/docs/public/_pagefind/fragment/en_9c4b3c3.pf_fragment
new file mode 100644
index 0000000..bdd6dce
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_9c4b3c3.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_9eaf242.pf_fragment b/docs/public/_pagefind/fragment/en_9eaf242.pf_fragment
new file mode 100644
index 0000000..9d249e1
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_9eaf242.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_a19dea4.pf_fragment b/docs/public/_pagefind/fragment/en_a19dea4.pf_fragment
new file mode 100644
index 0000000..2d2358e
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_a19dea4.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_a83ee6e.pf_fragment b/docs/public/_pagefind/fragment/en_a83ee6e.pf_fragment
new file mode 100644
index 0000000..b08c078
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_a83ee6e.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ab108b3.pf_fragment b/docs/public/_pagefind/fragment/en_ab108b3.pf_fragment
new file mode 100644
index 0000000..270c3e0
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ab108b3.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ab45a7e.pf_fragment b/docs/public/_pagefind/fragment/en_ab45a7e.pf_fragment
new file mode 100644
index 0000000..31a5eb2
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ab45a7e.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ad55db6.pf_fragment b/docs/public/_pagefind/fragment/en_ad55db6.pf_fragment
new file mode 100644
index 0000000..a387666
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ad55db6.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_af2345a.pf_fragment b/docs/public/_pagefind/fragment/en_af2345a.pf_fragment
new file mode 100644
index 0000000..d2082fc
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_af2345a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_b1bbd89.pf_fragment b/docs/public/_pagefind/fragment/en_b1bbd89.pf_fragment
new file mode 100644
index 0000000..2a7db0d
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_b1bbd89.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_b36c575.pf_fragment b/docs/public/_pagefind/fragment/en_b36c575.pf_fragment
new file mode 100644
index 0000000..dfcb286
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_b36c575.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_b55d523.pf_fragment b/docs/public/_pagefind/fragment/en_b55d523.pf_fragment
new file mode 100644
index 0000000..70f0171
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_b55d523.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_b6f698e.pf_fragment b/docs/public/_pagefind/fragment/en_b6f698e.pf_fragment
new file mode 100644
index 0000000..c870e4a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_b6f698e.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_b720b04.pf_fragment b/docs/public/_pagefind/fragment/en_b720b04.pf_fragment
new file mode 100644
index 0000000..72185ec
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_b720b04.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_bb91741.pf_fragment b/docs/public/_pagefind/fragment/en_bb91741.pf_fragment
new file mode 100644
index 0000000..9a2bafc
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_bb91741.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_bc9b196.pf_fragment b/docs/public/_pagefind/fragment/en_bc9b196.pf_fragment
new file mode 100644
index 0000000..9c38b15
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_bc9b196.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_bf8d819.pf_fragment b/docs/public/_pagefind/fragment/en_bf8d819.pf_fragment
new file mode 100644
index 0000000..67a311c
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_bf8d819.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c130b86.pf_fragment b/docs/public/_pagefind/fragment/en_c130b86.pf_fragment
new file mode 100644
index 0000000..80778fe
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c130b86.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c1b84c4.pf_fragment b/docs/public/_pagefind/fragment/en_c1b84c4.pf_fragment
new file mode 100644
index 0000000..b7a7c1c
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c1b84c4.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c347810.pf_fragment b/docs/public/_pagefind/fragment/en_c347810.pf_fragment
new file mode 100644
index 0000000..ed19ea6
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c347810.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c359f7a.pf_fragment b/docs/public/_pagefind/fragment/en_c359f7a.pf_fragment
new file mode 100644
index 0000000..3f88802
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c359f7a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c371838.pf_fragment b/docs/public/_pagefind/fragment/en_c371838.pf_fragment
new file mode 100644
index 0000000..7f0db00
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c371838.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c3dca22.pf_fragment b/docs/public/_pagefind/fragment/en_c3dca22.pf_fragment
new file mode 100644
index 0000000..c57a23b
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c3dca22.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c3f46cc.pf_fragment b/docs/public/_pagefind/fragment/en_c3f46cc.pf_fragment
new file mode 100644
index 0000000..527dc4a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c3f46cc.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c67af1a.pf_fragment b/docs/public/_pagefind/fragment/en_c67af1a.pf_fragment
new file mode 100644
index 0000000..7dc0832
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c67af1a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_c972dc6.pf_fragment b/docs/public/_pagefind/fragment/en_c972dc6.pf_fragment
new file mode 100644
index 0000000..bfc8553
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_c972dc6.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ca168f7.pf_fragment b/docs/public/_pagefind/fragment/en_ca168f7.pf_fragment
new file mode 100644
index 0000000..5376240
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ca168f7.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_cb4d19a.pf_fragment b/docs/public/_pagefind/fragment/en_cb4d19a.pf_fragment
new file mode 100644
index 0000000..52e3f72
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_cb4d19a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_cd5dff2.pf_fragment b/docs/public/_pagefind/fragment/en_cd5dff2.pf_fragment
new file mode 100644
index 0000000..f7a1168
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_cd5dff2.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_cddf89d.pf_fragment b/docs/public/_pagefind/fragment/en_cddf89d.pf_fragment
new file mode 100644
index 0000000..5fe158f
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_cddf89d.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ce45b6d.pf_fragment b/docs/public/_pagefind/fragment/en_ce45b6d.pf_fragment
new file mode 100644
index 0000000..16a7bd7
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ce45b6d.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_d474e86.pf_fragment b/docs/public/_pagefind/fragment/en_d474e86.pf_fragment
new file mode 100644
index 0000000..48ad3b7
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_d474e86.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_d5c1751.pf_fragment b/docs/public/_pagefind/fragment/en_d5c1751.pf_fragment
new file mode 100644
index 0000000..8e2aad4
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_d5c1751.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_d6e2679.pf_fragment b/docs/public/_pagefind/fragment/en_d6e2679.pf_fragment
new file mode 100644
index 0000000..c430b28
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_d6e2679.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_d6fc7ee.pf_fragment b/docs/public/_pagefind/fragment/en_d6fc7ee.pf_fragment
new file mode 100644
index 0000000..6c2f1eb
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_d6fc7ee.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_dbe6ed6.pf_fragment b/docs/public/_pagefind/fragment/en_dbe6ed6.pf_fragment
new file mode 100644
index 0000000..f387ad6
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_dbe6ed6.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_dcd4f0f.pf_fragment b/docs/public/_pagefind/fragment/en_dcd4f0f.pf_fragment
new file mode 100644
index 0000000..b1be009
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_dcd4f0f.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_de61827.pf_fragment b/docs/public/_pagefind/fragment/en_de61827.pf_fragment
new file mode 100644
index 0000000..579a58b
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_de61827.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_e21c227.pf_fragment b/docs/public/_pagefind/fragment/en_e21c227.pf_fragment
new file mode 100644
index 0000000..0ad84f0
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_e21c227.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_e38341b.pf_fragment b/docs/public/_pagefind/fragment/en_e38341b.pf_fragment
new file mode 100644
index 0000000..45c0947
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_e38341b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_e59da4d.pf_fragment b/docs/public/_pagefind/fragment/en_e59da4d.pf_fragment
new file mode 100644
index 0000000..80bfae2
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_e59da4d.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_e72b582.pf_fragment b/docs/public/_pagefind/fragment/en_e72b582.pf_fragment
new file mode 100644
index 0000000..666cb92
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_e72b582.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_e771239.pf_fragment b/docs/public/_pagefind/fragment/en_e771239.pf_fragment
new file mode 100644
index 0000000..e4a4e31
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_e771239.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_e995f94.pf_fragment b/docs/public/_pagefind/fragment/en_e995f94.pf_fragment
new file mode 100644
index 0000000..0325ce9
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_e995f94.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_e9cc062.pf_fragment b/docs/public/_pagefind/fragment/en_e9cc062.pf_fragment
new file mode 100644
index 0000000..086c4eb
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_e9cc062.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ea10d6d.pf_fragment b/docs/public/_pagefind/fragment/en_ea10d6d.pf_fragment
new file mode 100644
index 0000000..53f322a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ea10d6d.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_eafe25e.pf_fragment b/docs/public/_pagefind/fragment/en_eafe25e.pf_fragment
new file mode 100644
index 0000000..cf39374
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_eafe25e.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ee98405.pf_fragment b/docs/public/_pagefind/fragment/en_ee98405.pf_fragment
new file mode 100644
index 0000000..4c20fd2
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ee98405.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_ef70d61.pf_fragment b/docs/public/_pagefind/fragment/en_ef70d61.pf_fragment
new file mode 100644
index 0000000..c460cc1
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_ef70d61.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f0555ca.pf_fragment b/docs/public/_pagefind/fragment/en_f0555ca.pf_fragment
new file mode 100644
index 0000000..657d651
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f0555ca.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f17847a.pf_fragment b/docs/public/_pagefind/fragment/en_f17847a.pf_fragment
new file mode 100644
index 0000000..9709dbf
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f17847a.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f17c8f4.pf_fragment b/docs/public/_pagefind/fragment/en_f17c8f4.pf_fragment
new file mode 100644
index 0000000..6cc9dac
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f17c8f4.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f2b6388.pf_fragment b/docs/public/_pagefind/fragment/en_f2b6388.pf_fragment
new file mode 100644
index 0000000..671e356
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f2b6388.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f2d5cb2.pf_fragment b/docs/public/_pagefind/fragment/en_f2d5cb2.pf_fragment
new file mode 100644
index 0000000..b2cdf7a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f2d5cb2.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f4308a5.pf_fragment b/docs/public/_pagefind/fragment/en_f4308a5.pf_fragment
new file mode 100644
index 0000000..349367f
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f4308a5.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f62dcce.pf_fragment b/docs/public/_pagefind/fragment/en_f62dcce.pf_fragment
new file mode 100644
index 0000000..e38741a
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f62dcce.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f65140b.pf_fragment b/docs/public/_pagefind/fragment/en_f65140b.pf_fragment
new file mode 100644
index 0000000..05565c7
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f65140b.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f675344.pf_fragment b/docs/public/_pagefind/fragment/en_f675344.pf_fragment
new file mode 100644
index 0000000..ecd5afb
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f675344.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f67645f.pf_fragment b/docs/public/_pagefind/fragment/en_f67645f.pf_fragment
new file mode 100644
index 0000000..fb9e911
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f67645f.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f6cd1e5.pf_fragment b/docs/public/_pagefind/fragment/en_f6cd1e5.pf_fragment
new file mode 100644
index 0000000..b3e88f0
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f6cd1e5.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f748ec4.pf_fragment b/docs/public/_pagefind/fragment/en_f748ec4.pf_fragment
new file mode 100644
index 0000000..c02286d
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f748ec4.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_f9f0ab1.pf_fragment b/docs/public/_pagefind/fragment/en_f9f0ab1.pf_fragment
new file mode 100644
index 0000000..2b93680
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_f9f0ab1.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_fc97485.pf_fragment b/docs/public/_pagefind/fragment/en_fc97485.pf_fragment
new file mode 100644
index 0000000..5333880
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_fc97485.pf_fragment differ
diff --git a/docs/public/_pagefind/fragment/en_fede86c.pf_fragment b/docs/public/_pagefind/fragment/en_fede86c.pf_fragment
new file mode 100644
index 0000000..4ec11f3
Binary files /dev/null and b/docs/public/_pagefind/fragment/en_fede86c.pf_fragment differ
diff --git a/docs/public/_pagefind/index/en_25794b2.pf_index b/docs/public/_pagefind/index/en_25794b2.pf_index
new file mode 100644
index 0000000..122ebb7
Binary files /dev/null and b/docs/public/_pagefind/index/en_25794b2.pf_index differ
diff --git a/docs/public/_pagefind/index/en_381724d.pf_index b/docs/public/_pagefind/index/en_381724d.pf_index
new file mode 100644
index 0000000..640e71d
Binary files /dev/null and b/docs/public/_pagefind/index/en_381724d.pf_index differ
diff --git a/docs/public/_pagefind/index/en_49fe39e.pf_index b/docs/public/_pagefind/index/en_49fe39e.pf_index
new file mode 100644
index 0000000..ff35a04
Binary files /dev/null and b/docs/public/_pagefind/index/en_49fe39e.pf_index differ
diff --git a/docs/public/_pagefind/index/en_4aefb7a.pf_index b/docs/public/_pagefind/index/en_4aefb7a.pf_index
new file mode 100644
index 0000000..f3e2783
Binary files /dev/null and b/docs/public/_pagefind/index/en_4aefb7a.pf_index differ
diff --git a/docs/public/_pagefind/index/en_78c6363.pf_index b/docs/public/_pagefind/index/en_78c6363.pf_index
new file mode 100644
index 0000000..c0df5f4
Binary files /dev/null and b/docs/public/_pagefind/index/en_78c6363.pf_index differ
diff --git a/docs/public/_pagefind/index/en_791288e.pf_index b/docs/public/_pagefind/index/en_791288e.pf_index
new file mode 100644
index 0000000..c72d7ff
Binary files /dev/null and b/docs/public/_pagefind/index/en_791288e.pf_index differ
diff --git a/docs/public/_pagefind/index/en_dbe389d.pf_index b/docs/public/_pagefind/index/en_dbe389d.pf_index
new file mode 100644
index 0000000..dfbfa86
Binary files /dev/null and b/docs/public/_pagefind/index/en_dbe389d.pf_index differ
diff --git a/docs/public/_pagefind/index/en_dcccf33.pf_index b/docs/public/_pagefind/index/en_dcccf33.pf_index
new file mode 100644
index 0000000..b9d7395
Binary files /dev/null and b/docs/public/_pagefind/index/en_dcccf33.pf_index differ
diff --git a/docs/public/_pagefind/pagefind-entry.json b/docs/public/_pagefind/pagefind-entry.json
new file mode 100644
index 0000000..cb378e2
--- /dev/null
+++ b/docs/public/_pagefind/pagefind-entry.json
@@ -0,0 +1 @@
+{"version":"1.4.0","languages":{"en":{"hash":"en_1755c4f2e4","wasm":"en","page_count":90}},"include_characters":["_","‿","⁀","⁔","︳","︴","﹍","﹎","﹏","_"]}
\ No newline at end of file
diff --git a/docs/public/_pagefind/pagefind-highlight.js b/docs/public/_pagefind/pagefind-highlight.js
new file mode 100644
index 0000000..b818955
--- /dev/null
+++ b/docs/public/_pagefind/pagefind-highlight.js
@@ -0,0 +1,1064 @@
+var __create = Object.create;
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __commonJS = (cb, mod) => function __require() {
+ return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
+};
+var __copyProps = (to, from, except, desc) => {
+ if (from && typeof from === "object" || typeof from === "function") {
+ for (let key of __getOwnPropNames(from))
+ if (!__hasOwnProp.call(to, key) && key !== except)
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+ }
+ return to;
+};
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+ // If the importer is in node compatibility mode or this is not an ESM
+ // file that has been converted to a CommonJS file using a Babel-
+ // compatible transform (i.e. "__esModule" has not been set), then set
+ // "default" to the CommonJS "module.exports" for node compatibility.
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+ mod
+));
+
+// node_modules/mark.js/dist/mark.js
+var require_mark = __commonJS({
+ "node_modules/mark.js/dist/mark.js"(exports, module) {
+ (function(global, factory) {
+ typeof exports === "object" && typeof module !== "undefined" ? module.exports = factory() : typeof define === "function" && define.amd ? define(factory) : global.Mark = factory();
+ })(exports, (function() {
+ "use strict";
+ var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function(obj) {
+ return typeof obj;
+ } : function(obj) {
+ return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
+ };
+ var classCallCheck = function(instance, Constructor) {
+ if (!(instance instanceof Constructor)) {
+ throw new TypeError("Cannot call a class as a function");
+ }
+ };
+ var createClass = /* @__PURE__ */ (function() {
+ function defineProperties(target, props) {
+ for (var i = 0; i < props.length; i++) {
+ var descriptor = props[i];
+ descriptor.enumerable = descriptor.enumerable || false;
+ descriptor.configurable = true;
+ if ("value" in descriptor) descriptor.writable = true;
+ Object.defineProperty(target, descriptor.key, descriptor);
+ }
+ }
+ return function(Constructor, protoProps, staticProps) {
+ if (protoProps) defineProperties(Constructor.prototype, protoProps);
+ if (staticProps) defineProperties(Constructor, staticProps);
+ return Constructor;
+ };
+ })();
+ var _extends = Object.assign || function(target) {
+ for (var i = 1; i < arguments.length; i++) {
+ var source = arguments[i];
+ for (var key in source) {
+ if (Object.prototype.hasOwnProperty.call(source, key)) {
+ target[key] = source[key];
+ }
+ }
+ }
+ return target;
+ };
+ var DOMIterator = (function() {
+ function DOMIterator2(ctx) {
+ var iframes = arguments.length > 1 && arguments[1] !== void 0 ? arguments[1] : true;
+ var exclude = arguments.length > 2 && arguments[2] !== void 0 ? arguments[2] : [];
+ var iframesTimeout = arguments.length > 3 && arguments[3] !== void 0 ? arguments[3] : 5e3;
+ classCallCheck(this, DOMIterator2);
+ this.ctx = ctx;
+ this.iframes = iframes;
+ this.exclude = exclude;
+ this.iframesTimeout = iframesTimeout;
+ }
+ createClass(DOMIterator2, [{
+ key: "getContexts",
+ value: function getContexts() {
+ var ctx = void 0, filteredCtx = [];
+ if (typeof this.ctx === "undefined" || !this.ctx) {
+ ctx = [];
+ } else if (NodeList.prototype.isPrototypeOf(this.ctx)) {
+ ctx = Array.prototype.slice.call(this.ctx);
+ } else if (Array.isArray(this.ctx)) {
+ ctx = this.ctx;
+ } else if (typeof this.ctx === "string") {
+ ctx = Array.prototype.slice.call(document.querySelectorAll(this.ctx));
+ } else {
+ ctx = [this.ctx];
+ }
+ ctx.forEach(function(ctx2) {
+ var isDescendant = filteredCtx.filter(function(contexts) {
+ return contexts.contains(ctx2);
+ }).length > 0;
+ if (filteredCtx.indexOf(ctx2) === -1 && !isDescendant) {
+ filteredCtx.push(ctx2);
+ }
+ });
+ return filteredCtx;
+ }
+ }, {
+ key: "getIframeContents",
+ value: function getIframeContents(ifr, successFn) {
+ var errorFn = arguments.length > 2 && arguments[2] !== void 0 ? arguments[2] : function() {
+ };
+ var doc = void 0;
+ try {
+ var ifrWin = ifr.contentWindow;
+ doc = ifrWin.document;
+ if (!ifrWin || !doc) {
+ throw new Error("iframe inaccessible");
+ }
+ } catch (e) {
+ errorFn();
+ }
+ if (doc) {
+ successFn(doc);
+ }
+ }
+ }, {
+ key: "isIframeBlank",
+ value: function isIframeBlank(ifr) {
+ var bl = "about:blank", src = ifr.getAttribute("src").trim(), href = ifr.contentWindow.location.href;
+ return href === bl && src !== bl && src;
+ }
+ }, {
+ key: "observeIframeLoad",
+ value: function observeIframeLoad(ifr, successFn, errorFn) {
+ var _this = this;
+ var called = false, tout = null;
+ var listener = function listener2() {
+ if (called) {
+ return;
+ }
+ called = true;
+ clearTimeout(tout);
+ try {
+ if (!_this.isIframeBlank(ifr)) {
+ ifr.removeEventListener("load", listener2);
+ _this.getIframeContents(ifr, successFn, errorFn);
+ }
+ } catch (e) {
+ errorFn();
+ }
+ };
+ ifr.addEventListener("load", listener);
+ tout = setTimeout(listener, this.iframesTimeout);
+ }
+ }, {
+ key: "onIframeReady",
+ value: function onIframeReady(ifr, successFn, errorFn) {
+ try {
+ if (ifr.contentWindow.document.readyState === "complete") {
+ if (this.isIframeBlank(ifr)) {
+ this.observeIframeLoad(ifr, successFn, errorFn);
+ } else {
+ this.getIframeContents(ifr, successFn, errorFn);
+ }
+ } else {
+ this.observeIframeLoad(ifr, successFn, errorFn);
+ }
+ } catch (e) {
+ errorFn();
+ }
+ }
+ }, {
+ key: "waitForIframes",
+ value: function waitForIframes(ctx, done) {
+ var _this2 = this;
+ var eachCalled = 0;
+ this.forEachIframe(ctx, function() {
+ return true;
+ }, function(ifr) {
+ eachCalled++;
+ _this2.waitForIframes(ifr.querySelector("html"), function() {
+ if (!--eachCalled) {
+ done();
+ }
+ });
+ }, function(handled) {
+ if (!handled) {
+ done();
+ }
+ });
+ }
+ }, {
+ key: "forEachIframe",
+ value: function forEachIframe(ctx, filter, each) {
+ var _this3 = this;
+ var end = arguments.length > 3 && arguments[3] !== void 0 ? arguments[3] : function() {
+ };
+ var ifr = ctx.querySelectorAll("iframe"), open = ifr.length, handled = 0;
+ ifr = Array.prototype.slice.call(ifr);
+ var checkEnd = function checkEnd2() {
+ if (--open <= 0) {
+ end(handled);
+ }
+ };
+ if (!open) {
+ checkEnd();
+ }
+ ifr.forEach(function(ifr2) {
+ if (DOMIterator2.matches(ifr2, _this3.exclude)) {
+ checkEnd();
+ } else {
+ _this3.onIframeReady(ifr2, function(con) {
+ if (filter(ifr2)) {
+ handled++;
+ each(con);
+ }
+ checkEnd();
+ }, checkEnd);
+ }
+ });
+ }
+ }, {
+ key: "createIterator",
+ value: function createIterator(ctx, whatToShow, filter) {
+ return document.createNodeIterator(ctx, whatToShow, filter, false);
+ }
+ }, {
+ key: "createInstanceOnIframe",
+ value: function createInstanceOnIframe(contents) {
+ return new DOMIterator2(contents.querySelector("html"), this.iframes);
+ }
+ }, {
+ key: "compareNodeIframe",
+ value: function compareNodeIframe(node, prevNode, ifr) {
+ var compCurr = node.compareDocumentPosition(ifr), prev = Node.DOCUMENT_POSITION_PRECEDING;
+ if (compCurr & prev) {
+ if (prevNode !== null) {
+ var compPrev = prevNode.compareDocumentPosition(ifr), after = Node.DOCUMENT_POSITION_FOLLOWING;
+ if (compPrev & after) {
+ return true;
+ }
+ } else {
+ return true;
+ }
+ }
+ return false;
+ }
+ }, {
+ key: "getIteratorNode",
+ value: function getIteratorNode(itr) {
+ var prevNode = itr.previousNode();
+ var node = void 0;
+ if (prevNode === null) {
+ node = itr.nextNode();
+ } else {
+ node = itr.nextNode() && itr.nextNode();
+ }
+ return {
+ prevNode,
+ node
+ };
+ }
+ }, {
+ key: "checkIframeFilter",
+ value: function checkIframeFilter(node, prevNode, currIfr, ifr) {
+ var key = false, handled = false;
+ ifr.forEach(function(ifrDict, i) {
+ if (ifrDict.val === currIfr) {
+ key = i;
+ handled = ifrDict.handled;
+ }
+ });
+ if (this.compareNodeIframe(node, prevNode, currIfr)) {
+ if (key === false && !handled) {
+ ifr.push({
+ val: currIfr,
+ handled: true
+ });
+ } else if (key !== false && !handled) {
+ ifr[key].handled = true;
+ }
+ return true;
+ }
+ if (key === false) {
+ ifr.push({
+ val: currIfr,
+ handled: false
+ });
+ }
+ return false;
+ }
+ }, {
+ key: "handleOpenIframes",
+ value: function handleOpenIframes(ifr, whatToShow, eCb, fCb) {
+ var _this4 = this;
+ ifr.forEach(function(ifrDict) {
+ if (!ifrDict.handled) {
+ _this4.getIframeContents(ifrDict.val, function(con) {
+ _this4.createInstanceOnIframe(con).forEachNode(whatToShow, eCb, fCb);
+ });
+ }
+ });
+ }
+ }, {
+ key: "iterateThroughNodes",
+ value: function iterateThroughNodes(whatToShow, ctx, eachCb, filterCb, doneCb) {
+ var _this5 = this;
+ var itr = this.createIterator(ctx, whatToShow, filterCb);
+ var ifr = [], elements = [], node = void 0, prevNode = void 0, retrieveNodes = function retrieveNodes2() {
+ var _getIteratorNode = _this5.getIteratorNode(itr);
+ prevNode = _getIteratorNode.prevNode;
+ node = _getIteratorNode.node;
+ return node;
+ };
+ while (retrieveNodes()) {
+ if (this.iframes) {
+ this.forEachIframe(ctx, function(currIfr) {
+ return _this5.checkIframeFilter(node, prevNode, currIfr, ifr);
+ }, function(con) {
+ _this5.createInstanceOnIframe(con).forEachNode(whatToShow, function(ifrNode) {
+ return elements.push(ifrNode);
+ }, filterCb);
+ });
+ }
+ elements.push(node);
+ }
+ elements.forEach(function(node2) {
+ eachCb(node2);
+ });
+ if (this.iframes) {
+ this.handleOpenIframes(ifr, whatToShow, eachCb, filterCb);
+ }
+ doneCb();
+ }
+ }, {
+ key: "forEachNode",
+ value: function forEachNode(whatToShow, each, filter) {
+ var _this6 = this;
+ var done = arguments.length > 3 && arguments[3] !== void 0 ? arguments[3] : function() {
+ };
+ var contexts = this.getContexts();
+ var open = contexts.length;
+ if (!open) {
+ done();
+ }
+ contexts.forEach(function(ctx) {
+ var ready = function ready2() {
+ _this6.iterateThroughNodes(whatToShow, ctx, each, filter, function() {
+ if (--open <= 0) {
+ done();
+ }
+ });
+ };
+ if (_this6.iframes) {
+ _this6.waitForIframes(ctx, ready);
+ } else {
+ ready();
+ }
+ });
+ }
+ }], [{
+ key: "matches",
+ value: function matches(element, selector) {
+ var selectors = typeof selector === "string" ? [selector] : selector, fn = element.matches || element.matchesSelector || element.msMatchesSelector || element.mozMatchesSelector || element.oMatchesSelector || element.webkitMatchesSelector;
+ if (fn) {
+ var match = false;
+ selectors.every(function(sel) {
+ if (fn.call(element, sel)) {
+ match = true;
+ return false;
+ }
+ return true;
+ });
+ return match;
+ } else {
+ return false;
+ }
+ }
+ }]);
+ return DOMIterator2;
+ })();
+ var Mark$1 = (function() {
+ function Mark3(ctx) {
+ classCallCheck(this, Mark3);
+ this.ctx = ctx;
+ this.ie = false;
+ var ua = window.navigator.userAgent;
+ if (ua.indexOf("MSIE") > -1 || ua.indexOf("Trident") > -1) {
+ this.ie = true;
+ }
+ }
+ createClass(Mark3, [{
+ key: "log",
+ value: function log(msg) {
+ var level = arguments.length > 1 && arguments[1] !== void 0 ? arguments[1] : "debug";
+ var log2 = this.opt.log;
+ if (!this.opt.debug) {
+ return;
+ }
+ if ((typeof log2 === "undefined" ? "undefined" : _typeof(log2)) === "object" && typeof log2[level] === "function") {
+ log2[level]("mark.js: " + msg);
+ }
+ }
+ }, {
+ key: "escapeStr",
+ value: function escapeStr(str) {
+ return str.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g, "\\$&");
+ }
+ }, {
+ key: "createRegExp",
+ value: function createRegExp(str) {
+ if (this.opt.wildcards !== "disabled") {
+ str = this.setupWildcardsRegExp(str);
+ }
+ str = this.escapeStr(str);
+ if (Object.keys(this.opt.synonyms).length) {
+ str = this.createSynonymsRegExp(str);
+ }
+ if (this.opt.ignoreJoiners || this.opt.ignorePunctuation.length) {
+ str = this.setupIgnoreJoinersRegExp(str);
+ }
+ if (this.opt.diacritics) {
+ str = this.createDiacriticsRegExp(str);
+ }
+ str = this.createMergedBlanksRegExp(str);
+ if (this.opt.ignoreJoiners || this.opt.ignorePunctuation.length) {
+ str = this.createJoinersRegExp(str);
+ }
+ if (this.opt.wildcards !== "disabled") {
+ str = this.createWildcardsRegExp(str);
+ }
+ str = this.createAccuracyRegExp(str);
+ return str;
+ }
+ }, {
+ key: "createSynonymsRegExp",
+ value: function createSynonymsRegExp(str) {
+ var syn = this.opt.synonyms, sens = this.opt.caseSensitive ? "" : "i", joinerPlaceholder = this.opt.ignoreJoiners || this.opt.ignorePunctuation.length ? "\0" : "";
+ for (var index in syn) {
+ if (syn.hasOwnProperty(index)) {
+ var value = syn[index], k1 = this.opt.wildcards !== "disabled" ? this.setupWildcardsRegExp(index) : this.escapeStr(index), k2 = this.opt.wildcards !== "disabled" ? this.setupWildcardsRegExp(value) : this.escapeStr(value);
+ if (k1 !== "" && k2 !== "") {
+ str = str.replace(new RegExp("(" + this.escapeStr(k1) + "|" + this.escapeStr(k2) + ")", "gm" + sens), joinerPlaceholder + ("(" + this.processSynomyms(k1) + "|") + (this.processSynomyms(k2) + ")") + joinerPlaceholder);
+ }
+ }
+ }
+ return str;
+ }
+ }, {
+ key: "processSynomyms",
+ value: function processSynomyms(str) {
+ if (this.opt.ignoreJoiners || this.opt.ignorePunctuation.length) {
+ str = this.setupIgnoreJoinersRegExp(str);
+ }
+ return str;
+ }
+ }, {
+ key: "setupWildcardsRegExp",
+ value: function setupWildcardsRegExp(str) {
+ str = str.replace(/(?:\\)*\?/g, function(val) {
+ return val.charAt(0) === "\\" ? "?" : "";
+ });
+ return str.replace(/(?:\\)*\*/g, function(val) {
+ return val.charAt(0) === "\\" ? "*" : "";
+ });
+ }
+ }, {
+ key: "createWildcardsRegExp",
+ value: function createWildcardsRegExp(str) {
+ var spaces = this.opt.wildcards === "withSpaces";
+ return str.replace(/\u0001/g, spaces ? "[\\S\\s]?" : "\\S?").replace(/\u0002/g, spaces ? "[\\S\\s]*?" : "\\S*");
+ }
+ }, {
+ key: "setupIgnoreJoinersRegExp",
+ value: function setupIgnoreJoinersRegExp(str) {
+ return str.replace(/[^(|)\\]/g, function(val, indx, original) {
+ var nextChar = original.charAt(indx + 1);
+ if (/[(|)\\]/.test(nextChar) || nextChar === "") {
+ return val;
+ } else {
+ return val + "\0";
+ }
+ });
+ }
+ }, {
+ key: "createJoinersRegExp",
+ value: function createJoinersRegExp(str) {
+ var joiner = [];
+ var ignorePunctuation = this.opt.ignorePunctuation;
+ if (Array.isArray(ignorePunctuation) && ignorePunctuation.length) {
+ joiner.push(this.escapeStr(ignorePunctuation.join("")));
+ }
+ if (this.opt.ignoreJoiners) {
+ joiner.push("\\u00ad\\u200b\\u200c\\u200d");
+ }
+ return joiner.length ? str.split(/\u0000+/).join("[" + joiner.join("") + "]*") : str;
+ }
+ }, {
+ key: "createDiacriticsRegExp",
+ value: function createDiacriticsRegExp(str) {
+ var sens = this.opt.caseSensitive ? "" : "i", dct = this.opt.caseSensitive ? ["a\xE0\xE1\u1EA3\xE3\u1EA1\u0103\u1EB1\u1EAF\u1EB3\u1EB5\u1EB7\xE2\u1EA7\u1EA5\u1EA9\u1EAB\u1EAD\xE4\xE5\u0101\u0105", "A\xC0\xC1\u1EA2\xC3\u1EA0\u0102\u1EB0\u1EAE\u1EB2\u1EB4\u1EB6\xC2\u1EA6\u1EA4\u1EA8\u1EAA\u1EAC\xC4\xC5\u0100\u0104", "c\xE7\u0107\u010D", "C\xC7\u0106\u010C", "d\u0111\u010F", "D\u0110\u010E", "e\xE8\xE9\u1EBB\u1EBD\u1EB9\xEA\u1EC1\u1EBF\u1EC3\u1EC5\u1EC7\xEB\u011B\u0113\u0119", "E\xC8\xC9\u1EBA\u1EBC\u1EB8\xCA\u1EC0\u1EBE\u1EC2\u1EC4\u1EC6\xCB\u011A\u0112\u0118", "i\xEC\xED\u1EC9\u0129\u1ECB\xEE\xEF\u012B", "I\xCC\xCD\u1EC8\u0128\u1ECA\xCE\xCF\u012A", "l\u0142", "L\u0141", "n\xF1\u0148\u0144", "N\xD1\u0147\u0143", "o\xF2\xF3\u1ECF\xF5\u1ECD\xF4\u1ED3\u1ED1\u1ED5\u1ED7\u1ED9\u01A1\u1EDF\u1EE1\u1EDB\u1EDD\u1EE3\xF6\xF8\u014D", "O\xD2\xD3\u1ECE\xD5\u1ECC\xD4\u1ED2\u1ED0\u1ED4\u1ED6\u1ED8\u01A0\u1EDE\u1EE0\u1EDA\u1EDC\u1EE2\xD6\xD8\u014C", "r\u0159", "R\u0158", "s\u0161\u015B\u0219\u015F", "S\u0160\u015A\u0218\u015E", "t\u0165\u021B\u0163", "T\u0164\u021A\u0162", "u\xF9\xFA\u1EE7\u0169\u1EE5\u01B0\u1EEB\u1EE9\u1EED\u1EEF\u1EF1\xFB\xFC\u016F\u016B", "U\xD9\xDA\u1EE6\u0168\u1EE4\u01AF\u1EEA\u1EE8\u1EEC\u1EEE\u1EF0\xDB\xDC\u016E\u016A", "y\xFD\u1EF3\u1EF7\u1EF9\u1EF5\xFF", "Y\xDD\u1EF2\u1EF6\u1EF8\u1EF4\u0178", "z\u017E\u017C\u017A", "Z\u017D\u017B\u0179"] : ["a\xE0\xE1\u1EA3\xE3\u1EA1\u0103\u1EB1\u1EAF\u1EB3\u1EB5\u1EB7\xE2\u1EA7\u1EA5\u1EA9\u1EAB\u1EAD\xE4\xE5\u0101\u0105A\xC0\xC1\u1EA2\xC3\u1EA0\u0102\u1EB0\u1EAE\u1EB2\u1EB4\u1EB6\xC2\u1EA6\u1EA4\u1EA8\u1EAA\u1EAC\xC4\xC5\u0100\u0104", "c\xE7\u0107\u010DC\xC7\u0106\u010C", "d\u0111\u010FD\u0110\u010E", "e\xE8\xE9\u1EBB\u1EBD\u1EB9\xEA\u1EC1\u1EBF\u1EC3\u1EC5\u1EC7\xEB\u011B\u0113\u0119E\xC8\xC9\u1EBA\u1EBC\u1EB8\xCA\u1EC0\u1EBE\u1EC2\u1EC4\u1EC6\xCB\u011A\u0112\u0118", "i\xEC\xED\u1EC9\u0129\u1ECB\xEE\xEF\u012BI\xCC\xCD\u1EC8\u0128\u1ECA\xCE\xCF\u012A", "l\u0142L\u0141", "n\xF1\u0148\u0144N\xD1\u0147\u0143", 
"o\xF2\xF3\u1ECF\xF5\u1ECD\xF4\u1ED3\u1ED1\u1ED5\u1ED7\u1ED9\u01A1\u1EDF\u1EE1\u1EDB\u1EDD\u1EE3\xF6\xF8\u014DO\xD2\xD3\u1ECE\xD5\u1ECC\xD4\u1ED2\u1ED0\u1ED4\u1ED6\u1ED8\u01A0\u1EDE\u1EE0\u1EDA\u1EDC\u1EE2\xD6\xD8\u014C", "r\u0159R\u0158", "s\u0161\u015B\u0219\u015FS\u0160\u015A\u0218\u015E", "t\u0165\u021B\u0163T\u0164\u021A\u0162", "u\xF9\xFA\u1EE7\u0169\u1EE5\u01B0\u1EEB\u1EE9\u1EED\u1EEF\u1EF1\xFB\xFC\u016F\u016BU\xD9\xDA\u1EE6\u0168\u1EE4\u01AF\u1EEA\u1EE8\u1EEC\u1EEE\u1EF0\xDB\xDC\u016E\u016A", "y\xFD\u1EF3\u1EF7\u1EF9\u1EF5\xFFY\xDD\u1EF2\u1EF6\u1EF8\u1EF4\u0178", "z\u017E\u017C\u017AZ\u017D\u017B\u0179"];
+ var handled = [];
+ str.split("").forEach(function(ch) {
+ dct.every(function(dct2) {
+ if (dct2.indexOf(ch) !== -1) {
+ if (handled.indexOf(dct2) > -1) {
+ return false;
+ }
+ str = str.replace(new RegExp("[" + dct2 + "]", "gm" + sens), "[" + dct2 + "]");
+ handled.push(dct2);
+ }
+ return true;
+ });
+ });
+ return str;
+ }
+ }, {
+ key: "createMergedBlanksRegExp",
+ value: function createMergedBlanksRegExp(str) {
+ return str.replace(/[\s]+/gmi, "[\\s]+");
+ }
+ }, {
+ key: "createAccuracyRegExp",
+ value: function createAccuracyRegExp(str) {
+ var _this = this;
+ var chars = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\xA1\xBF";
+ var acc = this.opt.accuracy, val = typeof acc === "string" ? acc : acc.value, ls = typeof acc === "string" ? [] : acc.limiters, lsJoin = "";
+ ls.forEach(function(limiter) {
+ lsJoin += "|" + _this.escapeStr(limiter);
+ });
+ switch (val) {
+ case "partially":
+ default:
+ return "()(" + str + ")";
+ case "complementary":
+ lsJoin = "\\s" + (lsJoin ? lsJoin : this.escapeStr(chars));
+ return "()([^" + lsJoin + "]*" + str + "[^" + lsJoin + "]*)";
+ case "exactly":
+ return "(^|\\s" + lsJoin + ")(" + str + ")(?=$|\\s" + lsJoin + ")";
+ }
+ }
+ }, {
+ key: "getSeparatedKeywords",
+ value: function getSeparatedKeywords(sv) {
+ var _this2 = this;
+ var stack = [];
+ sv.forEach(function(kw) {
+ if (!_this2.opt.separateWordSearch) {
+ if (kw.trim() && stack.indexOf(kw) === -1) {
+ stack.push(kw);
+ }
+ } else {
+ kw.split(" ").forEach(function(kwSplitted) {
+ if (kwSplitted.trim() && stack.indexOf(kwSplitted) === -1) {
+ stack.push(kwSplitted);
+ }
+ });
+ }
+ });
+ return {
+ "keywords": stack.sort(function(a, b) {
+ return b.length - a.length;
+ }),
+ "length": stack.length
+ };
+ }
+ }, {
+ key: "isNumeric",
+ value: function isNumeric(value) {
+ return Number(parseFloat(value)) == value;
+ }
+ }, {
+ key: "checkRanges",
+ value: function checkRanges(array) {
+ var _this3 = this;
+ if (!Array.isArray(array) || Object.prototype.toString.call(array[0]) !== "[object Object]") {
+ this.log("markRanges() will only accept an array of objects");
+ this.opt.noMatch(array);
+ return [];
+ }
+ var stack = [];
+ var last = 0;
+ array.sort(function(a, b) {
+ return a.start - b.start;
+ }).forEach(function(item) {
+ var _callNoMatchOnInvalid = _this3.callNoMatchOnInvalidRanges(item, last), start = _callNoMatchOnInvalid.start, end = _callNoMatchOnInvalid.end, valid = _callNoMatchOnInvalid.valid;
+ if (valid) {
+ item.start = start;
+ item.length = end - start;
+ stack.push(item);
+ last = end;
+ }
+ });
+ return stack;
+ }
+ }, {
+ key: "callNoMatchOnInvalidRanges",
+ value: function callNoMatchOnInvalidRanges(range, last) {
+ var start = void 0, end = void 0, valid = false;
+ if (range && typeof range.start !== "undefined") {
+ start = parseInt(range.start, 10);
+ end = start + parseInt(range.length, 10);
+ if (this.isNumeric(range.start) && this.isNumeric(range.length) && end - last > 0 && end - start > 0) {
+ valid = true;
+ } else {
+ this.log("Ignoring invalid or overlapping range: " + ("" + JSON.stringify(range)));
+ this.opt.noMatch(range);
+ }
+ } else {
+ this.log("Ignoring invalid range: " + JSON.stringify(range));
+ this.opt.noMatch(range);
+ }
+ return {
+ start,
+ end,
+ valid
+ };
+ }
+ }, {
+ key: "checkWhitespaceRanges",
+ value: function checkWhitespaceRanges(range, originalLength, string) {
+ var end = void 0, valid = true, max = string.length, offset = originalLength - max, start = parseInt(range.start, 10) - offset;
+ start = start > max ? max : start;
+ end = start + parseInt(range.length, 10);
+ if (end > max) {
+ end = max;
+ this.log("End range automatically set to the max value of " + max);
+ }
+ if (start < 0 || end - start < 0 || start > max || end > max) {
+ valid = false;
+ this.log("Invalid range: " + JSON.stringify(range));
+ this.opt.noMatch(range);
+ } else if (string.substring(start, end).replace(/\s+/g, "") === "") {
+ valid = false;
+ this.log("Skipping whitespace only range: " + JSON.stringify(range));
+ this.opt.noMatch(range);
+ }
+ return {
+ start,
+ end,
+ valid
+ };
+ }
+ }, {
+ key: "getTextNodes",
+ value: function getTextNodes(cb) {
+ var _this4 = this;
+ var val = "", nodes = [];
+ this.iterator.forEachNode(NodeFilter.SHOW_TEXT, function(node) {
+ nodes.push({
+ start: val.length,
+ end: (val += node.textContent).length,
+ node
+ });
+ }, function(node) {
+ if (_this4.matchesExclude(node.parentNode)) {
+ return NodeFilter.FILTER_REJECT;
+ } else {
+ return NodeFilter.FILTER_ACCEPT;
+ }
+ }, function() {
+ cb({
+ value: val,
+ nodes
+ });
+ });
+ }
+ }, {
+ key: "matchesExclude",
+ value: function matchesExclude(el) {
+ return DOMIterator.matches(el, this.opt.exclude.concat(["script", "style", "title", "head", "html"]));
+ }
+ }, {
+ key: "wrapRangeInTextNode",
+ value: function wrapRangeInTextNode(node, start, end) {
+ var hEl = !this.opt.element ? "mark" : this.opt.element, startNode = node.splitText(start), ret = startNode.splitText(end - start);
+ var repl = document.createElement(hEl);
+ repl.setAttribute("data-markjs", "true");
+ if (this.opt.className) {
+ repl.setAttribute("class", this.opt.className);
+ }
+ repl.textContent = startNode.textContent;
+ startNode.parentNode.replaceChild(repl, startNode);
+ return ret;
+ }
+ }, {
+ key: "wrapRangeInMappedTextNode",
+ value: function wrapRangeInMappedTextNode(dict, start, end, filterCb, eachCb) {
+ var _this5 = this;
+ dict.nodes.every(function(n, i) {
+ var sibl = dict.nodes[i + 1];
+ if (typeof sibl === "undefined" || sibl.start > start) {
+ if (!filterCb(n.node)) {
+ return false;
+ }
+ var s = start - n.start, e = (end > n.end ? n.end : end) - n.start, startStr = dict.value.substr(0, n.start), endStr = dict.value.substr(e + n.start);
+ n.node = _this5.wrapRangeInTextNode(n.node, s, e);
+ dict.value = startStr + endStr;
+ dict.nodes.forEach(function(k, j) {
+ if (j >= i) {
+ if (dict.nodes[j].start > 0 && j !== i) {
+ dict.nodes[j].start -= e;
+ }
+ dict.nodes[j].end -= e;
+ }
+ });
+ end -= e;
+ eachCb(n.node.previousSibling, n.start);
+ if (end > n.end) {
+ start = n.end;
+ } else {
+ return false;
+ }
+ }
+ return true;
+ });
+ }
+ }, {
+ key: "wrapMatches",
+ value: function wrapMatches(regex, ignoreGroups, filterCb, eachCb, endCb) {
+ var _this6 = this;
+ var matchIdx = ignoreGroups === 0 ? 0 : ignoreGroups + 1;
+ this.getTextNodes(function(dict) {
+ dict.nodes.forEach(function(node) {
+ node = node.node;
+ var match = void 0;
+ while ((match = regex.exec(node.textContent)) !== null && match[matchIdx] !== "") {
+ if (!filterCb(match[matchIdx], node)) {
+ continue;
+ }
+ var pos = match.index;
+ if (matchIdx !== 0) {
+ for (var i = 1; i < matchIdx; i++) {
+ pos += match[i].length;
+ }
+ }
+ node = _this6.wrapRangeInTextNode(node, pos, pos + match[matchIdx].length);
+ eachCb(node.previousSibling);
+ regex.lastIndex = 0;
+ }
+ });
+ endCb();
+ });
+ }
+ }, {
+ key: "wrapMatchesAcrossElements",
+ value: function wrapMatchesAcrossElements(regex, ignoreGroups, filterCb, eachCb, endCb) {
+ var _this7 = this;
+ var matchIdx = ignoreGroups === 0 ? 0 : ignoreGroups + 1;
+ this.getTextNodes(function(dict) {
+ var match = void 0;
+ while ((match = regex.exec(dict.value)) !== null && match[matchIdx] !== "") {
+ var start = match.index;
+ if (matchIdx !== 0) {
+ for (var i = 1; i < matchIdx; i++) {
+ start += match[i].length;
+ }
+ }
+ var end = start + match[matchIdx].length;
+ _this7.wrapRangeInMappedTextNode(dict, start, end, function(node) {
+ return filterCb(match[matchIdx], node);
+ }, function(node, lastIndex) {
+ regex.lastIndex = lastIndex;
+ eachCb(node);
+ });
+ }
+ endCb();
+ });
+ }
+ }, {
+ key: "wrapRangeFromIndex",
+ value: function wrapRangeFromIndex(ranges, filterCb, eachCb, endCb) {
+ var _this8 = this;
+ this.getTextNodes(function(dict) {
+ var originalLength = dict.value.length;
+ ranges.forEach(function(range, counter) {
+ var _checkWhitespaceRange = _this8.checkWhitespaceRanges(range, originalLength, dict.value), start = _checkWhitespaceRange.start, end = _checkWhitespaceRange.end, valid = _checkWhitespaceRange.valid;
+ if (valid) {
+ _this8.wrapRangeInMappedTextNode(dict, start, end, function(node) {
+ return filterCb(node, range, dict.value.substring(start, end), counter);
+ }, function(node) {
+ eachCb(node, range);
+ });
+ }
+ });
+ endCb();
+ });
+ }
+ }, {
+ key: "unwrapMatches",
+ value: function unwrapMatches(node) {
+ var parent = node.parentNode;
+ var docFrag = document.createDocumentFragment();
+ while (node.firstChild) {
+ docFrag.appendChild(node.removeChild(node.firstChild));
+ }
+ parent.replaceChild(docFrag, node);
+ if (!this.ie) {
+ parent.normalize();
+ } else {
+ this.normalizeTextNode(parent);
+ }
+ }
+ }, {
+ key: "normalizeTextNode",
+ value: function normalizeTextNode(node) {
+ if (!node) {
+ return;
+ }
+ if (node.nodeType === 3) {
+ while (node.nextSibling && node.nextSibling.nodeType === 3) {
+ node.nodeValue += node.nextSibling.nodeValue;
+ node.parentNode.removeChild(node.nextSibling);
+ }
+ } else {
+ this.normalizeTextNode(node.firstChild);
+ }
+ this.normalizeTextNode(node.nextSibling);
+ }
+ }, {
+ key: "markRegExp",
+ value: function markRegExp(regexp, opt) {
+ var _this9 = this;
+ this.opt = opt;
+ this.log('Searching with expression "' + regexp + '"');
+ var totalMatches = 0, fn = "wrapMatches";
+ var eachCb = function eachCb2(element) {
+ totalMatches++;
+ _this9.opt.each(element);
+ };
+ if (this.opt.acrossElements) {
+ fn = "wrapMatchesAcrossElements";
+ }
+ this[fn](regexp, this.opt.ignoreGroups, function(match, node) {
+ return _this9.opt.filter(node, match, totalMatches);
+ }, eachCb, function() {
+ if (totalMatches === 0) {
+ _this9.opt.noMatch(regexp);
+ }
+ _this9.opt.done(totalMatches);
+ });
+ }
+ }, {
+ key: "mark",
+ value: function mark(sv, opt) {
+ var _this10 = this;
+ this.opt = opt;
+ var totalMatches = 0, fn = "wrapMatches";
+ var _getSeparatedKeywords = this.getSeparatedKeywords(typeof sv === "string" ? [sv] : sv), kwArr = _getSeparatedKeywords.keywords, kwArrLen = _getSeparatedKeywords.length, sens = this.opt.caseSensitive ? "" : "i", handler = function handler2(kw) {
+ var regex = new RegExp(_this10.createRegExp(kw), "gm" + sens), matches = 0;
+ _this10.log('Searching with expression "' + regex + '"');
+ _this10[fn](regex, 1, function(term, node) {
+ return _this10.opt.filter(node, kw, totalMatches, matches);
+ }, function(element) {
+ matches++;
+ totalMatches++;
+ _this10.opt.each(element);
+ }, function() {
+ if (matches === 0) {
+ _this10.opt.noMatch(kw);
+ }
+ if (kwArr[kwArrLen - 1] === kw) {
+ _this10.opt.done(totalMatches);
+ } else {
+ handler2(kwArr[kwArr.indexOf(kw) + 1]);
+ }
+ });
+ };
+ if (this.opt.acrossElements) {
+ fn = "wrapMatchesAcrossElements";
+ }
+ if (kwArrLen === 0) {
+ this.opt.done(totalMatches);
+ } else {
+ handler(kwArr[0]);
+ }
+ }
+ }, {
+ key: "markRanges",
+ value: function markRanges(rawRanges, opt) {
+ var _this11 = this;
+ this.opt = opt;
+ var totalMatches = 0, ranges = this.checkRanges(rawRanges);
+ if (ranges && ranges.length) {
+ this.log("Starting to mark with the following ranges: " + JSON.stringify(ranges));
+ this.wrapRangeFromIndex(ranges, function(node, range, match, counter) {
+ return _this11.opt.filter(node, range, match, counter);
+ }, function(element, range) {
+ totalMatches++;
+ _this11.opt.each(element, range);
+ }, function() {
+ _this11.opt.done(totalMatches);
+ });
+ } else {
+ this.opt.done(totalMatches);
+ }
+ }
+ }, {
+ key: "unmark",
+ value: function unmark(opt) {
+ var _this12 = this;
+ this.opt = opt;
+ var sel = this.opt.element ? this.opt.element : "*";
+ sel += "[data-markjs]";
+ if (this.opt.className) {
+ sel += "." + this.opt.className;
+ }
+ this.log('Removal selector "' + sel + '"');
+ this.iterator.forEachNode(NodeFilter.SHOW_ELEMENT, function(node) {
+ _this12.unwrapMatches(node);
+ }, function(node) {
+ var matchesSel = DOMIterator.matches(node, sel), matchesExclude = _this12.matchesExclude(node);
+ if (!matchesSel || matchesExclude) {
+ return NodeFilter.FILTER_REJECT;
+ } else {
+ return NodeFilter.FILTER_ACCEPT;
+ }
+ }, this.opt.done);
+ }
+ }, {
+ key: "opt",
+ set: function set$$1(val) {
+ this._opt = _extends({}, {
+ "element": "",
+ "className": "",
+ "exclude": [],
+ "iframes": false,
+ "iframesTimeout": 5e3,
+ "separateWordSearch": true,
+ "diacritics": true,
+ "synonyms": {},
+ "accuracy": "partially",
+ "acrossElements": false,
+ "caseSensitive": false,
+ "ignoreJoiners": false,
+ "ignoreGroups": 0,
+ "ignorePunctuation": [],
+ "wildcards": "disabled",
+ "each": function each() {
+ },
+ "noMatch": function noMatch() {
+ },
+ "filter": function filter() {
+ return true;
+ },
+ "done": function done() {
+ },
+ "debug": false,
+ "log": window.console
+ }, val);
+ },
+ get: function get$$1() {
+ return this._opt;
+ }
+ }, {
+ key: "iterator",
+ get: function get$$1() {
+ return new DOMIterator(this.ctx, this.opt.iframes, this.opt.exclude, this.opt.iframesTimeout);
+ }
+ }]);
+ return Mark3;
+ })();
+ function Mark2(ctx) {
+ var _this = this;
+ var instance = new Mark$1(ctx);
+ this.mark = function(sv, opt) {
+ instance.mark(sv, opt);
+ return _this;
+ };
+ this.markRegExp = function(sv, opt) {
+ instance.markRegExp(sv, opt);
+ return _this;
+ };
+ this.markRanges = function(sv, opt) {
+ instance.markRanges(sv, opt);
+ return _this;
+ };
+ this.unmark = function(opt) {
+ instance.unmark(opt);
+ return _this;
+ };
+ return this;
+ }
+ return Mark2;
+ }));
+ }
+});
+
+// lib/highlight.ts
+var import_mark = __toESM(require_mark(), 1);
+var PagefindHighlight = class {
+ constructor(options = {
+ markContext: null,
+ highlightParam: "pagefind-highlight",
+ markOptions: {
+ className: "pagefind-highlight",
+ exclude: ["[data-pagefind-ignore]", "[data-pagefind-ignore] *"]
+ },
+ addStyles: true
+ }) {
+ var _a, _b;
+ const { highlightParam, markContext, markOptions, addStyles } = options;
+ this.highlightParam = highlightParam ?? "pagefind-highlight";
+ this.addStyles = addStyles ?? true;
+ this.markContext = markContext !== void 0 ? markContext : null;
+ this.markOptions = markOptions !== void 0 ? markOptions : {
+ className: "pagefind-highlight",
+ exclude: ["[data-pagefind-ignore]", "[data-pagefind-ignore] *"]
+ };
+ (_a = this.markOptions).className ?? (_a.className = "pagefind__highlight");
+ (_b = this.markOptions).exclude ?? (_b.exclude = [
+ "[data-pagefind-ignore]",
+ "[data-pagefind-ignore] *"
+ ]);
+ this.markOptions.separateWordSearch = false;
+ this.highlight();
+ }
+ getHighlightParams(paramName) {
+ const urlParams = new URLSearchParams(window.location.search);
+ return urlParams.getAll(paramName);
+ }
+ // Inline styles might be too hard to override
+ addHighlightStyles(className) {
+ if (!className) return;
+ const styleElement = document.createElement("style");
+ styleElement.innerText = `:where(.${className}) { background-color: yellow; color: black; }`;
+ document.head.appendChild(styleElement);
+ }
+ createMarkInstance() {
+ if (this.markContext) {
+ return new import_mark.default(this.markContext);
+ }
+ const pagefindBody = document.querySelectorAll("[data-pagefind-body]");
+ if (pagefindBody.length !== 0) {
+ return new import_mark.default(pagefindBody);
+ } else {
+ return new import_mark.default(document.body);
+ }
+ }
+ markText(instance, text) {
+ instance.mark(text, this.markOptions);
+ }
+ highlight() {
+ const params = this.getHighlightParams(this.highlightParam);
+ if (!params || params.length === 0) return;
+ this.addStyles && this.addHighlightStyles(this.markOptions.className);
+ const markInstance = this.createMarkInstance();
+ this.markText(markInstance, params);
+ }
+};
+window.PagefindHighlight = PagefindHighlight;
+export {
+ PagefindHighlight as default
+};
+/*! Bundled license information:
+
+mark.js/dist/mark.js:
+ (*!***************************************************
+ * mark.js v8.11.1
+ * https://markjs.io/
+ * Copyright (c) 2014–2018, Julian Kühnel
+ * Released under the MIT license https://git.io/vwTVl
+ *****************************************************)
+*/
diff --git a/docs/public/_pagefind/pagefind-modular-ui.css b/docs/public/_pagefind/pagefind-modular-ui.css
new file mode 100644
index 0000000..9c6793e
--- /dev/null
+++ b/docs/public/_pagefind/pagefind-modular-ui.css
@@ -0,0 +1,214 @@
+:root {
+ --pagefind-ui-scale: 0.8;
+ --pagefind-ui-primary: #034AD8;
+ --pagefind-ui-fade: #707070;
+ --pagefind-ui-text: #393939;
+ --pagefind-ui-background: #ffffff;
+ --pagefind-ui-border: #eeeeee;
+ --pagefind-ui-tag: #eeeeee;
+ --pagefind-ui-border-width: 2px;
+ --pagefind-ui-border-radius: 8px;
+ --pagefind-ui-image-border-radius: 8px;
+ --pagefind-ui-image-box-ratio: 3 / 2;
+ --pagefind-ui-font: system, -apple-system, ".SFNSText-Regular",
+ "San Francisco", "Roboto", "Segoe UI", "Helvetica Neue",
+ "Lucida Grande", sans-serif;
+}
+
+[data-pfmod-hidden] {
+ display: none !important;
+}
+
+[data-pfmod-suppressed] {
+ opacity: 0 !important;
+ pointer-events: none !important;
+}
+
+[data-pfmod-sr-hidden] {
+ -webkit-clip: rect(0 0 0 0) !important;
+ clip: rect(0 0 0 0) !important;
+ -webkit-clip-path: inset(100%) !important;
+ clip-path: inset(100%) !important;
+ height: 1px !important;
+ overflow: hidden !important;
+ overflow: clip !important;
+ position: absolute !important;
+ white-space: nowrap !important;
+ width: 1px !important;
+}
+
+[data-pfmod-loading] {
+ color: var(--pagefind-ui-text);
+ background-color: var(--pagefind-ui-text);
+ border-radius: var(--pagefind-ui-border-radius);
+ opacity: 0.1;
+ pointer-events: none;
+}
+
+/* Input */
+
+.pagefind-modular-input-wrapper {
+ position: relative;
+}
+
+.pagefind-modular-input-wrapper::before {
+ background-color: var(--pagefind-ui-text);
+ width: calc(18px * var(--pagefind-ui-scale));
+ height: calc(18px * var(--pagefind-ui-scale));
+ top: calc(23px * var(--pagefind-ui-scale));
+ left: calc(20px * var(--pagefind-ui-scale));
+ content: "";
+ position: absolute;
+ display: block;
+ opacity: 0.7;
+ -webkit-mask-image: url("data:image/svg+xml,%3Csvg width='18' height='18' viewBox='0 0 18 18' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.7549 11.255H11.9649L11.6849 10.985C12.6649 9.845 13.2549 8.365 13.2549 6.755C13.2549 3.165 10.3449 0.255005 6.75488 0.255005C3.16488 0.255005 0.254883 3.165 0.254883 6.755C0.254883 10.345 3.16488 13.255 6.75488 13.255C8.36488 13.255 9.84488 12.665 10.9849 11.685L11.2549 11.965V12.755L16.2549 17.745L17.7449 16.255L12.7549 11.255ZM6.75488 11.255C4.26488 11.255 2.25488 9.245 2.25488 6.755C2.25488 4.26501 4.26488 2.255 6.75488 2.255C9.24488 2.255 11.2549 4.26501 11.2549 6.755C11.2549 9.245 9.24488 11.255 6.75488 11.255Z' fill='%23000000'/%3E%3C/svg%3E%0A");
+ mask-image: url("data:image/svg+xml,%3Csvg width='18' height='18' viewBox='0 0 18 18' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.7549 11.255H11.9649L11.6849 10.985C12.6649 9.845 13.2549 8.365 13.2549 6.755C13.2549 3.165 10.3449 0.255005 6.75488 0.255005C3.16488 0.255005 0.254883 3.165 0.254883 6.755C0.254883 10.345 3.16488 13.255 6.75488 13.255C8.36488 13.255 9.84488 12.665 10.9849 11.685L11.2549 11.965V12.755L16.2549 17.745L17.7449 16.255L12.7549 11.255ZM6.75488 11.255C4.26488 11.255 2.25488 9.245 2.25488 6.755C2.25488 4.26501 4.26488 2.255 6.75488 2.255C9.24488 2.255 11.2549 4.26501 11.2549 6.755C11.2549 9.245 9.24488 11.255 6.75488 11.255Z' fill='%23000000'/%3E%3C/svg%3E%0A");
+ -webkit-mask-size: 100%;
+ mask-size: 100%;
+ z-index: 9;
+ pointer-events: none;
+}
+
+.pagefind-modular-input {
+ height: calc(64px * var(--pagefind-ui-scale));
+ padding: 0 calc(70px * var(--pagefind-ui-scale)) 0 calc(54px * var(--pagefind-ui-scale));
+ background-color: var(--pagefind-ui-background);
+ border: var(--pagefind-ui-border-width) solid var(--pagefind-ui-border);
+ border-radius: var(--pagefind-ui-border-radius);
+ font-size: calc(21px * var(--pagefind-ui-scale));
+ position: relative;
+ appearance: none;
+ -webkit-appearance: none;
+ display: flex;
+ width: 100%;
+ box-sizing: border-box;
+ font-weight: 700;
+}
+
+.pagefind-modular-input::placeholder {
+ opacity: 0.2;
+}
+
+.pagefind-modular-input-clear {
+ position: absolute;
+ top: calc(2px * var(--pagefind-ui-scale));
+ right: calc(2px * var(--pagefind-ui-scale));
+ height: calc(60px * var(--pagefind-ui-scale));
+ border-radius: var(--pagefind-ui-border-radius);
+ padding: 0 calc(15px * var(--pagefind-ui-scale)) 0 calc(2px * var(--pagefind-ui-scale));
+ color: var(--pagefind-ui-text);
+ font-size: calc(14px * var(--pagefind-ui-scale));
+ cursor: pointer;
+ background-color: var(--pagefind-ui-background);
+ border: none;
+ appearance: none;
+}
+
+/* ResultList */
+
+.pagefind-modular-list-result {
+ list-style-type: none;
+ display: flex;
+ align-items: flex-start;
+ gap: min(calc(40px * var(--pagefind-ui-scale)), 3%);
+ padding: calc(30px * var(--pagefind-ui-scale)) 0 calc(40px * var(--pagefind-ui-scale));
+ border-top: solid var(--pagefind-ui-border-width) var(--pagefind-ui-border);
+}
+
+.pagefind-modular-list-result:last-of-type {
+ border-bottom: solid var(--pagefind-ui-border-width) var(--pagefind-ui-border);
+}
+
+.pagefind-modular-list-thumb {
+ width: min(30%,
+ calc((30% - (100px * var(--pagefind-ui-scale))) * 100000));
+ max-width: calc(120px * var(--pagefind-ui-scale));
+ margin-top: calc(10px * var(--pagefind-ui-scale));
+ aspect-ratio: var(--pagefind-ui-image-box-ratio);
+ position: relative;
+}
+
+.pagefind-modular-list-image {
+ display: block;
+ position: absolute;
+ left: 50%;
+ transform: translateX(-50%);
+ font-size: 0;
+ width: auto;
+ height: auto;
+ max-width: 100%;
+ max-height: 100%;
+ border-radius: var(--pagefind-ui-image-border-radius);
+}
+
+.pagefind-modular-list-inner {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ align-items: flex-start;
+ margin-top: calc(10px * var(--pagefind-ui-scale));
+}
+
+.pagefind-modular-list-title {
+ display: inline-block;
+ font-weight: 700;
+ font-size: calc(21px * var(--pagefind-ui-scale));
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+.pagefind-modular-list-link {
+ color: var(--pagefind-ui-text);
+ text-decoration: none;
+}
+
+.pagefind-modular-list-link:hover {
+ text-decoration: underline;
+}
+
+.pagefind-modular-list-excerpt {
+ display: inline-block;
+ font-weight: 400;
+ font-size: calc(16px * var(--pagefind-ui-scale));
+ margin-top: calc(4px * var(--pagefind-ui-scale));
+ margin-bottom: 0;
+ min-width: calc(250px * var(--pagefind-ui-scale));
+}
+
+/* FilterPills */
+
+.pagefind-modular-filter-pills-wrapper {
+ overflow-x: scroll;
+ padding: 15px 0;
+}
+
+.pagefind-modular-filter-pills {
+ display: flex;
+ gap: 6px;
+}
+
+.pagefind-modular-filter-pill {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ border: none;
+ appearance: none;
+ padding: 0 calc(24px * var(--pagefind-ui-scale));
+ background-color: var(--pagefind-ui-background);
+ color: var(--pagefind-ui-fade);
+ border: var(--pagefind-ui-border-width) solid var(--pagefind-ui-border);
+ border-radius: calc(25px * var(--pagefind-ui-scale));
+ font-size: calc(18px * var(--pagefind-ui-scale));
+ height: calc(50px * var(--pagefind-ui-scale));
+ cursor: pointer;
+ white-space: nowrap;
+}
+
+.pagefind-modular-filter-pill:hover {
+ border-color: var(--pagefind-ui-primary);
+}
+
+.pagefind-modular-filter-pill[aria-pressed="true"] {
+ border-color: var(--pagefind-ui-primary);
+ color: var(--pagefind-ui-primary);
+}
\ No newline at end of file
diff --git a/docs/public/_pagefind/pagefind-modular-ui.js b/docs/public/_pagefind/pagefind-modular-ui.js
new file mode 100644
index 0000000..6caacd6
--- /dev/null
+++ b/docs/public/_pagefind/pagefind-modular-ui.js
@@ -0,0 +1,8 @@
+(()=>{var w=Object.defineProperty;var b=(i,e)=>{for(var t in e)w(i,t,{get:e[t],enumerable:!0})};var f={};b(f,{FilterPills:()=>c,Input:()=>a,Instance:()=>p,ResultList:()=>o,Summary:()=>h});var r=class i{constructor(e){this.element=document.createElement(e)}id(e){return this.element.id=e,this}class(e){return this.element.classList.add(e),this}attrs(e){for(let[t,s]of Object.entries(e))this.element.setAttribute(t,s);return this}text(e){return this.element.innerText=e,this}html(e){return this.element.innerHTML=e,this}handle(e,t){return this.element.addEventListener(e,t),this}addTo(e){return e instanceof i?e.element.appendChild(this.element):e.appendChild(this.element),this.element}};var T=async(i=100)=>new Promise(e=>setTimeout(e,i)),a=class{constructor(e={}){if(this.inputEl=null,this.clearEl=null,this.instance=null,this.searchID=0,this.debounceTimeoutMs=e.debounceTimeoutMs??300,e.inputElement){if(e.containerElement){console.warn("[Pagefind Input component]: inputElement and containerElement both supplied. 
Ignoring the container option.");return}this.initExisting(e.inputElement)}else if(e.containerElement)this.initContainer(e.containerElement);else{console.error("[Pagefind Input component]: No selector supplied for containerElement or inputElement");return}this.inputEl.addEventListener("input",async t=>{if(this.instance&&typeof t?.target?.value=="string"){this.updateState(t.target.value);let s=++this.searchID;if(await T(this.debounceTimeoutMs),s!==this.searchID)return null;this.instance?.triggerSearch(t.target.value)}}),this.inputEl.addEventListener("keydown",t=>{t.key==="Escape"&&(++this.searchID,this.inputEl.value="",this.instance?.triggerSearch(""),this.updateState("")),t.key==="Enter"&&t.preventDefault()}),this.inputEl.addEventListener("focus",()=>{this.instance?.triggerLoad()})}initContainer(e){let t=document.querySelector(e);if(!t){console.error(`[Pagefind Input component]: No container found for ${e} selector`);return}if(t.tagName==="INPUT")console.warn(`[Pagefind Input component]: Encountered input element for ${e} when a container was expected`),console.warn("[Pagefind Input component]: Treating containerElement option as inputElement and proceeding"),this.initExisting(e);else{t.innerHTML="";let s=0;for(;document.querySelector(`#pfmod-input-${s}`);)s+=1;let n=new r("form").class("pagefind-modular-input-wrapper").attrs({role:"search","aria-label":"Search this site",action:"javascript:void(0);"});new r("label").attrs({for:`pfmod-input-${s}`,"data-pfmod-sr-hidden":"true"}).text("Search this site").addTo(n),this.inputEl=new r("input").id(`pfmod-input-${s}`).class("pagefind-modular-input").attrs({autocapitalize:"none",enterkeyhint:"search"}).addTo(n),this.clearEl=new r("button").class("pagefind-modular-input-clear").attrs({"data-pfmod-suppressed":"true"}).text("Clear").handle("click",()=>{this.inputEl.value="",this.instance.triggerSearch(""),this.updateState("")}).addTo(n),n.addTo(t)}}initExisting(e){let t=document.querySelector(e);if(!t){console.error(`[Pagefind 
Input component]: No input element found for ${e} selector`);return}if(t.tagName!=="INPUT"){console.error(`[Pagefind Input component]: Expected ${e} to be an element`);return}this.inputEl=t}updateState(e){this.clearEl&&(e&&e?.length?this.clearEl.removeAttribute("data-pfmod-suppressed"):this.clearEl.setAttribute("data-pfmod-suppressed","true"))}register(e){this.instance=e,this.instance.on("search",(t,s)=>{this.inputEl&&document.activeElement!==this.inputEl&&(this.inputEl.value=t,this.updateState(t))})}focus(){this.inputEl&&this.inputEl.focus()}};var g=i=>{if(i instanceof Element)return[i];if(Array.isArray(i)&&i.every(e=>e instanceof Element))return i;if(typeof i=="string"||i instanceof String){let e=document.createElement("div");return e.innerHTML=i,[...e.childNodes]}else return console.error(`[Pagefind ResultList component]: Expected template function to return an HTML element or string, got ${typeof i}`),[]},v=()=>{let i=(e=30)=>". ".repeat(Math.floor(10+Math.random()*e));return`
+
+
+ `},y=(i,e)=>{let t=new r("li").class("pagefind-modular-list-result");if(e){let l=new r("div").class("pagefind-modular-list-thumb").addTo(t);i?.meta?.image&&new r("img").class("pagefind-modular-list-image").attrs({src:i.meta.image,alt:i.meta.image_alt||i.meta.title}).addTo(l)}let s=new r("div").class("pagefind-modular-list-inner").addTo(t),n=new r("p").class("pagefind-modular-list-title").addTo(s);return new r("a").class("pagefind-modular-list-link").text(i.meta?.title).attrs({href:i.meta?.url||i.url}).addTo(n),new r("p").class("pagefind-modular-list-excerpt").html(i.excerpt).addTo(s),t.element},E=i=>{if(!(i instanceof HTMLElement))return null;let e=window.getComputedStyle(i).overflowY;return e!=="visible"&&e!=="hidden"?i:E(i.parentNode)},d=class{constructor(e={}){this.rawResult=e.result,this.placeholderNodes=e.placeholderNodes,this.resultFn=e.resultFn,this.intersectionEl=e.intersectionEl,this.showImages=e.showImages,this.result=null,this.waitForIntersection()}waitForIntersection(){if(!this.placeholderNodes?.length)return;let e={root:this.intersectionEl,rootMargin:"0px",threshold:.01};new IntersectionObserver((s,n)=>{this.result===null&&s?.[0]?.isIntersecting&&(this.load(),n.disconnect())},e).observe(this.placeholderNodes[0])}async load(){if(!this.placeholderNodes?.length)return;this.result=await this.rawResult.data();let e=this.resultFn(this.result,this.showImages),t=g(e);for(;this.placeholderNodes.length>1;)this.placeholderNodes.pop().remove();this.placeholderNodes[0].replaceWith(...t)}},o=class{constructor(e){if(this.intersectionEl=document.body,this.containerEl=null,this.results=[],this.placeholderTemplate=e.placeholderTemplate??v,this.resultTemplate=e.resultTemplate??y,this.showImages=e.showImages??!0,e.containerElement)this.initContainer(e.containerElement);else{console.error("[Pagefind ResultList component]: No selector supplied for containerElement");return}}initContainer(e){let t=document.querySelector(e);if(!t){console.error(`[Pagefind ResultList 
component]: No container found for ${e} selector`);return}this.containerEl=t}append(e){for(let t of e)this.containerEl.appendChild(t)}register(e){e.on("results",t=>{this.containerEl&&(this.containerEl.innerHTML="",this.intersectionEl=E(this.containerEl),this.results=t.results.map(s=>{let n=g(this.placeholderTemplate());return this.append(n),new d({result:s,placeholderNodes:n,resultFn:this.resultTemplate,intersectionEl:this.intersectionEl,showImages:this.showImages})}))}),e.on("loading",()=>{this.containerEl&&(this.containerEl.innerHTML="")})}};var h=class{constructor(e={}){if(this.containerEl=null,this.defaultMessage=e.defaultMessage??"",this.term="",e.containerElement)this.initContainer(e.containerElement);else{console.error("[Pagefind Summary component]: No selector supplied for containerElement");return}}initContainer(e){let t=document.querySelector(e);if(!t){console.error(`[Pagefind Summary component]: No container found for ${e} selector`);return}this.containerEl=t,this.containerEl.innerText=this.defaultMessage}register(e){e.on("search",(t,s)=>{this.term=t}),e.on("results",t=>{if(!this.containerEl||!t)return;if(!this.term){this.containerEl.innerText=this.defaultMessage;return}let s=t?.results?.length??0;this.containerEl.innerText=`${s} result${s===1?"":"s"} for ${this.term}`}),e.on("loading",()=>{this.containerEl&&(this.containerEl.innerText=`Searching for ${this.term}...`)})}};var c=class{constructor(e={}){if(this.instance=null,this.wrapper=null,this.pillContainer=null,this.available={},this.selected=["All"],this.total=0,this.filterMemo="",this.filter=e.filter,this.ordering=e.ordering??null,this.alwaysShow=e.alwaysShow??!1,this.selectMultiple=e.selectMultiple??!1,!this.filter?.length){console.error("[Pagefind FilterPills component]: No filter option supplied, nothing to display");return}if(e.containerElement)this.initContainer(e.containerElement);else{console.error("[Pagefind FilterPills component]: No selector supplied for 
containerElement");return}}initContainer(e){let t=document.querySelector(e);if(!t){console.error(`[Pagefind FilterPills component]: No container found for ${e} selector`);return}t.innerHTML="";let s=`pagefind_modular_filter_pills_${this.filter}`,n=new r("div").class("pagefind-modular-filter-pills-wrapper").attrs({role:"group","aria-labelledby":s});this.alwaysShow||n.attrs({"data-pfmod-hidden":!0}),new r("div").id(s).class("pagefind-modular-filter-pills-label").attrs({"data-pfmod-sr-hidden":!0}).text(`Filter results by ${this.filter}`).addTo(n),this.pillContainer=new r("div").class("pagefind-modular-filter-pills").addTo(n),this.wrapper=n.addTo(t)}update(){let e=this.available.map(t=>t[0]).join("~");e==this.filterMemo?this.updateExisting():(this.renderNew(),this.filterMemo=e)}pushFilters(){let e=this.selected.filter(t=>t!=="All");this.instance.triggerFilter(this.filter,e)}pillInner(e,t){return this.total?`${e} (${t}) `:`${e} `}renderNew(){this.available.forEach(([e,t])=>{new r("button").class("pagefind-modular-filter-pill").html(this.pillInner(e,t)).attrs({"aria-pressed":this.selected.includes(e),type:"button"}).handle("click",()=>{e==="All"?this.selected=["All"]:this.selected.includes(e)?this.selected=this.selected.filter(s=>s!==e):this.selectMultiple?this.selected.push(e):this.selected=[e],this.selected?.length?this.selected?.length>1&&(this.selected=this.selected.filter(s=>s!=="All")):this.selected=["All"],this.update(),this.pushFilters()}).addTo(this.pillContainer)})}updateExisting(){let e=[...this.pillContainer.childNodes];this.available.forEach(([t,s],n)=>{e[n].innerHTML=this.pillInner(t,s),e[n].setAttribute("aria-pressed",this.selected.includes(t))})}register(e){this.instance=e,this.instance.on("filters",t=>{if(!this.pillContainer)return;this.selectMultiple?t=t.available:t=t.total;let s=t[this.filter];if(!s){console.warn(`[Pagefind FilterPills component]: No possible values found for the ${this.filter} 
filter`);return}this.available=Object.entries(s),Array.isArray(this.ordering)?this.available.sort((n,l)=>{let m=this.ordering.indexOf(n[0]),_=this.ordering.indexOf(l[0]);return(m===-1?1/0:m)-(_===-1?1/0:_)}):this.available.sort((n,l)=>n[0].localeCompare(l[0])),this.available.unshift(["All",this.total]),this.update()}),e.on("results",t=>{this.pillContainer&&(this.total=t?.unfilteredResultCount||0,this.available?.[0]?.[0]==="All"&&(this.available[0][1]=this.total),this.total||this.alwaysShow?this.wrapper.removeAttribute("data-pfmod-hidden"):this.wrapper.setAttribute("data-pfmod-hidden","true"),this.update())})}};var P=async(i=50)=>await new Promise(e=>setTimeout(e,i)),u;try{document?.currentScript&&document.currentScript.tagName.toUpperCase()==="SCRIPT"&&(u=new URL(document.currentScript.src).pathname.match(/^(.*\/)(?:pagefind-)?modular-ui.js.*$/)[1])}catch{u="/pagefind/"}var p=class{constructor(e={}){this.__pagefind__=null,this.__initializing__=null,this.__searchID__=0,this.__hooks__={search:[],filters:[],loading:[],results:[]},this.components=[],this.searchTerm="",this.searchFilters={},this.searchResult={},this.availableFilters=null,this.totalFilters=null,this.options={bundlePath:e.bundlePath??u,mergeIndex:e.mergeIndex??[]},delete e.bundlePath,delete e.resetStyles,delete e.processResult,delete e.processTerm,delete e.debounceTimeoutMs,delete e.mergeIndex,delete e.translations,this.pagefindOptions=e}add(e){e?.register?.(this),this.components.push(e)}on(e,t){if(!this.__hooks__[e]){let s=Object.keys(this.__hooks__).join(", ");console.error(`[Pagefind Composable]: Unknown event type ${e}. 
Supported events: [${s}]`);return}if(typeof t!="function"){console.error(`[Pagefind Composable]: Expected callback to be a function, received ${typeof t}`);return}this.__hooks__[e].push(t)}triggerLoad(){this.__load__()}triggerSearch(e){this.searchTerm=e,this.__dispatch__("search",e,this.searchFilters),this.__search__(e,this.searchFilters)}triggerSearchWithFilters(e,t){this.searchTerm=e,this.searchFilters=t,this.__dispatch__("search",e,t),this.__search__(e,t)}triggerFilters(e){this.searchFilters=e,this.__dispatch__("search",this.searchTerm,e),this.__search__(this.searchTerm,e)}triggerFilter(e,t){this.searchFilters=this.searchFilters||{},this.searchFilters[e]=t,this.__dispatch__("search",this.searchTerm,this.searchFilters),this.__search__(this.searchTerm,this.searchFilters)}__dispatch__(e,...t){this.__hooks__[e]?.forEach(s=>s?.(...t))}async __clear__(){this.__dispatch__("results",{results:[],unfilteredTotalCount:0}),this.availableFilters=await this.__pagefind__.filters(),this.totalFilters=this.availableFilters,this.__dispatch__("filters",{available:this.availableFilters,total:this.totalFilters})}async __search__(e,t){this.__dispatch__("loading"),await this.__load__();let s=++this.__searchID__;if(!e||!e.length)return this.__clear__();let n=await this.__pagefind__.search(e,{filters:t});n&&this.__searchID__===s&&(n.filters&&Object.keys(n.filters)?.length&&(this.availableFilters=n.filters,this.totalFilters=n.totalFilters,this.__dispatch__("filters",{available:this.availableFilters,total:this.totalFilters})),this.searchResult=n,this.__dispatch__("results",this.searchResult))}async __load__(){if(this.__initializing__){for(;!this.__pagefind__;)await P(50);return}if(this.__initializing__=!0,!this.__pagefind__){let e;try{e=await import(`${this.options.bundlePath}pagefind.js`)}catch(t){console.error(t),console.error([`Pagefind couldn't be loaded from ${this.options.bundlePath}pagefind.js`,"You can configure this by passing a bundlePath option to PagefindComposable 
Instance"].join(`
+`)),document?.currentScript&&document.currentScript.tagName.toUpperCase()==="SCRIPT"?console.error(`[DEBUG: Loaded from ${document.currentScript?.src??"bad script location"}]`):console.error("no known script location")}await e.options(this.pagefindOptions||{});for(let t of this.options.mergeIndex){if(!t.bundlePath)throw new Error("mergeIndex requires a bundlePath parameter");let s=t.bundlePath;delete t.bundlePath,await e.mergeIndex(s,t)}this.__pagefind__=e}this.availableFilters=await this.__pagefind__.filters(),this.totalFilters=this.availableFilters,this.__dispatch__("filters",{available:this.availableFilters,total:this.totalFilters})}};window.PagefindModularUI=f;})();
diff --git a/docs/public/_pagefind/pagefind-ui.css b/docs/public/_pagefind/pagefind-ui.css
new file mode 100644
index 0000000..d7984a9
--- /dev/null
+++ b/docs/public/_pagefind/pagefind-ui.css
@@ -0,0 +1 @@
+.pagefind-ui__result.svelte-j9e30.svelte-j9e30{list-style-type:none;display:flex;align-items:flex-start;gap:min(calc(40px * var(--pagefind-ui-scale)),3%);padding:calc(30px * var(--pagefind-ui-scale)) 0 calc(40px * var(--pagefind-ui-scale));border-top:solid var(--pagefind-ui-border-width) var(--pagefind-ui-border)}.pagefind-ui__result.svelte-j9e30.svelte-j9e30:last-of-type{border-bottom:solid var(--pagefind-ui-border-width) var(--pagefind-ui-border)}.pagefind-ui__result-thumb.svelte-j9e30.svelte-j9e30{width:min(30%,calc((30% - (100px * var(--pagefind-ui-scale))) * 100000));max-width:calc(120px * var(--pagefind-ui-scale));margin-top:calc(10px * var(--pagefind-ui-scale));aspect-ratio:var(--pagefind-ui-image-box-ratio);position:relative}.pagefind-ui__result-image.svelte-j9e30.svelte-j9e30{display:block;position:absolute;left:50%;transform:translate(-50%);font-size:0;width:auto;height:auto;max-width:100%;max-height:100%;border-radius:var(--pagefind-ui-image-border-radius)}.pagefind-ui__result-inner.svelte-j9e30.svelte-j9e30{flex:1;display:flex;flex-direction:column;align-items:flex-start;margin-top:calc(10px * var(--pagefind-ui-scale))}.pagefind-ui__result-title.svelte-j9e30.svelte-j9e30{display:inline-block;font-weight:700;font-size:calc(21px * var(--pagefind-ui-scale));margin-top:0;margin-bottom:0}.pagefind-ui__result-title.svelte-j9e30 .pagefind-ui__result-link.svelte-j9e30{color:var(--pagefind-ui-text);text-decoration:none}.pagefind-ui__result-title.svelte-j9e30 .pagefind-ui__result-link.svelte-j9e30:hover{text-decoration:underline}.pagefind-ui__result-excerpt.svelte-j9e30.svelte-j9e30{display:inline-block;font-weight:400;font-size:calc(16px * var(--pagefind-ui-scale));margin-top:calc(4px * var(--pagefind-ui-scale));margin-bottom:0;min-width:calc(250px * 
var(--pagefind-ui-scale))}.pagefind-ui__loading.svelte-j9e30.svelte-j9e30{color:var(--pagefind-ui-text);background-color:var(--pagefind-ui-text);border-radius:var(--pagefind-ui-border-radius);opacity:.1;pointer-events:none}.pagefind-ui__result-tags.svelte-j9e30.svelte-j9e30{list-style-type:none;padding:0;display:flex;gap:calc(20px * var(--pagefind-ui-scale));flex-wrap:wrap;margin-top:calc(20px * var(--pagefind-ui-scale))}.pagefind-ui__result-tag.svelte-j9e30.svelte-j9e30{padding:calc(4px * var(--pagefind-ui-scale)) calc(8px * var(--pagefind-ui-scale));font-size:calc(14px * var(--pagefind-ui-scale));border-radius:var(--pagefind-ui-border-radius);background-color:var(--pagefind-ui-tag)}.pagefind-ui__result.svelte-4xnkmf.svelte-4xnkmf{list-style-type:none;display:flex;align-items:flex-start;gap:min(calc(40px * var(--pagefind-ui-scale)),3%);padding:calc(30px * var(--pagefind-ui-scale)) 0 calc(40px * var(--pagefind-ui-scale));border-top:solid var(--pagefind-ui-border-width) var(--pagefind-ui-border)}.pagefind-ui__result.svelte-4xnkmf.svelte-4xnkmf:last-of-type{border-bottom:solid var(--pagefind-ui-border-width) var(--pagefind-ui-border)}.pagefind-ui__result-nested.svelte-4xnkmf.svelte-4xnkmf{display:flex;flex-direction:column;padding-left:calc(20px * var(--pagefind-ui-scale))}.pagefind-ui__result-nested.svelte-4xnkmf.svelte-4xnkmf:first-of-type{padding-top:calc(10px * var(--pagefind-ui-scale))}.pagefind-ui__result-nested.svelte-4xnkmf .pagefind-ui__result-link.svelte-4xnkmf{font-size:.9em;position:relative}.pagefind-ui__result-nested.svelte-4xnkmf .pagefind-ui__result-link.svelte-4xnkmf:before{content:"\2937 ";position:absolute;top:0;right:calc(100% + .1em)}.pagefind-ui__result-thumb.svelte-4xnkmf.svelte-4xnkmf{width:min(30%,calc((30% - (100px * var(--pagefind-ui-scale))) * 100000));max-width:calc(120px * var(--pagefind-ui-scale));margin-top:calc(10px * 
var(--pagefind-ui-scale));aspect-ratio:var(--pagefind-ui-image-box-ratio);position:relative}.pagefind-ui__result-image.svelte-4xnkmf.svelte-4xnkmf{display:block;position:absolute;left:50%;transform:translate(-50%);font-size:0;width:auto;height:auto;max-width:100%;max-height:100%;border-radius:var(--pagefind-ui-image-border-radius)}.pagefind-ui__result-inner.svelte-4xnkmf.svelte-4xnkmf{flex:1;display:flex;flex-direction:column;align-items:flex-start;margin-top:calc(10px * var(--pagefind-ui-scale))}.pagefind-ui__result-title.svelte-4xnkmf.svelte-4xnkmf{display:inline-block;font-weight:700;font-size:calc(21px * var(--pagefind-ui-scale));margin-top:0;margin-bottom:0}.pagefind-ui__result-title.svelte-4xnkmf .pagefind-ui__result-link.svelte-4xnkmf{color:var(--pagefind-ui-text);text-decoration:none}.pagefind-ui__result-title.svelte-4xnkmf .pagefind-ui__result-link.svelte-4xnkmf:hover{text-decoration:underline}.pagefind-ui__result-excerpt.svelte-4xnkmf.svelte-4xnkmf{display:inline-block;font-weight:400;font-size:calc(16px * var(--pagefind-ui-scale));margin-top:calc(4px * var(--pagefind-ui-scale));margin-bottom:0;min-width:calc(250px * var(--pagefind-ui-scale))}.pagefind-ui__loading.svelte-4xnkmf.svelte-4xnkmf{color:var(--pagefind-ui-text);background-color:var(--pagefind-ui-text);border-radius:var(--pagefind-ui-border-radius);opacity:.1;pointer-events:none}.pagefind-ui__result-tags.svelte-4xnkmf.svelte-4xnkmf{list-style-type:none;padding:0;display:flex;gap:calc(20px * var(--pagefind-ui-scale));flex-wrap:wrap;margin-top:calc(20px * var(--pagefind-ui-scale))}.pagefind-ui__result-tag.svelte-4xnkmf.svelte-4xnkmf{padding:calc(4px * var(--pagefind-ui-scale)) calc(8px * var(--pagefind-ui-scale));font-size:calc(14px * var(--pagefind-ui-scale));border-radius:var(--pagefind-ui-border-radius);background-color:var(--pagefind-ui-tag)}legend.svelte-1v2r7ls.svelte-1v2r7ls{position:absolute;clip:rect(0 0 0 0)}.pagefind-ui__filter-panel.svelte-1v2r7ls.svelte-1v2r7ls{min-width:min(calc(260px 
* var(--pagefind-ui-scale)),100%);flex:1;display:flex;flex-direction:column;margin-top:calc(20px * var(--pagefind-ui-scale))}.pagefind-ui__filter-group.svelte-1v2r7ls.svelte-1v2r7ls{border:0;padding:0}.pagefind-ui__filter-block.svelte-1v2r7ls.svelte-1v2r7ls{padding:0;display:block;border-bottom:solid calc(2px * var(--pagefind-ui-scale)) var(--pagefind-ui-border);padding:calc(20px * var(--pagefind-ui-scale)) 0}.pagefind-ui__filter-name.svelte-1v2r7ls.svelte-1v2r7ls{font-size:calc(16px * var(--pagefind-ui-scale));position:relative;display:flex;align-items:center;list-style:none;font-weight:700;cursor:pointer;height:calc(24px * var(--pagefind-ui-scale))}.pagefind-ui__filter-name.svelte-1v2r7ls.svelte-1v2r7ls::-webkit-details-marker{display:none}.pagefind-ui__filter-name.svelte-1v2r7ls.svelte-1v2r7ls:after{position:absolute;content:"";right:calc(6px * var(--pagefind-ui-scale));top:50%;width:calc(8px * var(--pagefind-ui-scale));height:calc(8px * var(--pagefind-ui-scale));border:solid calc(2px * var(--pagefind-ui-scale)) currentColor;border-right:0;border-top:0;transform:translateY(-70%) rotate(-45deg)}.pagefind-ui__filter-block[open].svelte-1v2r7ls .pagefind-ui__filter-name.svelte-1v2r7ls:after{transform:translateY(-70%) rotate(-225deg)}.pagefind-ui__filter-group.svelte-1v2r7ls.svelte-1v2r7ls{display:flex;flex-direction:column;gap:calc(20px * var(--pagefind-ui-scale));padding-top:calc(30px * var(--pagefind-ui-scale))}.pagefind-ui__filter-value.svelte-1v2r7ls.svelte-1v2r7ls{position:relative;display:flex;align-items:center;gap:calc(8px * var(--pagefind-ui-scale))}.pagefind-ui__filter-value.svelte-1v2r7ls.svelte-1v2r7ls:before{position:absolute;content:"";top:50%;left:calc(8px * var(--pagefind-ui-scale));width:0px;height:0px;border:solid 1px #fff;opacity:0;transform:translate(calc(4.5px * var(--pagefind-ui-scale) * -1),calc(.8px * var(--pagefind-ui-scale))) skew(-5deg) rotate(-45deg);transform-origin:top 
left;border-top:0;border-right:0;pointer-events:none}.pagefind-ui__filter-value.pagefind-ui__filter-value--checked.svelte-1v2r7ls.svelte-1v2r7ls:before{opacity:1;width:calc(9px * var(--pagefind-ui-scale));height:calc(4px * var(--pagefind-ui-scale));transition:width .1s ease-out .1s,height .1s ease-in}.pagefind-ui__filter-checkbox.svelte-1v2r7ls.svelte-1v2r7ls{margin:0;width:calc(16px * var(--pagefind-ui-scale));height:calc(16px * var(--pagefind-ui-scale));border:solid 1px var(--pagefind-ui-border);appearance:none;-webkit-appearance:none;border-radius:calc(var(--pagefind-ui-border-radius) / 2);background-color:var(--pagefind-ui-background);cursor:pointer}.pagefind-ui__filter-checkbox.svelte-1v2r7ls.svelte-1v2r7ls:checked{background-color:var(--pagefind-ui-primary);border:solid 1px var(--pagefind-ui-primary)}.pagefind-ui__filter-label.svelte-1v2r7ls.svelte-1v2r7ls{cursor:pointer;font-size:calc(16px * var(--pagefind-ui-scale));font-weight:400}.pagefind-ui--reset *:where(:not(html,iframe,canvas,img,svg,video):not(svg *,symbol *)){all:unset;display:revert;outline:revert}.pagefind-ui--reset *,.pagefind-ui--reset *:before,.pagefind-ui--reset *:after{box-sizing:border-box}.pagefind-ui--reset a,.pagefind-ui--reset button{cursor:revert}.pagefind-ui--reset ol,.pagefind-ui--reset ul,.pagefind-ui--reset menu{list-style:none}.pagefind-ui--reset img{max-width:100%}.pagefind-ui--reset table{border-collapse:collapse}.pagefind-ui--reset input,.pagefind-ui--reset textarea{-webkit-user-select:auto}.pagefind-ui--reset textarea{white-space:revert}.pagefind-ui--reset meter{-webkit-appearance:revert;appearance:revert}.pagefind-ui--reset ::placeholder{color:unset}.pagefind-ui--reset :where([hidden]){display:none}.pagefind-ui--reset :where([contenteditable]:not([contenteditable="false"])){-moz-user-modify:read-write;-webkit-user-modify:read-write;overflow-wrap:break-word;-webkit-line-break:after-white-space;-webkit-user-select:auto}.pagefind-ui--reset 
:where([draggable="true"]){-webkit-user-drag:element}.pagefind-ui--reset mark{all:revert}:root{--pagefind-ui-scale:.8;--pagefind-ui-primary:#393939;--pagefind-ui-text:#393939;--pagefind-ui-background:#ffffff;--pagefind-ui-border:#eeeeee;--pagefind-ui-tag:#eeeeee;--pagefind-ui-border-width:2px;--pagefind-ui-border-radius:8px;--pagefind-ui-image-border-radius:8px;--pagefind-ui-image-box-ratio:3 / 2;--pagefind-ui-font:system, -apple-system, "BlinkMacSystemFont", ".SFNSText-Regular", "San Francisco", "Roboto", "Segoe UI", "Helvetica Neue", "Lucida Grande", "Ubuntu", "arial", sans-serif}.pagefind-ui.svelte-e9gkc3{width:100%;color:var(--pagefind-ui-text);font-family:var(--pagefind-ui-font)}.pagefind-ui__hidden.svelte-e9gkc3{display:none!important}.pagefind-ui__suppressed.svelte-e9gkc3{opacity:0;pointer-events:none}.pagefind-ui__form.svelte-e9gkc3{position:relative}.pagefind-ui__form.svelte-e9gkc3:before{background-color:var(--pagefind-ui-text);width:calc(18px * var(--pagefind-ui-scale));height:calc(18px * var(--pagefind-ui-scale));top:calc(23px * var(--pagefind-ui-scale));left:calc(20px * var(--pagefind-ui-scale));content:"";position:absolute;display:block;opacity:.7;-webkit-mask-image:url("data:image/svg+xml,%3Csvg width='18' height='18' viewBox='0 0 18 18' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.7549 11.255H11.9649L11.6849 10.985C12.6649 9.845 13.2549 8.365 13.2549 6.755C13.2549 3.165 10.3449 0.255005 6.75488 0.255005C3.16488 0.255005 0.254883 3.165 0.254883 6.755C0.254883 10.345 3.16488 13.255 6.75488 13.255C8.36488 13.255 9.84488 12.665 10.9849 11.685L11.2549 11.965V12.755L16.2549 17.745L17.7449 16.255L12.7549 11.255ZM6.75488 11.255C4.26488 11.255 2.25488 9.245 2.25488 6.755C2.25488 4.26501 4.26488 2.255 6.75488 2.255C9.24488 2.255 11.2549 4.26501 11.2549 6.755C11.2549 9.245 9.24488 11.255 6.75488 11.255Z' fill='%23000000'/%3E%3C/svg%3E%0A");mask-image:url("data:image/svg+xml,%3Csvg width='18' height='18' viewBox='0 0 18 18' fill='none' 
xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.7549 11.255H11.9649L11.6849 10.985C12.6649 9.845 13.2549 8.365 13.2549 6.755C13.2549 3.165 10.3449 0.255005 6.75488 0.255005C3.16488 0.255005 0.254883 3.165 0.254883 6.755C0.254883 10.345 3.16488 13.255 6.75488 13.255C8.36488 13.255 9.84488 12.665 10.9849 11.685L11.2549 11.965V12.755L16.2549 17.745L17.7449 16.255L12.7549 11.255ZM6.75488 11.255C4.26488 11.255 2.25488 9.245 2.25488 6.755C2.25488 4.26501 4.26488 2.255 6.75488 2.255C9.24488 2.255 11.2549 4.26501 11.2549 6.755C11.2549 9.245 9.24488 11.255 6.75488 11.255Z' fill='%23000000'/%3E%3C/svg%3E%0A");-webkit-mask-size:100%;mask-size:100%;z-index:9;pointer-events:none}.pagefind-ui__search-input.svelte-e9gkc3{height:calc(64px * var(--pagefind-ui-scale));padding:0 calc(70px * var(--pagefind-ui-scale)) 0 calc(54px * var(--pagefind-ui-scale));background-color:var(--pagefind-ui-background);border:var(--pagefind-ui-border-width) solid var(--pagefind-ui-border);border-radius:var(--pagefind-ui-border-radius);font-size:calc(21px * var(--pagefind-ui-scale));position:relative;appearance:none;-webkit-appearance:none;display:flex;width:100%;box-sizing:border-box;font-weight:700}.pagefind-ui__search-input.svelte-e9gkc3::placeholder{opacity:.2}.pagefind-ui__search-clear.svelte-e9gkc3{position:absolute;top:calc(3px * var(--pagefind-ui-scale));right:calc(3px * var(--pagefind-ui-scale));height:calc(58px * var(--pagefind-ui-scale));padding:0 calc(15px * var(--pagefind-ui-scale)) 0 calc(2px * var(--pagefind-ui-scale));color:var(--pagefind-ui-text);font-size:calc(14px * var(--pagefind-ui-scale));cursor:pointer;background-color:var(--pagefind-ui-background);border-radius:var(--pagefind-ui-border-radius)}.pagefind-ui__drawer.svelte-e9gkc3{gap:calc(60px * var(--pagefind-ui-scale));display:flex;flex-direction:row;flex-wrap:wrap}.pagefind-ui__results-area.svelte-e9gkc3{min-width:min(calc(400px * var(--pagefind-ui-scale)),100%);flex:1000;margin-top:calc(20px * 
var(--pagefind-ui-scale))}.pagefind-ui__results.svelte-e9gkc3{padding:0}.pagefind-ui__message.svelte-e9gkc3{box-sizing:content-box;font-size:calc(16px * var(--pagefind-ui-scale));height:calc(24px * var(--pagefind-ui-scale));padding:calc(20px * var(--pagefind-ui-scale)) 0;display:flex;align-items:center;font-weight:700;margin-top:0}.pagefind-ui__button.svelte-e9gkc3{margin-top:calc(40px * var(--pagefind-ui-scale));border:var(--pagefind-ui-border-width) solid var(--pagefind-ui-border);border-radius:var(--pagefind-ui-border-radius);height:calc(48px * var(--pagefind-ui-scale));padding:0 calc(12px * var(--pagefind-ui-scale));font-size:calc(16px * var(--pagefind-ui-scale));color:var(--pagefind-ui-primary);background:var(--pagefind-ui-background);width:100%;text-align:center;font-weight:700;cursor:pointer}.pagefind-ui__button.svelte-e9gkc3:hover{border-color:var(--pagefind-ui-primary);color:var(--pagefind-ui-primary);background:var(--pagefind-ui-background)}
diff --git a/docs/public/_pagefind/pagefind-ui.js b/docs/public/_pagefind/pagefind-ui.js
new file mode 100644
index 0000000..44c2d5d
--- /dev/null
+++ b/docs/public/_pagefind/pagefind-ui.js
@@ -0,0 +1,2 @@
+(()=>{var Ur=Object.defineProperty;var A=(n,e)=>{for(var t in e)Ur(n,t,{get:e[t],enumerable:!0})};function U(){}function bt(n){return n()}function yn(){return Object.create(null)}function K(n){n.forEach(bt)}function at(n){return typeof n=="function"}function G(n,e){return n!=n?e==e:n!==e||n&&typeof n=="object"||typeof n=="function"}var lt;function ie(n,e){return lt||(lt=document.createElement("a")),lt.href=e,n===lt.href}function vn(n){return Object.keys(n).length===0}var Hn=typeof window<"u"?window:typeof globalThis<"u"?globalThis:global,de=class{constructor(e){this.options=e,this._listeners="WeakMap"in Hn?new WeakMap:void 0}observe(e,t){return this._listeners.set(e,t),this._getObserver().observe(e,this.options),()=>{this._listeners.delete(e),this._observer.unobserve(e)}}_getObserver(){var e;return(e=this._observer)!==null&&e!==void 0?e:this._observer=new ResizeObserver(t=>{var r;for(let s of t)de.entries.set(s.target,s),(r=this._listeners.get(s.target))===null||r===void 0||r(s)})}};de.entries="WeakMap"in Hn?new WeakMap:void 0;var wn=!1;function Dr(){wn=!0}function Ir(){wn=!1}function R(n,e){n.appendChild(e)}function S(n,e,t){n.insertBefore(e,t||null)}function k(n){n.parentNode&&n.parentNode.removeChild(n)}function Q(n,e){for(let t=0;tn.removeEventListener(e,t,r)}function m(n,e,t){t==null?n.removeAttribute(e):n.getAttribute(e)!==t&&n.setAttribute(e,t)}function Lr(n){return Array.from(n.childNodes)}function z(n,e){e=""+e,n.data!==e&&(n.data=e)}function Tt(n,e){n.value=e??""}function B(n,e,t){n.classList[t?"add":"remove"](e)}var ot=class{constructor(e=!1){this.is_svg=!1,this.is_svg=e,this.e=this.n=null}c(e){this.h(e)}m(e,t,r=null){this.e||(this.is_svg?this.e=Pr(t.nodeName):this.e=C(t.nodeType===11?"TEMPLATE":t.nodeName),this.t=t.tagName!=="TEMPLATE"?t:t.content,this.c(e)),this.i(r)}h(e){this.e.innerHTML=e,this.n=Array.from(this.e.nodeName==="TEMPLATE"?this.e.content.childNodes:this.e.childNodes)}i(e){for(let 
t=0;tn.indexOf(r)===-1?e.push(r):t.push(r)),t.forEach(r=>r()),se=e}var it=new Set,ee;function ae(){ee={r:0,c:[],p:ee}}function oe(){ee.r||K(ee.c),ee=ee.p}function D(n,e){n&&n.i&&(it.delete(n),n.i(e))}function P(n,e,t,r){if(n&&n.o){if(it.has(n))return;it.add(n),ee.c.push(()=>{it.delete(n),r&&(t&&n.d(1),r())}),n.o(e)}else r&&r()}function On(n,e){P(n,1,1,()=>{e.delete(n.key)})}function jn(n,e,t,r,s,l,i,a,o,f,c,d){let p=n.length,h=l.length,u=p,_={};for(;u--;)_[n[u].key]=u;let E=[],b=new Map,T=new Map,M=[];for(u=h;u--;){let H=d(s,l,u),F=t(H),O=i.get(F);O?r&&M.push(()=>O.p(H,e)):(O=f(F,H),O.c()),b.set(F,E[u]=O),F in _&&T.set(F,Math.abs(u-_[F]))}let y=new Set,X=new Set;function V(H){D(H,1),H.m(a,c),i.set(H.key,H),c=H.first,h--}for(;p&&h;){let H=E[h-1],F=n[p-1],O=H.key,W=F.key;H===F?(c=H.first,p--,h--):b.has(W)?!i.has(O)||y.has(O)?V(H):X.has(W)?p--:T.get(O)>T.get(W)?(X.add(O),V(H)):(y.add(W),p--):(o(F,i),p--)}for(;p--;){let H=n[p];b.has(H.key)||o(H,i)}for(;h;)V(E[h-1]);return K(M),E}var Kr=["allowfullscreen","allowpaymentrequest","async","autofocus","autoplay","checked","controls","default","defer","disabled","formnovalidate","hidden","inert","ismap","loop","multiple","muted","nomodule","novalidate","open","playsinline","readonly","required","reversed","selected"],Eo=new Set([...Kr]);function Un(n,e,t){let r=n.$$.props[e];r!==void 0&&(n.$$.bound[r]=t,t(n.$$.ctx[r]))}function ut(n){n&&n.c()}function me(n,e,t,r){let{fragment:s,after_update:l}=n.$$;s&&s.m(e,t),r||Rt(()=>{let i=n.$$.on_mount.map(bt).filter(at);n.$$.on_destroy?n.$$.on_destroy.push(...i):K(i),n.$$.on_mount=[]}),l.forEach(Rt)}function ue(n,e){let t=n.$$;t.fragment!==null&&(Wr(t.after_update),K(t.on_destroy),t.fragment&&t.fragment.d(e),t.on_destroy=t.fragment=null,t.ctx=[])}function Gr(n,e){n.$$.dirty[0]===-1&&(re.push(n),Br(),n.$$.dirty.fill(0)),n.$$.dirty[e/31|0]|=1<{let u=h.length?h[0]:p;return 
f.ctx&&s(f.ctx[d],f.ctx[d]=u)&&(!f.skip_bound&&f.bound[d]&&f.bound[d](u),c&&Gr(n,d)),p}):[],f.update(),c=!0,K(f.before_update),f.fragment=r?r(f.ctx):!1,e.target){if(e.hydrate){Dr();let d=Lr(e.target);f.fragment&&f.fragment.l(d),d.forEach(k)}else f.fragment&&f.fragment.c();e.intro&&D(n.$$.fragment),me(n,e.target,e.anchor,e.customElement),Ir(),zn()}fe(o)}var Jr;typeof HTMLElement=="function"&&(Jr=class extends HTMLElement{constructor(){super(),this.attachShadow({mode:"open"})}connectedCallback(){let{on_mount:n}=this.$$;this.$$.on_disconnect=n.map(bt).filter(at);for(let e in this.$$.slotted)this.appendChild(this.$$.slotted[e])}attributeChangedCallback(n,e,t){this[n]=t}disconnectedCallback(){K(this.$$.on_disconnect)}$destroy(){ue(this,1),this.$destroy=U}$on(n,e){if(!at(e))return U;let t=this.$$.callbacks[n]||(this.$$.callbacks[n]=[]);return t.push(e),()=>{let r=t.indexOf(e);r!==-1&&t.splice(r,1)}}$set(n){this.$$set&&!vn(n)&&(this.$$.skip_bound=!0,this.$$set(n),this.$$.skip_bound=!1)}});var q=class{$destroy(){ue(this,1),this.$destroy=U}$on(e,t){if(!at(t))return U;let r=this.$$.callbacks[e]||(this.$$.callbacks[e]=[]);return r.push(t),()=>{let s=r.indexOf(t);s!==-1&&r.splice(s,1)}}$set(e){this.$$set&&!vn(e)&&(this.$$.skip_bound=!0,this.$$set(e),this.$$.skip_bound=!1)}};function I(n){let e=typeof n=="string"?n.charCodeAt(0):n;return e>=97&&e<=122||e>=65&&e<=90}function $(n){let e=typeof n=="string"?n.charCodeAt(0):n;return e>=48&&e<=57}function Z(n){return I(n)||$(n)}var Dn=["art-lojban","cel-gaulish","no-bok","no-nyn","zh-guoyu","zh-hakka","zh-min","zh-min-nan","zh-xiang"];var 
St={"en-gb-oed":"en-GB-oxendict","i-ami":"ami","i-bnn":"bnn","i-default":null,"i-enochian":null,"i-hak":"hak","i-klingon":"tlh","i-lux":"lb","i-mingo":null,"i-navajo":"nv","i-pwn":"pwn","i-tao":"tao","i-tay":"tay","i-tsu":"tsu","sgn-be-fr":"sfb","sgn-be-nl":"vgt","sgn-ch-de":"sgg","art-lojban":"jbo","cel-gaulish":null,"no-bok":"nb","no-nyn":"nn","zh-guoyu":"cmn","zh-hakka":"hak","zh-min":null,"zh-min-nan":"nan","zh-xiang":"hsn"};var Yr={}.hasOwnProperty;function ct(n,e={}){let t=In(),r=String(n),s=r.toLowerCase(),l=0;if(n==null)throw new Error("Expected string, got `"+n+"`");if(Yr.call(St,s)){let a=St[s];return(e.normalize===void 0||e.normalize===null||e.normalize)&&typeof a=="string"?ct(a):(t[Dn.includes(s)?"regular":"irregular"]=r,t)}for(;I(s.charCodeAt(l))&&l<9;)l++;if(l>1&&l<9){if(t.language=r.slice(0,l),l<4){let a=0;for(;s.charCodeAt(l)===45&&I(s.charCodeAt(l+1))&&I(s.charCodeAt(l+2))&&I(s.charCodeAt(l+3))&&!I(s.charCodeAt(l+4));){if(a>2)return i(l,3,"Too many extended language subtags, expected at most 3 subtags");t.extendedLanguageSubtags.push(r.slice(l+1,l+4)),l+=4,a++}}for(s.charCodeAt(l)===45&&I(s.charCodeAt(l+1))&&I(s.charCodeAt(l+2))&&I(s.charCodeAt(l+3))&&I(s.charCodeAt(l+4))&&!I(s.charCodeAt(l+5))&&(t.script=r.slice(l+1,l+5),l+=5),s.charCodeAt(l)===45&&(I(s.charCodeAt(l+1))&&I(s.charCodeAt(l+2))&&!I(s.charCodeAt(l+3))?(t.region=r.slice(l+1,l+3),l+=3):$(s.charCodeAt(l+1))&&$(s.charCodeAt(l+2))&&$(s.charCodeAt(l+3))&&!$(s.charCodeAt(l+4))&&(t.region=r.slice(l+1,l+4),l+=4));s.charCodeAt(l)===45;){let a=l+1,o=a;for(;Z(s.charCodeAt(o));){if(o-a>7)return i(o,1,"Too long variant, expected at most 8 characters");o++}if(o-a>4||o-a>3&&$(s.charCodeAt(a)))t.variants.push(r.slice(a,o)),l=o;else break}for(;s.charCodeAt(l)===45&&!(s.charCodeAt(l+1)===120||!Z(s.charCodeAt(l+1))||s.charCodeAt(l+2)!==45||!Z(s.charCodeAt(l+3)));){let a=l+2,o=0;for(;s.charCodeAt(a)===45&&Z(s.charCodeAt(a+1))&&Z(s.charCodeAt(a+2));){let 
f=a+1;for(a=f+2,o++;Z(s.charCodeAt(a));){if(a-f>7)return i(a,2,"Too long extension, expected at most 8 characters");a++}}if(!o)return i(a,4,"Empty extension, extensions must have at least 2 characters of content");t.extensions.push({singleton:r.charAt(l+1),extensions:r.slice(l+3,a).split("-")}),l=a}}else l=0;if(l===0&&s.charCodeAt(l)===120||s.charCodeAt(l)===45&&s.charCodeAt(l+1)===120){l=l?l+2:1;let a=l;for(;s.charCodeAt(a)===45&&Z(s.charCodeAt(a+1));){let o=l+1;for(a=o;Z(s.charCodeAt(a));){if(a-o>7)return i(a,5,"Too long private-use area, expected at most 8 characters");a++}t.privateuse.push(r.slice(l+1,a)),l=a}}if(l!==r.length)return i(l,6,"Found superfluous content after tag");return t;function i(a,o,f){return e.warning&&e.warning(f,o,a),e.forgiving?t:In()}}function In(){return{language:null,extendedLanguageSubtags:[],script:null,region:null,variants:[],extensions:[],privateuse:[],irregular:null,regular:null}}function Pn(n,e,t){let r=n.slice();return r[8]=e[t][0],r[9]=e[t][1],r}function Zr(n){let e,t,r,s,l,i=n[0]&&Ln(n);return{c(){i&&i.c(),e=v(),t=C("div"),r=C("p"),r.textContent=`${n[3](30)}`,s=v(),l=C("p"),l.textContent=`${n[3](40)}`,m(r,"class","pagefind-ui__result-title pagefind-ui__loading svelte-j9e30"),m(l,"class","pagefind-ui__result-excerpt pagefind-ui__loading svelte-j9e30"),m(t,"class","pagefind-ui__result-inner svelte-j9e30")},m(a,o){i&&i.m(a,o),S(a,e,o),S(a,t,o),R(t,r),R(t,s),R(t,l)},p(a,o){a[0]?i||(i=Ln(a),i.c(),i.m(e.parentNode,e)):i&&(i.d(1),i=null)},d(a){i&&i.d(a),a&&k(e),a&&k(t)}}}function Xr(n){let e,t,r,s,l=n[1].meta?.title+"",i,a,o,f,c=n[1].excerpt+"",d,p=n[0]&&qn(n),h=n[2].length&&Vn(n);return{c(){p&&p.c(),e=v(),t=C("div"),r=C("p"),s=C("a"),i=w(l),o=v(),f=C("p"),d=v(),h&&h.c(),m(s,"class","pagefind-ui__result-link svelte-j9e30"),m(s,"href",a=n[1].meta?.url||n[1].url),m(r,"class","pagefind-ui__result-title svelte-j9e30"),m(f,"class","pagefind-ui__result-excerpt svelte-j9e30"),m(t,"class","pagefind-ui__result-inner 
svelte-j9e30")},m(u,_){p&&p.m(u,_),S(u,e,_),S(u,t,_),R(t,r),R(r,s),R(s,i),R(t,o),R(t,f),f.innerHTML=c,R(t,d),h&&h.m(t,null)},p(u,_){u[0]?p?p.p(u,_):(p=qn(u),p.c(),p.m(e.parentNode,e)):p&&(p.d(1),p=null),_&2&&l!==(l=u[1].meta?.title+"")&&z(i,l),_&2&&a!==(a=u[1].meta?.url||u[1].url)&&m(s,"href",a),_&2&&c!==(c=u[1].excerpt+"")&&(f.innerHTML=c),u[2].length?h?h.p(u,_):(h=Vn(u),h.c(),h.m(t,null)):h&&(h.d(1),h=null)},d(u){p&&p.d(u),u&&k(e),u&&k(t),h&&h.d()}}}function Ln(n){let e;return{c(){e=C("div"),m(e,"class","pagefind-ui__result-thumb pagefind-ui__loading svelte-j9e30")},m(t,r){S(t,e,r)},d(t){t&&k(e)}}}function qn(n){let e,t=n[1].meta.image&&Bn(n);return{c(){e=C("div"),t&&t.c(),m(e,"class","pagefind-ui__result-thumb svelte-j9e30")},m(r,s){S(r,e,s),t&&t.m(e,null)},p(r,s){r[1].meta.image?t?t.p(r,s):(t=Bn(r),t.c(),t.m(e,null)):t&&(t.d(1),t=null)},d(r){r&&k(e),t&&t.d()}}}function Bn(n){let e,t,r;return{c(){e=C("img"),m(e,"class","pagefind-ui__result-image svelte-j9e30"),ie(e.src,t=n[1].meta?.image)||m(e,"src",t),m(e,"alt",r=n[1].meta?.image_alt||n[1].meta?.title)},m(s,l){S(s,e,l)},p(s,l){l&2&&!ie(e.src,t=s[1].meta?.image)&&m(e,"src",t),l&2&&r!==(r=s[1].meta?.image_alt||s[1].meta?.title)&&m(e,"alt",r)},d(s){s&&k(e)}}}function Vn(n){let e,t=n[2],r=[];for(let s=0;sn.toLocaleUpperCase();function xr(n,e,t){let{show_images:r=!0}=e,{process_result:s=null}=e,{result:l={data:async()=>{}}}=e,i=["title","image","image_alt","url"],a,o=[],f=async d=>{t(1,a=await d.data()),t(1,a=s?.(a)??a),t(2,o=Object.entries(a.meta).filter(([p])=>!i.includes(p)))},c=(d=30)=>". 
".repeat(Math.floor(10+Math.random()*d));return n.$$set=d=>{"show_images"in d&&t(0,r=d.show_images),"process_result"in d&&t(4,s=d.process_result),"result"in d&&t(5,l=d.result)},n.$$.update=()=>{if(n.$$.dirty&32)e:f(l)},[r,a,o,c,s,l]}var Mt=class extends q{constructor(e){super(),Y(this,e,xr,Qr,G,{show_images:0,process_result:4,result:5})}},Gn=Mt;function Jn(n,e,t){let r=n.slice();return r[11]=e[t][0],r[12]=e[t][1],r}function Yn(n,e,t){let r=n.slice();return r[15]=e[t],r}function $r(n){let e,t,r,s,l,i=n[0]&&Zn(n);return{c(){i&&i.c(),e=v(),t=C("div"),r=C("p"),r.textContent=`${n[5](30)}`,s=v(),l=C("p"),l.textContent=`${n[5](40)}`,m(r,"class","pagefind-ui__result-title pagefind-ui__loading svelte-4xnkmf"),m(l,"class","pagefind-ui__result-excerpt pagefind-ui__loading svelte-4xnkmf"),m(t,"class","pagefind-ui__result-inner svelte-4xnkmf")},m(a,o){i&&i.m(a,o),S(a,e,o),S(a,t,o),R(t,r),R(t,s),R(t,l)},p(a,o){a[0]?i||(i=Zn(a),i.c(),i.m(e.parentNode,e)):i&&(i.d(1),i=null)},d(a){i&&i.d(a),a&&k(e),a&&k(t)}}}function es(n){let e,t,r,s,l=n[1].meta?.title+"",i,a,o,f,c,d=n[0]&&Xn(n),p=n[4]&&xn(n),h=n[3],u=[];for(let E=0;En.toLocaleUpperCase();function ns(n,e,t){let{show_images:r=!0}=e,{process_result:s=null}=e,{result:l={data:async()=>{}}}=e,i=["title","image","image_alt","url"],a,o=[],f=[],c=!1,d=(u,_)=>{if(u.length<=_)return u;let E=[...u].sort((b,T)=>T.locations.length-b.locations.length).slice(0,3).map(b=>b.url);return u.filter(b=>E.includes(b.url))},p=async u=>{t(1,a=await u.data()),t(1,a=s?.(a)??a),t(2,o=Object.entries(a.meta).filter(([_])=>!i.includes(_))),Array.isArray(a.sub_results)&&(t(4,c=a.sub_results?.[0]?.url===(a.meta?.url||a.url)),c?t(3,f=d(a.sub_results.slice(1),3)):t(3,f=d([...a.sub_results],3)))},h=(u=30)=>". 
".repeat(Math.floor(10+Math.random()*u));return n.$$set=u=>{"show_images"in u&&t(0,r=u.show_images),"process_result"in u&&t(6,s=u.process_result),"result"in u&&t(7,l=u.result)},n.$$.update=()=>{if(n.$$.dirty&128)e:p(l)},[r,a,o,f,c,h,s,l]}var At=class extends q{constructor(e){super(),Y(this,e,ns,ts,G,{show_images:0,process_result:6,result:7})}},rr=At;function sr(n,e,t){let r=n.slice();return r[10]=e[t][0],r[11]=e[t][1],r[12]=e,r[13]=t,r}function lr(n,e,t){let r=n.slice();return r[14]=e[t][0],r[15]=e[t][1],r[16]=e,r[17]=t,r}function ir(n){let e,t,r=n[4]("filters_label",n[5],n[6])+"",s,l,i=Object.entries(n[1]),a=[];for(let o=0;on.toLocaleUpperCase(),_r=n=>n.toLowerCase();function ss(n,e,t){let{available_filters:r=null}=e,{show_empty_filters:s=!0}=e,{open_filters:l=[]}=e,{translate:i=()=>""}=e,{automatic_translations:a={}}=e,{translations:o={}}=e,{selected_filters:f={}}=e,c=!1,d=!1;function p(h,u){f[`${h}:${u}`]=this.checked,t(0,f)}return n.$$set=h=>{"available_filters"in h&&t(1,r=h.available_filters),"show_empty_filters"in h&&t(2,s=h.show_empty_filters),"open_filters"in h&&t(3,l=h.open_filters),"translate"in h&&t(4,i=h.translate),"automatic_translations"in h&&t(5,a=h.automatic_translations),"translations"in h&&t(6,o=h.translations),"selected_filters"in h&&t(0,f=h.selected_filters)},n.$$.update=()=>{if(n.$$.dirty&258){e:if(r&&!c){t(8,c=!0);let h=Object.entries(r||{});h.length===1&&Object.entries(h[0][1])?.length<=6&&t(7,d=!0)}}},[f,r,s,l,i,a,o,d,c,p]}var yt=class extends q{constructor(e){super(),Y(this,e,ss,rs,G,{available_filters:1,show_empty_filters:2,open_filters:3,translate:4,automatic_translations:5,translations:6,selected_filters:0})}},fr=yt;var vt={};A(vt,{comments:()=>is,default:()=>us,direction:()=>as,strings:()=>os,thanks_to:()=>ls});var ls="Jan Claasen ",is="",as="ltr",os={placeholder:"Soek",clear_search:"Opruim",load_more:"Laai nog resultate",search_label:"Soek hierdie webwerf",filters_label:"Filters",zero_results:"Geen resultate vir 
[SEARCH_TERM]",many_results:"[COUNT] resultate vir [SEARCH_TERM]",one_result:"[COUNT] resultate vir [SEARCH_TERM]",alt_search:"Geen resultate vir [SEARCH_TERM]. Toon resultate vir [DIFFERENT_TERM] in plaas daarvan",search_suggestion:"Geen resultate vir [SEARCH_TERM]. Probeer eerder een van die volgende terme:",searching:"Soek vir [SEARCH_TERM]"},us={thanks_to:ls,comments:is,direction:as,strings:os};var Ht={};A(Ht,{comments:()=>_s,default:()=>hs,direction:()=>fs,strings:()=>ds,thanks_to:()=>cs});var cs="Jermanuts",_s="",fs="rtl",ds={placeholder:"\u0628\u062D\u062B",clear_search:"\u0627\u0645\u0633\u062D",load_more:"\u062D\u0645\u0651\u0650\u0644 \u0627\u0644\u0645\u0632\u064A\u062F \u0645\u0646 \u0627\u0644\u0646\u062A\u0627\u0626\u062C",search_label:"\u0627\u0628\u062D\u062B \u0641\u064A \u0647\u0630\u0627 \u0627\u0644\u0645\u0648\u0642\u0639",filters_label:"\u062A\u0635\u0641\u064A\u0627\u062A",zero_results:"\u0644\u0627 \u062A\u0648\u062C\u062F \u0646\u062A\u0627\u0626\u062C \u0644 [SEARCH_TERM]",many_results:"[COUNT] \u0646\u062A\u0627\u0626\u062C \u0644 [SEARCH_TERM]",one_result:"[COUNT] \u0646\u062A\u064A\u062C\u0629 \u0644 [SEARCH_TERM]",alt_search:"\u0644\u0627 \u062A\u0648\u062C\u062F \u0646\u062A\u0627\u0626\u062C \u0644 [SEARCH_TERM]. \u064A\u0639\u0631\u0636 \u0627\u0644\u0646\u062A\u0627\u0626\u062C \u0644 [DIFFERENT_TERM] \u0628\u062F\u0644\u0627\u064B \u0645\u0646 \u0630\u0644\u0643",search_suggestion:"\u0644\u0627 \u062A\u0648\u062C\u062F \u0646\u062A\u0627\u0626\u062C \u0644 [SEARCH_TERM]. 
\u062C\u0631\u0628 \u0623\u062D\u062F \u0639\u0645\u0644\u064A\u0627\u062A \u0627\u0644\u0628\u062D\u062B \u0627\u0644\u062A\u0627\u0644\u064A\u0629:",searching:"\u064A\u0628\u062D\u062B \u0639\u0646 [SEARCH_TERM]..."},hs={thanks_to:cs,comments:_s,direction:fs,strings:ds};var wt={};A(wt,{comments:()=>ps,default:()=>Rs,direction:()=>gs,strings:()=>Es,thanks_to:()=>ms});var ms="Maruf Alom ",ps="",gs="ltr",Es={placeholder:"\u0985\u09A8\u09C1\u09B8\u09A8\u09CD\u09A7\u09BE\u09A8 \u0995\u09B0\u09C1\u09A8",clear_search:"\u09AE\u09C1\u099B\u09C7 \u09AB\u09C7\u09B2\u09C1\u09A8",load_more:"\u0986\u09B0\u09CB \u09AB\u09B2\u09BE\u09AB\u09B2 \u09A6\u09C7\u0996\u09C1\u09A8",search_label:"\u098F\u0987 \u0993\u09DF\u09C7\u09AC\u09B8\u09BE\u0987\u099F\u09C7 \u0985\u09A8\u09C1\u09B8\u09A8\u09CD\u09A7\u09BE\u09A8 \u0995\u09B0\u09C1\u09A8",filters_label:"\u09AB\u09BF\u09B2\u09CD\u099F\u09BE\u09B0",zero_results:"[SEARCH_TERM] \u098F\u09B0 \u099C\u09A8\u09CD\u09AF \u0995\u09BF\u099B\u09C1 \u0996\u09C1\u0981\u099C\u09C7 \u09AA\u09BE\u0993\u09DF\u09BE \u09AF\u09BE\u09DF\u09A8\u09BF",many_results:"[COUNT]-\u099F\u09BF \u09AB\u09B2\u09BE\u09AB\u09B2 \u09AA\u09BE\u0993\u09DF\u09BE \u0997\u09BF\u09DF\u09C7\u099B\u09C7 [SEARCH_TERM] \u098F\u09B0 \u099C\u09A8\u09CD\u09AF",one_result:"[COUNT]-\u099F\u09BF \u09AB\u09B2\u09BE\u09AB\u09B2 \u09AA\u09BE\u0993\u09DF\u09BE \u0997\u09BF\u09DF\u09C7\u099B\u09C7 [SEARCH_TERM] \u098F\u09B0 \u099C\u09A8\u09CD\u09AF",alt_search:"\u0995\u09CB\u09A8 \u0995\u09BF\u099B\u09C1 \u0996\u09C1\u0981\u099C\u09C7 \u09AA\u09BE\u0993\u09DF\u09BE \u09AF\u09BE\u09DF\u09A8\u09BF [SEARCH_TERM] \u098F\u09B0 \u099C\u09A8\u09CD\u09AF. 
\u09AA\u09B0\u09BF\u09AC\u09B0\u09CD\u09A4\u09C7 [DIFFERENT_TERM] \u098F\u09B0 \u099C\u09A8\u09CD\u09AF \u09A6\u09C7\u0996\u09BE\u09A8\u09CB \u09B9\u099A\u09CD\u099B\u09C7",search_suggestion:"\u0995\u09CB\u09A8 \u0995\u09BF\u099B\u09C1 \u0996\u09C1\u0981\u099C\u09C7 \u09AA\u09BE\u0993\u09DF\u09BE \u09AF\u09BE\u09DF\u09A8\u09BF [SEARCH_TERM] \u098F\u09B0 \u09AC\u09BF\u09B7\u09DF\u09C7. \u09A8\u09BF\u09A8\u09CD\u09AE\u09C7\u09B0 \u09AC\u09BF\u09B7\u09DF\u09AC\u09B8\u09CD\u09A4\u09C1 \u0996\u09C1\u0981\u099C\u09C7 \u09A6\u09C7\u0996\u09C1\u09A8:",searching:"\u0985\u09A8\u09C1\u09B8\u09A8\u09CD\u09A7\u09BE\u09A8 \u099A\u09B2\u099B\u09C7 [SEARCH_TERM]..."},Rs={thanks_to:ms,comments:ps,direction:gs,strings:Es};var Ft={};A(Ft,{comments:()=>Ts,default:()=>Ss,direction:()=>Cs,strings:()=>ks,thanks_to:()=>bs});var bs="Pablo Villaverde ",Ts="",Cs="ltr",ks={placeholder:"Cerca",clear_search:"Netejar",load_more:"Veure m\xE9s resultats",search_label:"Cerca en aquest lloc",filters_label:"Filtres",zero_results:"No es van trobar resultats per [SEARCH_TERM]",many_results:"[COUNT] resultats trobats per [SEARCH_TERM]",one_result:"[COUNT] resultat trobat per [SEARCH_TERM]",alt_search:"No es van trobar resultats per [SEARCH_TERM]. Mostrant al seu lloc resultats per [DIFFERENT_TERM]",search_suggestion:"No es van trobar resultats per [SEARCH_TERM]. 
Proveu una de les cerques seg\xFCents:",searching:"Cercant [SEARCH_TERM]..."},Ss={thanks_to:bs,comments:Ts,direction:Cs,strings:ks};var Nt={};A(Nt,{comments:()=>As,default:()=>Hs,direction:()=>ys,strings:()=>vs,thanks_to:()=>Ms});var Ms="Dalibor Hon ",As="",ys="ltr",vs={placeholder:"Hledat",clear_search:"Smazat",load_more:"Na\u010D\xEDst dal\u0161\xED v\xFDsledky",search_label:"Prohledat tuto str\xE1nku",filters_label:"Filtry",zero_results:"\u017D\xE1dn\xE9 v\xFDsledky pro [SEARCH_TERM]",many_results:"[COUNT] v\xFDsledk\u016F pro [SEARCH_TERM]",one_result:"[COUNT] v\xFDsledek pro [SEARCH_TERM]",alt_search:"\u017D\xE1dn\xE9 v\xFDsledky pro [SEARCH_TERM]. Zobrazuj\xED se v\xFDsledky pro [DIFFERENT_TERM]",search_suggestion:"\u017D\xE1dn\xE9 v\xFDsledky pro [SEARCH_TERM]. Souvisej\xEDc\xED v\xFDsledky hled\xE1n\xED:",searching:"Hled\xE1m [SEARCH_TERM]..."},Hs={thanks_to:Ms,comments:As,direction:ys,strings:vs};var zt={};A(zt,{comments:()=>Fs,default:()=>Os,direction:()=>Ns,strings:()=>zs,thanks_to:()=>ws});var ws="Jonas Smedegaard ",Fs="",Ns="ltr",zs={placeholder:"S\xF8g",clear_search:"Nulstil",load_more:"Indl\xE6s flere resultater",search_label:"S\xF8g p\xE5 dette website",filters_label:"Filtre",zero_results:"Ingen resultater for [SEARCH_TERM]",many_results:"[COUNT] resultater for [SEARCH_TERM]",one_result:"[COUNT] resultat for [SEARCH_TERM]",alt_search:"Ingen resultater for [SEARCH_TERM]. Viser resultater for [DIFFERENT_TERM] i stedet",search_suggestion:"Ingen resultater for [SEARCH_TERM]. 
Pr\xF8v et af disse s\xF8geord i stedet:",searching:"S\xF8ger efter [SEARCH_TERM]..."},Os={thanks_to:ws,comments:Fs,direction:Ns,strings:zs};var Ot={};A(Ot,{comments:()=>Us,default:()=>Ps,direction:()=>Ds,strings:()=>Is,thanks_to:()=>js});var js="Jan Claasen ",Us="",Ds="ltr",Is={placeholder:"Suche",clear_search:"L\xF6schen",load_more:"Mehr Ergebnisse laden",search_label:"Suche diese Seite",filters_label:"Filter",zero_results:"Keine Ergebnisse f\xFCr [SEARCH_TERM]",many_results:"[COUNT] Ergebnisse f\xFCr [SEARCH_TERM]",one_result:"[COUNT] Ergebnis f\xFCr [SEARCH_TERM]",alt_search:"Keine Ergebnisse f\xFCr [SEARCH_TERM]. Stattdessen werden Ergebnisse f\xFCr [DIFFERENT_TERM] angezeigt",search_suggestion:"Keine Ergebnisse f\xFCr [SEARCH_TERM]. Versuchen Sie eine der folgenden Suchen:",searching:"Suche f\xFCr [SEARCH_TERM]"},Ps={thanks_to:js,comments:Us,direction:Ds,strings:Is};var jt={};A(jt,{comments:()=>qs,default:()=>Ws,direction:()=>Bs,strings:()=>Vs,thanks_to:()=>Ls});var Ls="Liam Bigelow ",qs="",Bs="ltr",Vs={placeholder:"Search",clear_search:"Clear",load_more:"Load more results",search_label:"Search this site",filters_label:"Filters",zero_results:"No results for [SEARCH_TERM]",many_results:"[COUNT] results for [SEARCH_TERM]",one_result:"[COUNT] result for [SEARCH_TERM]",alt_search:"No results for [SEARCH_TERM]. Showing results for [DIFFERENT_TERM] instead",search_suggestion:"No results for [SEARCH_TERM]. 
Try one of the following searches:",searching:"Searching for [SEARCH_TERM]..."},Ws={thanks_to:Ls,comments:qs,direction:Bs,strings:Vs};var Ut={};A(Ut,{comments:()=>Gs,default:()=>Zs,direction:()=>Js,strings:()=>Ys,thanks_to:()=>Ks});var Ks="Pablo Villaverde ",Gs="",Js="ltr",Ys={placeholder:"Buscar",clear_search:"Limpiar",load_more:"Ver m\xE1s resultados",search_label:"Buscar en este sitio",filters_label:"Filtros",zero_results:"No se encontraron resultados para [SEARCH_TERM]",many_results:"[COUNT] resultados encontrados para [SEARCH_TERM]",one_result:"[COUNT] resultado encontrado para [SEARCH_TERM]",alt_search:"No se encontraron resultados para [SEARCH_TERM]. Mostrando en su lugar resultados para [DIFFERENT_TERM]",search_suggestion:"No se encontraron resultados para [SEARCH_TERM]. Prueba una de las siguientes b\xFAsquedas:",searching:"Buscando [SEARCH_TERM]..."},Zs={thanks_to:Ks,comments:Gs,direction:Js,strings:Ys};var Dt={};A(Dt,{comments:()=>Qs,default:()=>el,direction:()=>xs,strings:()=>$s,thanks_to:()=>Xs});var Xs="Mikel Larreategi ",Qs="",xs="ltr",$s={placeholder:"Bilatu",clear_search:"Garbitu",load_more:"Kargatu emaitza gehiagi",search_label:"Bilatu",filters_label:"Iragazkiak",zero_results:"Ez dago emaitzarik [SEARCH_TERM] bilaketarentzat",many_results:"[COUNT] emaitza [SEARCH_TERM] bilaketarentzat",one_result:"Emaitza bat [COUNT] [SEARCH_TERM] bilaketarentzat",alt_search:"Ez dago emaitzarik [SEARCH_TERM] bilaketarentzat. [DIFFERENT_TERM] bilaketaren emaitzak erakusten",search_suggestion:"Ez dago emaitzarik [SEARCH_TERM] bilaketarentzat. 
Saiatu hauetako beste bateikin:",searching:"[SEARCH_TERM] bilatzen..."},el={thanks_to:Xs,comments:Qs,direction:xs,strings:$s};var It={};A(It,{comments:()=>nl,default:()=>ll,direction:()=>rl,strings:()=>sl,thanks_to:()=>tl});var tl="Ali Khaleqi Yekta ",nl="",rl="rtl",sl={placeholder:"\u062C\u0633\u062A\u062C\u0648",clear_search:"\u067E\u0627\u06A9\u0633\u0627\u0632\u06CC",load_more:"\u0628\u0627\u0631\u06AF\u0630\u0627\u0631\u06CC \u0646\u062A\u0627\u06CC\u062C \u0628\u06CC\u0634\u062A\u0631",search_label:"\u062C\u0633\u062A\u062C\u0648 \u062F\u0631 \u0633\u0627\u06CC\u062A",filters_label:"\u0641\u06CC\u0644\u062A\u0631\u0647\u0627",zero_results:"\u0646\u062A\u06CC\u062C\u0647\u200C\u0627\u06CC \u0628\u0631\u0627\u06CC [SEARCH_TERM] \u06CC\u0627\u0641\u062A \u0646\u0634\u062F",many_results:"[COUNT] \u0646\u062A\u06CC\u062C\u0647 \u0628\u0631\u0627\u06CC [SEARCH_TERM] \u06CC\u0627\u0641\u062A \u0634\u062F",one_result:"[COUNT] \u0646\u062A\u06CC\u062C\u0647 \u0628\u0631\u0627\u06CC [SEARCH_TERM] \u06CC\u0627\u0641\u062A \u0634\u062F",alt_search:"\u0646\u062A\u06CC\u062C\u0647\u200C\u0627\u06CC \u0628\u0631\u0627\u06CC [SEARCH_TERM] \u06CC\u0627\u0641\u062A \u0646\u0634\u062F. \u062F\u0631 \u0639\u0648\u0636 \u0646\u062A\u0627\u06CC\u062C \u0628\u0631\u0627\u06CC [DIFFERENT_TERM] \u0646\u0645\u0627\u06CC\u0634 \u062F\u0627\u062F\u0647 \u0645\u06CC\u200C\u0634\u0648\u062F",search_suggestion:"\u0646\u062A\u06CC\u062C\u0647\u200C\u0627\u06CC \u0628\u0631\u0627\u06CC [SEARCH_TERM] \u06CC\u0627\u0641\u062A \u0646\u0634\u062F. 
\u06CC\u06A9\u06CC \u0627\u0632 \u062C\u0633\u062A\u062C\u0648\u0647\u0627\u06CC \u0632\u06CC\u0631 \u0631\u0627 \u0627\u0645\u062A\u062D\u0627\u0646 \u06A9\u0646\u06CC\u062F:",searching:"\u062F\u0631 \u062D\u0627\u0644 \u062C\u0633\u062A\u062C\u0648\u06CC [SEARCH_TERM]..."},ll={thanks_to:tl,comments:nl,direction:rl,strings:sl};var Pt={};A(Pt,{comments:()=>al,default:()=>cl,direction:()=>ol,strings:()=>ul,thanks_to:()=>il});var il="Valtteri Laitinen ",al="",ol="ltr",ul={placeholder:"Haku",clear_search:"Tyhjenn\xE4",load_more:"Lataa lis\xE4\xE4 tuloksia",search_label:"Hae t\xE4lt\xE4 sivustolta",filters_label:"Suodattimet",zero_results:"Ei tuloksia haulle [SEARCH_TERM]",many_results:"[COUNT] tulosta haulle [SEARCH_TERM]",one_result:"[COUNT] tulos haulle [SEARCH_TERM]",alt_search:"Ei tuloksia haulle [SEARCH_TERM]. N\xE4ytet\xE4\xE4n tulokset sen sijaan haulle [DIFFERENT_TERM]",search_suggestion:"Ei tuloksia haulle [SEARCH_TERM]. Kokeile jotain seuraavista:",searching:"Haetaan [SEARCH_TERM]..."},cl={thanks_to:il,comments:al,direction:ol,strings:ul};var Lt={};A(Lt,{comments:()=>fl,default:()=>ml,direction:()=>dl,strings:()=>hl,thanks_to:()=>_l});var _l="Nicolas Friedli ",fl="",dl="ltr",hl={placeholder:"Rechercher",clear_search:"Nettoyer",load_more:"Charger plus de r\xE9sultats",search_label:"Recherche sur ce site",filters_label:"Filtres",zero_results:"Pas de r\xE9sultat pour [SEARCH_TERM]",many_results:"[COUNT] r\xE9sultats pour [SEARCH_TERM]",one_result:"[COUNT] r\xE9sultat pour [SEARCH_TERM]",alt_search:"Pas de r\xE9sultat pour [SEARCH_TERM]. Montre les r\xE9sultats pour [DIFFERENT_TERM] \xE0 la place",search_suggestion:"Pas de r\xE9sultat pour [SEARCH_TERM]. 
Essayer une des recherches suivantes:",searching:"Recherche [SEARCH_TERM]..."},ml={thanks_to:_l,comments:fl,direction:dl,strings:hl};var qt={};A(qt,{comments:()=>gl,default:()=>bl,direction:()=>El,strings:()=>Rl,thanks_to:()=>pl});var pl="Pablo Villaverde ",gl="",El="ltr",Rl={placeholder:"Buscar",clear_search:"Limpar",load_more:"Ver m\xE1is resultados",search_label:"Buscar neste sitio",filters_label:"Filtros",zero_results:"Non se atoparon resultados para [SEARCH_TERM]",many_results:"[COUNT] resultados atopados para [SEARCH_TERM]",one_result:"[COUNT] resultado atopado para [SEARCH_TERM]",alt_search:"Non se atoparon resultados para [SEARCH_TERM]. Amosando no seu lugar resultados para [DIFFERENT_TERM]",search_suggestion:"Non se atoparon resultados para [SEARCH_TERM]. Probe unha das seguintes pesquisas:",searching:"Buscando [SEARCH_TERM]..."},bl={thanks_to:pl,comments:gl,direction:El,strings:Rl};var Bt={};A(Bt,{comments:()=>Cl,default:()=>Ml,direction:()=>kl,strings:()=>Sl,thanks_to:()=>Tl});var Tl="Nir Tamir ",Cl="",kl="rtl",Sl={placeholder:"\u05D7\u05D9\u05E4\u05D5\u05E9",clear_search:"\u05E0\u05D9\u05E7\u05D5\u05D9",load_more:"\u05E2\u05D5\u05D3 \u05EA\u05D5\u05E6\u05D0\u05D5\u05EA",search_label:"\u05D7\u05D9\u05E4\u05D5\u05E9 \u05D1\u05D0\u05EA\u05E8 \u05D6\u05D4",filters_label:"\u05DE\u05E1\u05E0\u05E0\u05D9\u05DD",zero_results:"\u05DC\u05D0 \u05E0\u05DE\u05E6\u05D0\u05D5 \u05EA\u05D5\u05E6\u05D0\u05D5\u05EA \u05E2\u05D1\u05D5\u05E8 [SEARCH_TERM]",many_results:"\u05E0\u05DE\u05E6\u05D0\u05D5 [COUNT] \u05EA\u05D5\u05E6\u05D0\u05D5\u05EA \u05E2\u05D1\u05D5\u05E8 [SEARCH_TERM]",one_result:"\u05E0\u05DE\u05E6\u05D0\u05D4 \u05EA\u05D5\u05E6\u05D0\u05D4 \u05D0\u05D7\u05EA \u05E2\u05D1\u05D5\u05E8 [SEARCH_TERM]",alt_search:"\u05DC\u05D0 \u05E0\u05DE\u05E6\u05D0\u05D5 \u05EA\u05D5\u05E6\u05D0\u05D5\u05EA \u05E2\u05D1\u05D5\u05E8 [SEARCH_TERM]. 
\u05DE\u05D5\u05E6\u05D2\u05D5\u05EA \u05EA\u05D5\u05E6\u05D0\u05D5\u05EA \u05E2\u05D1\u05D5\u05E8 [DIFFERENT_TERM]",search_suggestion:"\u05DC\u05D0 \u05E0\u05DE\u05E6\u05D0\u05D5 \u05EA\u05D5\u05E6\u05D0\u05D5\u05EA \u05E2\u05D1\u05D5\u05E8 [SEARCH_TERM]. \u05E0\u05E1\u05D5 \u05D0\u05D7\u05D3 \u05DE\u05D4\u05D7\u05D9\u05E4\u05D5\u05E9\u05D9\u05DD \u05D4\u05D1\u05D0\u05D9\u05DD:",searching:"\u05DE\u05D7\u05E4\u05E9 \u05D0\u05EA [SEARCH_TERM]..."},Ml={thanks_to:Tl,comments:Cl,direction:kl,strings:Sl};var Vt={};A(Vt,{comments:()=>yl,default:()=>wl,direction:()=>vl,strings:()=>Hl,thanks_to:()=>Al});var Al="Amit Yadav ",yl="",vl="ltr",Hl={placeholder:"\u0916\u094B\u091C\u0947\u0902",clear_search:"\u0938\u093E\u092B \u0915\u0930\u0947\u0902",load_more:"\u0914\u0930 \u0905\u0927\u093F\u0915 \u092A\u0930\u093F\u0923\u093E\u092E \u0932\u094B\u0921 \u0915\u0930\u0947\u0902",search_label:"\u0907\u0938 \u0938\u093E\u0907\u091F \u092E\u0947\u0902 \u0916\u094B\u091C\u0947\u0902",filters_label:"\u092B\u093C\u093F\u0932\u094D\u091F\u0930",zero_results:"\u0915\u094B\u0908 \u092A\u0930\u093F\u0923\u093E\u092E [SEARCH_TERM] \u0915\u0947 \u0932\u093F\u090F \u0928\u0939\u0940\u0902 \u092E\u093F\u0932\u093E",many_results:"[COUNT] \u092A\u0930\u093F\u0923\u093E\u092E [SEARCH_TERM] \u0915\u0947 \u0932\u093F\u090F \u092E\u093F\u0932\u0947",one_result:"[COUNT] \u092A\u0930\u093F\u0923\u093E\u092E [SEARCH_TERM] \u0915\u0947 \u0932\u093F\u090F \u092E\u093F\u0932\u093E",alt_search:"[SEARCH_TERM] \u0915\u0947 \u0932\u093F\u090F \u0915\u094B\u0908 \u092A\u0930\u093F\u0923\u093E\u092E \u0928\u0939\u0940\u0902 \u092E\u093F\u0932\u093E\u0964 \u0907\u0938\u0915\u0947 \u092C\u091C\u093E\u092F [DIFFERENT_TERM] \u0915\u0947 \u0932\u093F\u090F \u092A\u0930\u093F\u0923\u093E\u092E \u0926\u093F\u0916\u093E \u0930\u0939\u093E \u0939\u0948",search_suggestion:"[SEARCH_TERM] \u0915\u0947 \u0932\u093F\u090F \u0915\u094B\u0908 \u092A\u0930\u093F\u0923\u093E\u092E \u0928\u0939\u0940\u0902 
\u092E\u093F\u0932\u093E\u0964 \u0928\u093F\u092E\u094D\u0928\u0932\u093F\u0916\u093F\u0924 \u0916\u094B\u091C\u094B\u0902 \u092E\u0947\u0902 \u0938\u0947 \u0915\u094B\u0908 \u090F\u0915 \u0906\u091C\u093C\u092E\u093E\u090F\u0902:",searching:"[SEARCH_TERM] \u0915\u0940 \u0916\u094B\u091C \u0915\u0940 \u091C\u093E \u0930\u0939\u0940 \u0939\u0948..."},wl={thanks_to:Al,comments:yl,direction:vl,strings:Hl};var Wt={};A(Wt,{comments:()=>Nl,default:()=>jl,direction:()=>zl,strings:()=>Ol,thanks_to:()=>Fl});var Fl="Diomed ",Nl="",zl="ltr",Ol={placeholder:"Tra\u017Ei",clear_search:"O\u010Disti",load_more:"U\u010Ditaj vi\u0161e rezultata",search_label:"Pretra\u017Ei ovu stranicu",filters_label:"Filteri",zero_results:"Nema rezultata za [SEARCH_TERM]",many_results:"[COUNT] rezultata za [SEARCH_TERM]",one_result:"[COUNT] rezultat za [SEARCH_TERM]",alt_search:"Nema rezultata za [SEARCH_TERM]. Prikazujem rezultate za [DIFFERENT_TERM]",search_suggestion:"Nema rezultata za [SEARCH_TERM]. Poku\u0161aj s jednom od ovih pretraga:",searching:"Pretra\u017Eujem [SEARCH_TERM]..."},jl={thanks_to:Fl,comments:Nl,direction:zl,strings:Ol};var Kt={};A(Kt,{comments:()=>Dl,default:()=>Ll,direction:()=>Il,strings:()=>Pl,thanks_to:()=>Ul});var Ul="Adam Laki ",Dl="",Il="ltr",Pl={placeholder:"Keres\xE9s",clear_search:"T\xF6rl\xE9s",load_more:"Tov\xE1bbi tal\xE1latok bet\xF6lt\xE9se",search_label:"Keres\xE9s az oldalon",filters_label:"Sz\u0171r\xE9s",zero_results:"Nincs tal\xE1lat a(z) [SEARCH_TERM] kifejez\xE9sre",many_results:"[COUNT] db tal\xE1lat a(z) [SEARCH_TERM] kifejez\xE9sre",one_result:"[COUNT] db tal\xE1lat a(z) [SEARCH_TERM] kifejez\xE9sre",alt_search:"Nincs tal\xE1lat a(z) [SEARCH_TERM] kifejez\xE9sre. Tal\xE1latok mutat\xE1sa ink\xE1bb a(z) [DIFFERENT_TERM] kifejez\xE9sre",search_suggestion:"Nincs tal\xE1lat a(z) [SEARCH_TERM] kifejez\xE9sre. 
Pr\xF3b\xE1ld meg a k\xF6vetkez\u0151 keres\xE9sek egyik\xE9t:",searching:"Keres\xE9s a(z) [SEARCH_TERM] kifejez\xE9sre..."},Ll={thanks_to:Ul,comments:Dl,direction:Il,strings:Pl};var Gt={};A(Gt,{comments:()=>Bl,default:()=>Kl,direction:()=>Vl,strings:()=>Wl,thanks_to:()=>ql});var ql="Nixentric",Bl="",Vl="ltr",Wl={placeholder:"Cari",clear_search:"Bersihkan",load_more:"Muat lebih banyak hasil",search_label:"Telusuri situs ini",filters_label:"Filter",zero_results:"[SEARCH_TERM] tidak ditemukan",many_results:"Ditemukan [COUNT] hasil untuk [SEARCH_TERM]",one_result:"Ditemukan [COUNT] hasil untuk [SEARCH_TERM]",alt_search:"[SEARCH_TERM] tidak ditemukan. Menampilkan hasil [DIFFERENT_TERM] sebagai gantinya",search_suggestion:"[SEARCH_TERM] tidak ditemukan. Coba salah satu pencarian berikut ini:",searching:"Mencari [SEARCH_TERM]..."},Kl={thanks_to:ql,comments:Bl,direction:Vl,strings:Wl};var Jt={};A(Jt,{comments:()=>Jl,default:()=>Xl,direction:()=>Yl,strings:()=>Zl,thanks_to:()=>Gl});var Gl="Cosette Bruhns Alonso, Andrew Janco ",Jl="",Yl="ltr",Zl={placeholder:"Cerca",clear_search:"Cancella la cronologia",load_more:"Mostra pi\xF9 risultati",search_label:"Cerca nel sito",filters_label:"Filtri di ricerca",zero_results:"Nessun risultato per [SEARCH_TERM]",many_results:"[COUNT] risultati per [SEARCH_TERM]",one_result:"[COUNT] risultato per [SEARCH_TERM]",alt_search:"Nessun risultato per [SEARCH_TERM]. Mostrando risultati per [DIFFERENT_TERM] come alternativa.",search_suggestion:"Nessun risultato per [SEARCH_TERM]. 
Prova una delle seguenti ricerche:",searching:"Cercando [SEARCH_TERM]..."},Xl={thanks_to:Gl,comments:Jl,direction:Yl,strings:Zl};var Yt={};A(Yt,{comments:()=>xl,default:()=>ti,direction:()=>$l,strings:()=>ei,thanks_to:()=>Ql});var Ql="Tate",xl="",$l="ltr",ei={placeholder:"\u691C\u7D22",clear_search:"\u30AF\u30EA\u30A2",load_more:"\u6B21\u3092\u8AAD\u307F\u8FBC\u3080",search_label:"\u3053\u306E\u30B5\u30A4\u30C8\u3092\u691C\u7D22",filters_label:"\u30D5\u30A3\u30EB\u30BF",zero_results:"[SEARCH_TERM]\u306E\u691C\u7D22\u306B\u4E00\u81F4\u3059\u308B\u60C5\u5831\u306F\u3042\u308A\u307E\u305B\u3093\u3067\u3057\u305F",many_results:"[SEARCH_TERM]\u306E[COUNT]\u4EF6\u306E\u691C\u7D22\u7D50\u679C",one_result:"[SEARCH_TERM]\u306E[COUNT]\u4EF6\u306E\u691C\u7D22\u7D50\u679C",alt_search:"[SEARCH_TERM]\u306E\u691C\u7D22\u306B\u4E00\u81F4\u3059\u308B\u60C5\u5831\u306F\u3042\u308A\u307E\u305B\u3093\u3067\u3057\u305F\u3002[DIFFERENT_TERM]\u306E\u691C\u7D22\u7D50\u679C\u3092\u8868\u793A\u3057\u3066\u3044\u307E\u3059",search_suggestion:"[SEARCH_TERM]\u306E\u691C\u7D22\u306B\u4E00\u81F4\u3059\u308B\u60C5\u5831\u306F\u3042\u308A\u307E\u305B\u3093\u3067\u3057\u305F\u3002\u6B21\u306E\u3044\u305A\u308C\u304B\u306E\u691C\u7D22\u3092\u8A66\u3057\u3066\u304F\u3060\u3055\u3044",searching:"[SEARCH_TERM]\u3092\u691C\u7D22\u3057\u3066\u3044\u307E\u3059"},ti={thanks_to:Ql,comments:xl,direction:$l,strings:ei};var Zt={};A(Zt,{comments:()=>ri,default:()=>ii,direction:()=>si,strings:()=>li,thanks_to:()=>ni});var ni="Seokho Son ",ri="",si="ltr",li={placeholder:"\uAC80\uC0C9\uC5B4",clear_search:"\uBE44\uC6B0\uAE30",load_more:"\uAC80\uC0C9 \uACB0\uACFC \uB354 \uBCF4\uAE30",search_label:"\uC0AC\uC774\uD2B8 \uAC80\uC0C9",filters_label:"\uD544\uD130",zero_results:"[SEARCH_TERM]\uC5D0 \uB300\uD55C \uACB0\uACFC \uC5C6\uC74C",many_results:"[SEARCH_TERM]\uC5D0 \uB300\uD55C \uACB0\uACFC [COUNT]\uAC74",one_result:"[SEARCH_TERM]\uC5D0 \uB300\uD55C \uACB0\uACFC [COUNT]\uAC74",alt_search:"[SEARCH_TERM]\uC5D0 
\uB300\uD55C \uACB0\uACFC \uC5C6\uC74C. [DIFFERENT_TERM]\uC5D0 \uB300\uD55C \uACB0\uACFC",search_suggestion:"[SEARCH_TERM]\uC5D0 \uB300\uD55C \uACB0\uACFC \uC5C6\uC74C. \uCD94\uCC9C \uAC80\uC0C9\uC5B4: ",searching:"[SEARCH_TERM] \uAC80\uC0C9 \uC911..."},ii={thanks_to:ni,comments:ri,direction:si,strings:li};var Xt={};A(Xt,{comments:()=>oi,default:()=>_i,direction:()=>ui,strings:()=>ci,thanks_to:()=>ai});var ai="",oi="",ui="ltr",ci={placeholder:"Rapu",clear_search:"Whakakore",load_more:"Whakauta \u0113tahi otinga k\u0113",search_label:"Rapu",filters_label:"T\u0101tari",zero_results:"Otinga kore ki [SEARCH_TERM]",many_results:"[COUNT] otinga ki [SEARCH_TERM]",one_result:"[COUNT] otinga ki [SEARCH_TERM]",alt_search:"Otinga kore ki [SEARCH_TERM]. Otinga k\u0113 ki [DIFFERENT_TERM]",search_suggestion:"Otinga kore ki [SEARCH_TERM]. whakam\u0101tau ki ng\u0101 mea atu:",searching:"Rapu ki [SEARCH_TERM]..."},_i={thanks_to:ai,comments:oi,direction:ui,strings:ci};var Qt={};A(Qt,{comments:()=>di,default:()=>pi,direction:()=>hi,strings:()=>mi,thanks_to:()=>fi});var fi="Harry Min Khant ",di="",hi="ltr",mi={placeholder:"\u101B\u103E\u102C\u101B\u1014\u103A",clear_search:"\u101B\u103E\u102C\u1016\u103D\u1031\u1019\u103E\u102F\u1000\u102D\u102F \u101B\u103E\u1004\u103A\u1038\u101C\u1004\u103A\u1038\u1015\u102B\u104B",load_more:"\u1014\u1031\u102C\u1000\u103A\u1011\u1015\u103A\u101B\u101C\u1012\u103A\u1019\u103B\u102C\u1038\u1000\u102D\u102F \u1010\u1004\u103A\u1015\u102B\u104B",search_label:"\u1024\u1006\u102D\u102F\u1000\u103A\u1010\u103D\u1004\u103A\u101B\u103E\u102C\u1016\u103D\u1031\u1015\u102B\u104B",filters_label:"\u1005\u1005\u103A\u1011\u102F\u1010\u103A\u1019\u103E\u102F\u1019\u103B\u102C\u1038",zero_results:"[SEARCH_TERM] \u1021\u1010\u103D\u1000\u103A \u101B\u101C\u1012\u103A\u1019\u103B\u102C\u1038 \u1019\u101B\u103E\u102D\u1015\u102B",many_results:"[SEARCH_TERM] \u1021\u1010\u103D\u1000\u103A \u101B\u101C\u1012\u103A [COUNT] \u1001\u102F",one_result:"[SEARCH_TERM] 
\u1021\u1010\u103D\u1000\u103A \u101B\u101C\u1012\u103A [COUNT]",alt_search:"[SEARCH_TERM] \u1021\u1010\u103D\u1000\u103A \u101B\u101C\u1012\u103A\u1019\u101B\u103E\u102D\u1015\u102B\u104B \u104E\u1004\u103A\u1038\u1021\u1005\u102C\u1038 [DIFFERENT_TERM] \u1021\u1010\u103D\u1000\u103A \u101B\u101C\u1012\u103A\u1019\u103B\u102C\u1038\u1000\u102D\u102F \u1015\u103C\u101E\u101E\u100A\u103A\u104B",search_suggestion:"[SEARCH_TERM] \u1021\u1010\u103D\u1000\u103A \u101B\u101C\u1012\u103A\u1019\u101B\u103E\u102D\u1015\u102B\u104B \u1021\u1031\u102C\u1000\u103A\u1015\u102B\u101B\u103E\u102C\u1016\u103D\u1031\u1019\u103E\u102F\u1019\u103B\u102C\u1038\u1011\u1032\u1019\u103E \u1010\u1005\u103A\u1001\u102F\u1000\u102D\u102F \u1005\u1019\u103A\u1038\u1000\u103C\u100A\u1037\u103A\u1015\u102B:",searching:"[SEARCH_TERM] \u1000\u102D\u102F \u101B\u103E\u102C\u1016\u103D\u1031\u1014\u1031\u101E\u100A\u103A..."},pi={thanks_to:fi,comments:di,direction:hi,strings:mi};var xt={};A(xt,{comments:()=>Ei,default:()=>Ti,direction:()=>Ri,strings:()=>bi,thanks_to:()=>gi});var gi="Eirik Mikkelsen",Ei="",Ri="ltr",bi={placeholder:"S\xF8k",clear_search:"Fjern",load_more:"Last flere resultater",search_label:"S\xF8k p\xE5 denne siden",filters_label:"Filtre",zero_results:"Ingen resultater for [SEARCH_TERM]",many_results:"[COUNT] resultater for [SEARCH_TERM]",one_result:"[COUNT] resultat for [SEARCH_TERM]",alt_search:"Ingen resultater for [SEARCH_TERM]. Viser resultater for [DIFFERENT_TERM] i stedet",search_suggestion:"Ingen resultater for [SEARCH_TERM]. 
Pr\xF8v en av disse s\xF8keordene i stedet:",searching:"S\xF8ker etter [SEARCH_TERM]"},Ti={thanks_to:gi,comments:Ei,direction:Ri,strings:bi};var $t={};A($t,{comments:()=>ki,default:()=>Ai,direction:()=>Si,strings:()=>Mi,thanks_to:()=>Ci});var Ci="Paul van Brouwershaven",ki="",Si="ltr",Mi={placeholder:"Zoeken",clear_search:"Reset",load_more:"Meer resultaten laden",search_label:"Doorzoek deze site",filters_label:"Filters",zero_results:"Geen resultaten voor [SEARCH_TERM]",many_results:"[COUNT] resultaten voor [SEARCH_TERM]",one_result:"[COUNT] resultaat voor [SEARCH_TERM]",alt_search:"Geen resultaten voor [SEARCH_TERM]. In plaats daarvan worden resultaten voor [DIFFERENT_TERM] weergegeven",search_suggestion:"Geen resultaten voor [SEARCH_TERM]. Probeer een van de volgende zoekopdrachten:",searching:"Zoeken naar [SEARCH_TERM]..."},Ai={thanks_to:Ci,comments:ki,direction:Si,strings:Mi};var en={};A(en,{comments:()=>vi,default:()=>Fi,direction:()=>Hi,strings:()=>wi,thanks_to:()=>yi});var yi="Eirik Mikkelsen",vi="",Hi="ltr",wi={placeholder:"S\xF8k",clear_search:"Fjern",load_more:"Last fleire resultat",search_label:"S\xF8k p\xE5 denne sida",filters_label:"Filter",zero_results:"Ingen resultat for [SEARCH_TERM]",many_results:"[COUNT] resultat for [SEARCH_TERM]",one_result:"[COUNT] resultat for [SEARCH_TERM]",alt_search:"Ingen resultat for [SEARCH_TERM]. Viser resultat for [DIFFERENT_TERM] i staden",search_suggestion:"Ingen resultat for [SEARCH_TERM]. 
Pr\xF8v eitt av desse s\xF8keorda i staden:",searching:"S\xF8ker etter [SEARCH_TERM]"},Fi={thanks_to:yi,comments:vi,direction:Hi,strings:wi};var tn={};A(tn,{comments:()=>zi,default:()=>Ui,direction:()=>Oi,strings:()=>ji,thanks_to:()=>Ni});var Ni="Christopher Wingate",zi="",Oi="ltr",ji={placeholder:"S\xF8k",clear_search:"Fjern",load_more:"Last flere resultater",search_label:"S\xF8k p\xE5 denne siden",filters_label:"Filtre",zero_results:"Ingen resultater for [SEARCH_TERM]",many_results:"[COUNT] resultater for [SEARCH_TERM]",one_result:"[COUNT] resultat for [SEARCH_TERM]",alt_search:"Ingen resultater for [SEARCH_TERM]. Viser resultater for [DIFFERENT_TERM] i stedet",search_suggestion:"Ingen resultater for [SEARCH_TERM]. Pr\xF8v en av disse s\xF8keordene i stedet:",searching:"S\xF8ker etter [SEARCH_TERM]"},Ui={thanks_to:Ni,comments:zi,direction:Oi,strings:ji};var nn={};A(nn,{comments:()=>Ii,default:()=>qi,direction:()=>Pi,strings:()=>Li,thanks_to:()=>Di});var Di="",Ii="",Pi="ltr",Li={placeholder:"Szukaj",clear_search:"Wyczy\u015B\u0107",load_more:"Za\u0142aduj wi\u0119cej",search_label:"Przeszukaj t\u0119 stron\u0119",filters_label:"Filtry",zero_results:"Brak wynik\xF3w dla [SEARCH_TERM]",many_results:"[COUNT] wynik\xF3w dla [SEARCH_TERM]",one_result:"[COUNT] wynik dla [SEARCH_TERM]",alt_search:"Brak wynik\xF3w dla [SEARCH_TERM]. Wy\u015Bwietlam wyniki dla [DIFFERENT_TERM]",search_suggestion:"Brak wynik\xF3w dla [SEARCH_TERM]. 
Pokrewne wyniki wyszukiwania:",searching:"Szukam [SEARCH_TERM]..."},qi={thanks_to:Di,comments:Ii,direction:Pi,strings:Li};var rn={};A(rn,{comments:()=>Vi,default:()=>Gi,direction:()=>Wi,strings:()=>Ki,thanks_to:()=>Bi});var Bi="Jonatah",Vi="",Wi="ltr",Ki={placeholder:"Pesquisar",clear_search:"Limpar",load_more:"Ver mais resultados",search_label:"Pesquisar",filters_label:"Filtros",zero_results:"Nenhum resultado encontrado para [SEARCH_TERM]",many_results:"[COUNT] resultados encontrados para [SEARCH_TERM]",one_result:"[COUNT] resultado encontrado para [SEARCH_TERM]",alt_search:"Nenhum resultado encontrado para [SEARCH_TERM]. Exibindo resultados para [DIFFERENT_TERM]",search_suggestion:"Nenhum resultado encontrado para [SEARCH_TERM]. Tente uma das seguintes pesquisas:",searching:"Pesquisando por [SEARCH_TERM]..."},Gi={thanks_to:Bi,comments:Vi,direction:Wi,strings:Ki};var sn={};A(sn,{comments:()=>Yi,default:()=>Qi,direction:()=>Zi,strings:()=>Xi,thanks_to:()=>Ji});var Ji="Bogdan Mateescu ",Yi="",Zi="ltr",Xi={placeholder:"C\u0103utare",clear_search:"\u015Eterge\u0163i",load_more:"\xCEnc\u0103rca\u021Bi mai multe rezultate",search_label:"C\u0103uta\u021Bi \xEEn acest site",filters_label:"Filtre",zero_results:"Niciun rezultat pentru [SEARCH_TERM]",many_results:"[COUNT] rezultate pentru [SEARCH_TERM]",one_result:"[COUNT] rezultat pentru [SEARCH_TERM]",alt_search:"Niciun rezultat pentru [SEARCH_TERM]. Se afi\u0219eaz\u0103 \xEEn schimb rezultatele pentru [DIFFERENT_TERM]",search_suggestion:"Niciun rezultat pentru [SEARCH_TERM]. 
\xCEncerca\u021Bi una dintre urm\u0103toarele c\u0103ut\u0103ri:",searching:"Se caut\u0103 dup\u0103: [SEARCH_TERM]..."},Qi={thanks_to:Ji,comments:Yi,direction:Zi,strings:Xi};var ln={};A(ln,{comments:()=>$i,default:()=>na,direction:()=>ea,strings:()=>ta,thanks_to:()=>xi});var xi="Aleksandr Gordeev",$i="",ea="ltr",ta={placeholder:"\u041F\u043E\u0438\u0441\u043A",clear_search:"\u041E\u0447\u0438\u0441\u0442\u0438\u0442\u044C \u043F\u043E\u043B\u0435",load_more:"\u0417\u0430\u0433\u0440\u0443\u0437\u0438\u0442\u044C \u0435\u0449\u0435",search_label:"\u041F\u043E\u0438\u0441\u043A \u043F\u043E \u0441\u0430\u0439\u0442\u0443",filters_label:"\u0424\u0438\u043B\u044C\u0442\u0440\u044B",zero_results:"\u041D\u0438\u0447\u0435\u0433\u043E \u043D\u0435 \u043D\u0430\u0439\u0434\u0435\u043D\u043E \u043F\u043E \u0437\u0430\u043F\u0440\u043E\u0441\u0443: [SEARCH_TERM]",many_results:"[COUNT] \u0440\u0435\u0437\u0443\u043B\u044C\u0442\u0430\u0442\u043E\u0432 \u043F\u043E \u0437\u0430\u043F\u0440\u043E\u0441\u0443: [SEARCH_TERM]",one_result:"[COUNT] \u0440\u0435\u0437\u0443\u043B\u044C\u0442\u0430\u0442 \u043F\u043E \u0437\u0430\u043F\u0440\u043E\u0441\u0443: [SEARCH_TERM]",alt_search:"\u041D\u0438\u0447\u0435\u0433\u043E \u043D\u0435 \u043D\u0430\u0439\u0434\u0435\u043D\u043E \u043F\u043E \u0437\u0430\u043F\u0440\u043E\u0441\u0443: [SEARCH_TERM]. \u041F\u043E\u043A\u0430\u0437\u0430\u043D\u044B \u0440\u0435\u0437\u0443\u043B\u044C\u0442\u0430\u0442\u044B \u043F\u043E \u0437\u0430\u043F\u0440\u043E\u0441\u0443: [DIFFERENT_TERM]",search_suggestion:"\u041D\u0438\u0447\u0435\u0433\u043E \u043D\u0435 \u043D\u0430\u0439\u0434\u0435\u043D\u043E \u043F\u043E \u0437\u0430\u043F\u0440\u043E\u0441\u0443: [SEARCH_TERM]. 
\u041F\u043E\u043F\u0440\u043E\u0431\u0443\u0439\u0442\u0435 \u043E\u0434\u0438\u043D \u0438\u0437 \u0441\u043B\u0435\u0434\u0443\u044E\u0449\u0438\u0445 \u0432\u0430\u0440\u0438\u0430\u043D\u0442\u043E\u0432",searching:"\u041F\u043E\u0438\u0441\u043A \u043F\u043E \u0437\u0430\u043F\u0440\u043E\u0441\u0443: [SEARCH_TERM]"},na={thanks_to:xi,comments:$i,direction:ea,strings:ta};var an={};A(an,{comments:()=>sa,default:()=>aa,direction:()=>la,strings:()=>ia,thanks_to:()=>ra});var ra="Andrija Sagicc",sa="",la="ltr",ia={placeholder:"\u041F\u0440\u0435\u0442\u0440\u0430\u0433\u0430",clear_search:"\u0411\u0440\u0438\u0441\u0430\u045A\u0435",load_more:"\u041F\u0440\u0438\u043A\u0430\u0437 \u0432\u0438\u0448\u0435 \u0440\u0435\u0437\u0443\u043B\u0442\u0430\u0442\u0430",search_label:"\u041F\u0440\u0435\u0442\u0440\u0430\u0433\u0430 \u0441\u0430\u0458\u0442\u0430",filters_label:"\u0424\u0438\u043B\u0442\u0435\u0440\u0438",zero_results:"\u041D\u0435\u043C\u0430 \u0440\u0435\u0437\u0443\u043B\u0442\u0430\u0442\u0430 \u0437\u0430 [SEARCH_TERM]",many_results:"[COUNT] \u0440\u0435\u0437\u0443\u043B\u0442\u0430\u0442\u0430 \u0437\u0430 [SEARCH_TERM]",one_result:"[COUNT] \u0440\u0435\u0437\u0443\u043B\u0442\u0430\u0442\u0430 \u0437\u0430 [SEARCH_TERM]",alt_search:"\u041D\u0435\u043C\u0430 \u0440\u0435\u0437\u0443\u043B\u0442\u0430\u0442\u0430 \u0437\u0430 [SEARCH_TERM]. \u041F\u0440\u0438\u043A\u0430\u0437 \u0434\u043E\u0434\u0430\u0442\u043D\u0438\u043A \u0440\u0435\u0437\u0443\u043B\u0442\u0430\u0442\u0430 \u0437\u0430 [DIFFERENT_TERM]",search_suggestion:"\u041D\u0435\u043C\u0430 \u0440\u0435\u0437\u0443\u043B\u0442\u0430\u0442\u0430 \u0437\u0430 [SEARCH_TERM]. 
\u041F\u043E\u043A\u0443\u0448\u0430\u0458\u0442\u0435 \u0441\u0430 \u043D\u0435\u043A\u043E\u043C \u043E\u0434 \u0441\u043B\u0435\u0434\u0435\u045B\u0438\u0445 \u043F\u0440\u0435\u0442\u0440\u0430\u0433\u0430:",searching:"\u041F\u0440\u0435\u0442\u0440\u0430\u0433\u0430 \u0442\u0435\u0440\u043C\u0438\u043D\u0430 [SEARCH_TERM]..."},aa={thanks_to:ra,comments:sa,direction:la,strings:ia};var on={};A(on,{comments:()=>ua,default:()=>fa,direction:()=>ca,strings:()=>_a,thanks_to:()=>oa});var oa="Montazar Al-Jaber ",ua="",ca="ltr",_a={placeholder:"S\xF6k",clear_search:"Rensa",load_more:"Visa fler tr\xE4ffar",search_label:"S\xF6k p\xE5 denna sida",filters_label:"Filter",zero_results:"[SEARCH_TERM] gav inga tr\xE4ffar",many_results:"[SEARCH_TERM] gav [COUNT] tr\xE4ffar",one_result:"[SEARCH_TERM] gav [COUNT] tr\xE4ff",alt_search:"[SEARCH_TERM] gav inga tr\xE4ffar. Visar resultat f\xF6r [DIFFERENT_TERM] ist\xE4llet",search_suggestion:"[SEARCH_TERM] gav inga tr\xE4ffar. F\xF6rs\xF6k igen med en av f\xF6ljande s\xF6kord:",searching:"S\xF6ker efter [SEARCH_TERM]..."},fa={thanks_to:oa,comments:ua,direction:ca,strings:_a};var un={};A(un,{comments:()=>ha,default:()=>ga,direction:()=>ma,strings:()=>pa,thanks_to:()=>da});var da="Anonymous",ha="",ma="ltr",pa={placeholder:"Tafuta",clear_search:"Futa",load_more:"Pakia matokeo zaidi",search_label:"Tafuta tovuti hii",filters_label:"Vichujio",zero_results:"Hakuna matokeo ya [SEARCH_TERM]",many_results:"Matokeo [COUNT] ya [SEARCH_TERM]",one_result:"Tokeo [COUNT] la [SEARCH_TERM]",alt_search:"Hakuna mayokeo ya [SEARCH_TERM]. Badala yake, inaonyesha matokeo ya [DIFFERENT_TERM]",search_suggestion:"Hakuna matokeo ya [SEARCH_TERM]. 
Jaribu mojawapo ya utafutaji ufuatao:",searching:"Kutafuta [SEARCH_TERM]..."},ga={thanks_to:da,comments:ha,direction:ma,strings:pa};var cn={};A(cn,{comments:()=>Ra,default:()=>Ca,direction:()=>ba,strings:()=>Ta,thanks_to:()=>Ea});var Ea="",Ra="",ba="ltr",Ta={placeholder:"\u0BA4\u0BC7\u0B9F\u0BC1\u0B95",clear_search:"\u0B85\u0BB4\u0BBF\u0B95\u0BCD\u0B95\u0BC1\u0B95",load_more:"\u0BAE\u0BC7\u0BB2\u0BC1\u0BAE\u0BCD \u0BAE\u0BC1\u0B9F\u0BBF\u0BB5\u0BC1\u0B95\u0BB3\u0BC8\u0B95\u0BCD \u0B95\u0BBE\u0B9F\u0BCD\u0B9F\u0BC1\u0B95",search_label:"\u0B87\u0BA8\u0BCD\u0BA4 \u0BA4\u0BB3\u0BA4\u0BCD\u0BA4\u0BBF\u0BB2\u0BCD \u0BA4\u0BC7\u0B9F\u0BC1\u0B95",filters_label:"\u0BB5\u0B9F\u0BBF\u0B95\u0B9F\u0BCD\u0B9F\u0BB2\u0BCD\u0B95\u0BB3\u0BCD",zero_results:"[SEARCH_TERM] \u0B95\u0BCD\u0B95\u0BBE\u0BA9 \u0BAE\u0BC1\u0B9F\u0BBF\u0BB5\u0BC1\u0B95\u0BB3\u0BCD \u0B87\u0BB2\u0BCD\u0BB2\u0BC8",many_results:"[SEARCH_TERM] \u0B95\u0BCD\u0B95\u0BBE\u0BA9 [COUNT] \u0BAE\u0BC1\u0B9F\u0BBF\u0BB5\u0BC1\u0B95\u0BB3\u0BCD",one_result:"[SEARCH_TERM] \u0B95\u0BCD\u0B95\u0BBE\u0BA9 \u0BAE\u0BC1\u0B9F\u0BBF\u0BB5\u0BC1",alt_search:"[SEARCH_TERM] \u0B87\u0BA4\u0BCD\u0BA4\u0BC7\u0B9F\u0BB2\u0BC1\u0B95\u0BCD\u0B95\u0BBE\u0BA9 \u0BAE\u0BC1\u0B9F\u0BBF\u0BB5\u0BC1\u0B95\u0BB3\u0BCD \u0B87\u0BB2\u0BCD\u0BB2\u0BC8, \u0B87\u0BA8\u0BCD\u0BA4 \u0BA4\u0BC7\u0B9F\u0BB2\u0BCD\u0B95\u0BB3\u0BC1\u0B95\u0BCD\u0B95\u0BBE\u0BA9 \u0B92\u0BA4\u0BCD\u0BA4 \u0BAE\u0BC1\u0B9F\u0BBF\u0BB5\u0BC1\u0B95\u0BB3\u0BCD [DIFFERENT_TERM]",search_suggestion:"[SEARCH_TERM] \u0B87\u0BA4\u0BCD \u0BA4\u0BC7\u0B9F\u0BB2\u0BC1\u0B95\u0BCD\u0B95\u0BBE\u0BA9 \u0BAE\u0BC1\u0B9F\u0BBF\u0BB5\u0BC1\u0B95\u0BB3\u0BCD \u0B87\u0BB2\u0BCD\u0BB2\u0BC8.\u0B87\u0BA4\u0BB1\u0BCD\u0B95\u0BC1 \u0BAA\u0BA4\u0BBF\u0BB2\u0BC0\u0B9F\u0BBE\u0BA9 \u0BA4\u0BC7\u0B9F\u0BB2\u0BCD\u0B95\u0BB3\u0BC8 \u0BA4\u0BC7\u0B9F\u0BC1\u0B95:",searching:"[SEARCH_TERM] 
\u0BA4\u0BC7\u0B9F\u0BAA\u0BCD\u0BAA\u0B9F\u0BC1\u0B95\u0BBF\u0BA9\u0BCD\u0BB1\u0BA4\u0BC1"},Ca={thanks_to:Ea,comments:Ra,direction:ba,strings:Ta};var _n={};A(_n,{comments:()=>Sa,default:()=>ya,direction:()=>Ma,strings:()=>Aa,thanks_to:()=>ka});var ka="Patiphon Loetsuthakun ",Sa="",Ma="ltr",Aa={placeholder:"\u0E04\u0E49\u0E19\u0E2B\u0E32",clear_search:"\u0E25\u0E49\u0E32\u0E07",load_more:"\u0E42\u0E2B\u0E25\u0E14\u0E1C\u0E25\u0E25\u0E31\u0E1E\u0E18\u0E4C\u0E40\u0E1E\u0E34\u0E48\u0E21\u0E40\u0E15\u0E34\u0E21",search_label:"\u0E04\u0E49\u0E19\u0E2B\u0E32\u0E1A\u0E19\u0E40\u0E27\u0E47\u0E1A\u0E44\u0E0B\u0E15\u0E4C",filters_label:"\u0E15\u0E31\u0E27\u0E01\u0E23\u0E2D\u0E07",zero_results:"\u0E44\u0E21\u0E48\u0E1E\u0E1A\u0E1C\u0E25\u0E25\u0E31\u0E1E\u0E18\u0E4C\u0E2A\u0E33\u0E2B\u0E23\u0E31\u0E1A [SEARCH_TERM]",many_results:"\u0E1E\u0E1A [COUNT] \u0E1C\u0E25\u0E01\u0E32\u0E23\u0E04\u0E49\u0E19\u0E2B\u0E32\u0E2A\u0E33\u0E2B\u0E23\u0E31\u0E1A [SEARCH_TERM]",one_result:"\u0E1E\u0E1A [COUNT] \u0E1C\u0E25\u0E01\u0E32\u0E23\u0E04\u0E49\u0E19\u0E2B\u0E32\u0E2A\u0E33\u0E2B\u0E23\u0E31\u0E1A [SEARCH_TERM]",alt_search:"\u0E44\u0E21\u0E48\u0E1E\u0E1A\u0E1C\u0E25\u0E25\u0E31\u0E1E\u0E18\u0E4C\u0E2A\u0E33\u0E2B\u0E23\u0E31\u0E1A [SEARCH_TERM] \u0E41\u0E2A\u0E14\u0E07\u0E1C\u0E25\u0E25\u0E31\u0E1E\u0E18\u0E4C\u0E08\u0E32\u0E01\u0E01\u0E32\u0E23\u0E04\u0E49\u0E19\u0E2B\u0E32 [DIFFERENT_TERM] \u0E41\u0E17\u0E19",search_suggestion:"\u0E44\u0E21\u0E48\u0E1E\u0E1A\u0E1C\u0E25\u0E25\u0E31\u0E1E\u0E18\u0E4C\u0E2A\u0E33\u0E2B\u0E23\u0E31\u0E1A [SEARCH_TERM] \u0E25\u0E2D\u0E07\u0E04\u0E33\u0E04\u0E49\u0E19\u0E2B\u0E32\u0E40\u0E2B\u0E25\u0E48\u0E32\u0E19\u0E35\u0E49\u0E41\u0E17\u0E19:",searching:"\u0E01\u0E33\u0E25\u0E31\u0E07\u0E04\u0E49\u0E19\u0E2B\u0E32 [SEARCH_TERM]..."},ya={thanks_to:ka,comments:Sa,direction:Ma,strings:Aa};var fn={};A(fn,{comments:()=>Ha,default:()=>Na,direction:()=>wa,strings:()=>Fa,thanks_to:()=>va});var va="Taylan \xD6zg\xFCr 
Bildik",Ha="",wa="ltr",Fa={placeholder:"Ara\u015Ft\u0131r",clear_search:"Temizle",load_more:"Daha fazla sonu\xE7",search_label:"Site genelinde arama",filters_label:"Filtreler",zero_results:"[SEARCH_TERM] i\xE7in sonu\xE7 yok",many_results:"[SEARCH_TERM] i\xE7in [COUNT] sonu\xE7 bulundu",one_result:"[SEARCH_TERM] i\xE7in [COUNT] sonu\xE7 bulundu",alt_search:"[SEARCH_TERM] i\xE7in sonu\xE7 yok. Bunun yerine [DIFFERENT_TERM] i\xE7in sonu\xE7lar g\xF6steriliyor",search_suggestion:"[SEARCH_TERM] i\xE7in sonu\xE7 yok. Alternatif olarak a\u015Fa\u011F\u0131daki kelimelerden birini deneyebilirsiniz:",searching:"[SEARCH_TERM] ara\u015Ft\u0131r\u0131l\u0131yor..."},Na={thanks_to:va,comments:Ha,direction:wa,strings:Fa};var dn={};A(dn,{comments:()=>Oa,default:()=>Da,direction:()=>ja,strings:()=>Ua,thanks_to:()=>za});var za="Vladyslav Lyshenko ",Oa="",ja="ltr",Ua={placeholder:"\u041F\u043E\u0448\u0443\u043A",clear_search:"\u041E\u0447\u0438\u0441\u0442\u0438\u0442\u0438 \u043F\u043E\u043B\u0435",load_more:"\u0417\u0430\u0432\u0430\u043D\u0442\u0430\u0436\u0438\u0442\u0438 \u0449\u0435",search_label:"\u041F\u043E\u0448\u0443\u043A \u043F\u043E \u0441\u0430\u0439\u0442\u0443",filters_label:"\u0424\u0456\u043B\u044C\u0442\u0440\u0438",zero_results:"\u041D\u0456\u0447\u043E\u0433\u043E \u043D\u0435 \u0437\u043D\u0430\u0439\u0434\u0435\u043D\u043E \u0437\u0430 \u0437\u0430\u043F\u0438\u0442\u043E\u043C: [SEARCH_TERM]",many_results:"[COUNT] \u0440\u0435\u0437\u0443\u043B\u044C\u0442\u0430\u0442\u0456\u0432 \u043D\u0430 \u0437\u0430\u043F\u0438\u0442: [SEARCH_TERM]",one_result:"[COUNT] \u0440\u0435\u0437\u0443\u043B\u044C\u0442\u0430\u0442 \u0437\u0430 \u0437\u0430\u043F\u0438\u0442\u043E\u043C: [SEARCH_TERM]",alt_search:"\u041D\u0456\u0447\u043E\u0433\u043E \u043D\u0435 \u0437\u043D\u0430\u0439\u0434\u0435\u043D\u043E \u043D\u0430 \u0437\u0430\u043F\u0438\u0442: [SEARCH_TERM]. 
\u041F\u043E\u043A\u0430\u0437\u0430\u043D\u043E \u0440\u0435\u0437\u0443\u043B\u044C\u0442\u0430\u0442\u0438 \u043D\u0430 \u0437\u0430\u043F\u0438\u0442: [DIFFERENT_TERM]",search_suggestion:"\u041D\u0456\u0447\u043E\u0433\u043E \u043D\u0435 \u0437\u043D\u0430\u0439\u0434\u0435\u043D\u043E \u043D\u0430 \u0437\u0430\u043F\u0438\u0442: [SEARCH_TERM]. \u0421\u043F\u0440\u043E\u0431\u0443\u0439\u0442\u0435 \u043E\u0434\u0438\u043D \u0456\u0437 \u0442\u0430\u043A\u0438\u0445 \u0432\u0430\u0440\u0456\u0430\u043D\u0442\u0456\u0432",searching:"\u041F\u043E\u0448\u0443\u043A \u0437\u0430 \u0437\u0430\u043F\u0438\u0442\u043E\u043C: [SEARCH_TERM]"},Da={thanks_to:za,comments:Oa,direction:ja,strings:Ua};var hn={};A(hn,{comments:()=>Pa,default:()=>Ba,direction:()=>La,strings:()=>qa,thanks_to:()=>Ia});var Ia="Long Nhat Nguyen",Pa="",La="ltr",qa={placeholder:"T\xECm ki\u1EBFm",clear_search:"X\xF3a",load_more:"Nhi\u1EC1u k\u1EBFt qu\u1EA3 h\u01A1n",search_label:"T\xECm ki\u1EBFm trong trang n\xE0y",filters_label:"B\u1ED9 l\u1ECDc",zero_results:"Kh\xF4ng t\xECm th\u1EA5y k\u1EBFt qu\u1EA3 cho [SEARCH_TERM]",many_results:"[COUNT] k\u1EBFt qu\u1EA3 cho [SEARCH_TERM]",one_result:"[COUNT] k\u1EBFt qu\u1EA3 cho [SEARCH_TERM]",alt_search:"Kh\xF4ng t\xECm th\u1EA5y k\u1EBFt qu\u1EA3 cho [SEARCH_TERM]. Ki\u1EC3m th\u1ECB k\u1EBFt qu\u1EA3 thay th\u1EBF v\u1EDBi [DIFFERENT_TERM]",search_suggestion:"Kh\xF4ng t\xECm th\u1EA5y k\u1EBFt qu\u1EA3 cho [SEARCH_TERM]. 
Th\u1EED m\u1ED9t trong c\xE1c t\xECm ki\u1EBFm:",searching:"\u0110ang t\xECm ki\u1EBFm cho [SEARCH_TERM]..."},Ba={thanks_to:Ia,comments:Pa,direction:La,strings:qa};var mn={};A(mn,{comments:()=>Wa,default:()=>Ja,direction:()=>Ka,strings:()=>Ga,thanks_to:()=>Va});var Va="Amber Song",Wa="",Ka="ltr",Ga={placeholder:"\u641C\u7D22",clear_search:"\u6E05\u9664",load_more:"\u52A0\u8F7D\u66F4\u591A\u7ED3\u679C",search_label:"\u7AD9\u5185\u641C\u7D22",filters_label:"\u7B5B\u9009",zero_results:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C",many_results:"\u627E\u5230 [COUNT] \u4E2A [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C",one_result:"\u627E\u5230 [COUNT] \u4E2A [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C",alt_search:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C\u3002\u6539\u4E3A\u663E\u793A [DIFFERENT_TERM] \u7684\u76F8\u5173\u7ED3\u679C",search_suggestion:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C\u3002\u8BF7\u5C1D\u8BD5\u4EE5\u4E0B\u641C\u7D22\u3002",searching:"\u6B63\u5728\u641C\u7D22 [SEARCH_TERM]..."},Ja={thanks_to:Va,comments:Wa,direction:Ka,strings:Ga};var pn={};A(pn,{comments:()=>Za,default:()=>xa,direction:()=>Xa,strings:()=>Qa,thanks_to:()=>Ya});var Ya="Amber Song",Za="",Xa="ltr",Qa={placeholder:"\u641C\u7D22",clear_search:"\u6E05\u9664",load_more:"\u52A0\u8F09\u66F4\u591A\u7D50\u679C",search_label:"\u7AD9\u5167\u641C\u7D22",filters_label:"\u7BE9\u9078",zero_results:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u95DC\u7D50\u679C",many_results:"\u627E\u5230 [COUNT] \u500B [SEARCH_TERM] \u7684\u76F8\u95DC\u7D50\u679C",one_result:"\u627E\u5230 [COUNT] \u500B [SEARCH_TERM] \u7684\u76F8\u95DC\u7D50\u679C",alt_search:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u95DC\u7D50\u679C\u3002\u6539\u70BA\u986F\u793A [DIFFERENT_TERM] \u7684\u76F8\u95DC\u7D50\u679C",search_suggestion:"\u672A\u627E\u5230 [SEARCH_TERM] 
\u7684\u76F8\u95DC\u7D50\u679C\u3002\u8ACB\u5617\u8A66\u4EE5\u4E0B\u641C\u7D22\u3002",searching:"\u6B63\u5728\u641C\u7D22 [SEARCH_TERM]..."},xa={thanks_to:Ya,comments:Za,direction:Xa,strings:Qa};var gn={};A(gn,{comments:()=>eo,default:()=>ro,direction:()=>to,strings:()=>no,thanks_to:()=>$a});var $a="Amber Song",eo="",to="ltr",no={placeholder:"\u641C\u7D22",clear_search:"\u6E05\u9664",load_more:"\u52A0\u8F7D\u66F4\u591A\u7ED3\u679C",search_label:"\u7AD9\u5185\u641C\u7D22",filters_label:"\u7B5B\u9009",zero_results:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C",many_results:"\u627E\u5230 [COUNT] \u4E2A [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C",one_result:"\u627E\u5230 [COUNT] \u4E2A [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C",alt_search:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C\u3002\u6539\u4E3A\u663E\u793A [DIFFERENT_TERM] \u7684\u76F8\u5173\u7ED3\u679C",search_suggestion:"\u672A\u627E\u5230 [SEARCH_TERM] \u7684\u76F8\u5173\u7ED3\u679C\u3002\u8BF7\u5C1D\u8BD5\u4EE5\u4E0B\u641C\u7D22\u3002",searching:"\u6B63\u5728\u641C\u7D22 [SEARCH_TERM]..."},ro={thanks_to:$a,comments:eo,direction:to,strings:no};var 
so=[vt,Ht,wt,Ft,Nt,zt,Ot,jt,Ut,Dt,It,Pt,Lt,qt,Bt,Vt,Wt,Kt,Gt,Jt,Yt,Zt,Xt,Qt,xt,$t,en,tn,nn,rn,sn,ln,an,on,un,cn,_n,fn,dn,hn,mn,pn,gn],dr=so,hr=["../../translations/af.json","../../translations/ar.json","../../translations/bn.json","../../translations/ca.json","../../translations/cs.json","../../translations/da.json","../../translations/de.json","../../translations/en.json","../../translations/es.json","../../translations/eu.json","../../translations/fa.json","../../translations/fi.json","../../translations/fr.json","../../translations/gl.json","../../translations/he.json","../../translations/hi.json","../../translations/hr.json","../../translations/hu.json","../../translations/id.json","../../translations/it.json","../../translations/ja.json","../../translations/ko.json","../../translations/mi.json","../../translations/my.json","../../translations/nb.json","../../translations/nl.json","../../translations/nn.json","../../translations/no.json","../../translations/pl.json","../../translations/pt.json","../../translations/ro.json","../../translations/ru.json","../../translations/sr.json","../../translations/sv.json","../../translations/sw.json","../../translations/ta.json","../../translations/th.json","../../translations/tr.json","../../translations/uk.json","../../translations/vi.json","../../translations/zh-cn.json","../../translations/zh-tw.json","../../translations/zh.json"];function mr(n,e,t){let r=n.slice();return r[51]=e[t],r}function pr(n){let e,t,r;function s(i){n[37](i)}let l={show_empty_filters:n[5],open_filters:n[6],available_filters:n[18],translate:n[20],automatic_translations:n[19],translations:n[7]};return n[0]!==void 0&&(l.selected_filters=n[0]),e=new fr({props:l}),le.push(()=>Un(e,"selected_filters",s)),{c(){ut(e.$$.fragment)},m(i,a){me(e,i,a),r=!0},p(i,a){let 
o={};a[0]&32&&(o.show_empty_filters=i[5]),a[0]&64&&(o.open_filters=i[6]),a[0]&262144&&(o.available_filters=i[18]),a[0]&524288&&(o.automatic_translations=i[19]),a[0]&128&&(o.translations=i[7]),!t&&a[0]&1&&(t=!0,o.selected_filters=i[0],Nn(()=>t=!1)),e.$set(o)},i(i){r||(D(e.$$.fragment,i),r=!0)},o(i){P(e.$$.fragment,i),r=!1},d(i){ue(e,i)}}}function gr(n){let e,t,r,s,l=[ao,io],i=[];function a(o,f){return o[14]?0:1}return t=a(n,[-1,-1]),r=i[t]=l[t](n),{c(){e=C("div"),r.c(),m(e,"class","pagefind-ui__results-area svelte-e9gkc3")},m(o,f){S(o,e,f),i[t].m(e,null),s=!0},p(o,f){let c=t;t=a(o,f),t===c?i[t].p(o,f):(ae(),P(i[c],1,1,()=>{i[c]=null}),oe(),r=i[t],r?r.p(o,f):(r=i[t]=l[t](o),r.c()),D(r,1),r.m(e,null))},i(o){s||(D(r),s=!0)},o(o){P(r),s=!1},d(o){o&&k(e),i[t].d()}}}function io(n){let e,t,r,s=[],l=new Map,i,a,o;function f(_,E){return _[13].results.length===0?co:_[13].results.length===1?uo:oo}let c=f(n,[-1,-1]),d=c(n),p=n[13].results.slice(0,n[17]),h=_=>_[51].id;for(let _=0;_n[17]&&Rr(n);return{c(){e=C("p"),d.c(),t=v(),r=C("ol");for(let _=0;__[17]?u?u.p(_,E):(u=Rr(_),u.c(),u.m(a.parentNode,a)):u&&(u.d(1),u=null)},i(_){if(!o){for(let E=0;E{o[p]=null}),oe(),s=o[r],s?s.p(e,d):(s=o[r]=a[r](e),s.c()),D(s,1),s.m(l.parentNode,l))},i(c){i||(D(s),i=!0)},o(c){P(s),i=!1},d(c){c&&k(t),o[r].d(c),c&&k(l)}}}function Rr(n){let e,t=n[20]("load_more",n[19],n[7])+"",r,s,l;return{c(){e=C("button"),r=w(t),m(e,"type","button"),m(e,"class","pagefind-ui__button svelte-e9gkc3")},m(i,a){S(i,e,a),R(e,r),s||(l=J(e,"click",n[22]),s=!0)},p(i,a){a[0]&524416&&t!==(t=i[20]("load_more",i[19],i[7])+"")&&z(r,t)},d(i){i&&k(e),s=!1,l()}}}function br(n){let e,t=n[20]("searching",n[19],n[7]).replace(/\[SEARCH_TERM\]/,n[16])+"",r;return{c(){e=C("p"),r=w(t),m(e,"class","pagefind-ui__message svelte-e9gkc3")},m(s,l){S(s,e,l),R(e,r)},p(s,l){l[0]&589952&&t!==(t=s[20]("searching",s[19],s[7]).replace(/\[SEARCH_TERM\]/,s[16])+"")&&z(r,t)},d(s){s&&k(e)}}}function ho(n){let 
e,t,r,s,l,i,a,o=n[20]("clear_search",n[19],n[7])+"",f,c,d,p,h,u,_,E,b=n[12]&&pr(n),T=n[15]&&gr(n);return{c(){e=C("div"),t=C("form"),r=C("input"),i=v(),a=C("button"),f=w(o),c=v(),d=C("div"),b&&b.c(),p=v(),T&&T.c(),m(r,"class","pagefind-ui__search-input svelte-e9gkc3"),m(r,"type","text"),m(r,"placeholder",s=n[20]("placeholder",n[19],n[7])),m(r,"title",l=n[20]("placeholder",n[19],n[7])),m(r,"autocapitalize","none"),m(r,"enterkeyhint","search"),r.autofocus=n[8],m(a,"class","pagefind-ui__search-clear svelte-e9gkc3"),B(a,"pagefind-ui__suppressed",!n[9]),m(d,"class","pagefind-ui__drawer svelte-e9gkc3"),B(d,"pagefind-ui__hidden",!n[15]),m(t,"class","pagefind-ui__form svelte-e9gkc3"),m(t,"role","search"),m(t,"aria-label",h=n[20]("search_label",n[19],n[7])),m(t,"action","javascript:void(0);"),m(e,"class","pagefind-ui svelte-e9gkc3"),B(e,"pagefind-ui--reset",n[1])},m(M,y){S(M,e,y),R(e,t),R(t,r),Tt(r,n[9]),n[34](r),R(t,i),R(t,a),R(a,f),n[35](a),R(t,c),R(t,d),b&&b.m(d,null),R(d,p),T&&T.m(d,null),u=!0,n[8]&&r.focus(),_||(E=[J(r,"focus",n[21]),J(r,"keydown",n[32]),J(r,"input",n[33]),J(a,"click",n[36]),J(t,"submit",mo)],_=!0)},p(M,y){(!u||y[0]&524416&&s!==(s=M[20]("placeholder",M[19],M[7])))&&m(r,"placeholder",s),(!u||y[0]&524416&&l!==(l=M[20]("placeholder",M[19],M[7])))&&m(r,"title",l),(!u||y[0]&256)&&(r.autofocus=M[8]),y[0]&512&&r.value!==M[9]&&Tt(r,M[9]),(!u||y[0]&524416)&&o!==(o=M[20]("clear_search",M[19],M[7])+"")&&z(f,o),(!u||y[0]&512)&&B(a,"pagefind-ui__suppressed",!M[9]),M[12]?b?(b.p(M,y),y[0]&4096&&D(b,1)):(b=pr(M),b.c(),D(b,1),b.m(d,p)):b&&(ae(),P(b,1,1,()=>{b=null}),oe()),M[15]?T?(T.p(M,y),y[0]&32768&&D(T,1)):(T=gr(M),T.c(),D(T,1),T.m(d,null)):T&&(ae(),P(T,1,1,()=>{T=null}),oe()),(!u||y[0]&32768)&&B(d,"pagefind-ui__hidden",!M[15]),(!u||y[0]&524416&&h!==(h=M[20]("search_label",M[19],M[7])))&&m(t,"aria-label",h),(!u||y[0]&2)&&B(e,"pagefind-ui--reset",M[1])},i(M){u||(D(b),D(T),u=!0)},o(M){P(b),P(T),u=!1},d(M){M&&k(e),n[34](null),n[35](null),b&&b.d(),T&&T.d(),_=!1,K(E)}}}var
 mo=n=>n.preventDefault();function po(n,e,t){let r={},s=hr.map(g=>g.match(/([^\/]+)\.json$/)[1]);for(let g=0;gj[g]??N[g]??"";Ct(()=>{let g=document?.querySelector?.("html")?.getAttribute?.("lang")||"en",N=ct(g.toLocaleLowerCase());t(19,Sn=r[`${N.language}-${N.script}-${N.region}`]||r[`${N.language}-${N.region}`]||r[`${N.language}`]||r.en)}),kt(()=>{F?.destroy?.(),F=null});let Mn=async()=>{if(!ft&&(t(12,ft=!0),!F)){let g;try{g=await import(`${l}pagefind.js`)}catch(j){console.error(j),console.error([`Pagefind couldn't be loaded from ${this.options.bundlePath}pagefind.js`,"You can configure this by passing a bundlePath option to PagefindUI"].join(`
+`)),document?.currentScript&&document.currentScript.tagName.toUpperCase()==="SCRIPT"?console.error(`[DEBUG: Loaded from ${document.currentScript.src??"bad script location"}]`):console.error("no known script location")}c||t(24,c=f?12:30);let N={...E||{},excerptLength:c};await g.options(N);for(let j of b){if(!j.bundlePath)throw new Error("mergeIndex requires a bundlePath parameter");let L=j.bundlePath;delete j.bundlePath,await g.mergeIndex(L,j)}F=g,Sr()}},Sr=async()=>{F&&(kn=await F.filters(),(!ce||!Object.keys(ce).length)&&t(18,ce=kn))},Mr=g=>{let N={};return Object.entries(g).filter(([,j])=>j).forEach(([j])=>{let[L,te]=j.split(/:(.*)$/);N[L]=N[L]||[],N[L].push(te)}),N},_e,Ar=async(g,N)=>{if(!g){t(15,ht=!1),_e&&clearTimeout(_e);return}let j=Mr(N),L=()=>yr(g,j);_>0&&g?(_e&&clearTimeout(_e),_e=setTimeout(L,_),await An(),F.preload(g,{filters:j})):L(),vr()},An=async()=>{for(;!F;)Mn(),await new Promise(g=>setTimeout(g,50))},yr=async(g,N)=>{t(16,Cn=g||""),typeof p=="function"&&(g=p(g)),t(14,dt=!0),t(15,ht=!0),await An();let j=++Tn,L={filters:N};X&&typeof X=="object"&&(L.sort=X);let te=await F.search(g,L);Tn===j&&(te.filters&&Object.keys(te.filters)?.length&&t(18,ce=te.filters),t(13,bn=te),t(14,dt=!1),t(17,mt=i))},vr=()=>{let g=W.offsetWidth;g!=Cr&&t(10,O.style.paddingRight=`${g+2}px`,O)},Hr=g=>{g?.preventDefault(),t(17,mt+=i)},wr=g=>{g.key==="Escape"&&(t(9,H=""),O.blur()),g.key==="Enter"&&g.preventDefault()};function Fr(){H=this.value,t(9,H),t(23,T)}function Nr(g){le[g?"unshift":"push"](()=>{O=g,t(10,O)})}function zr(g){le[g?"unshift":"push"](()=>{W=g,t(11,W)})}let Or=()=>{t(9,H=""),O.blur()};function jr(g){V=g,t(0,V)}return n.$$set=g=>{"base_path"in g&&t(25,l=g.base_path),"page_size"in g&&t(26,i=g.page_size),"reset_styles"in g&&t(1,a=g.reset_styles),"show_images"in g&&t(2,o=g.show_images),"show_sub_results"in g&&t(3,f=g.show_sub_results),"excerpt_length"in g&&t(24,c=g.excerpt_length),"process_result"in g&&t(4,d=g.process_result),"process_term"in 
g&&t(27,p=g.process_term),"show_empty_filters"in g&&t(5,h=g.show_empty_filters),"open_filters"in g&&t(6,u=g.open_filters),"debounce_timeout_ms"in g&&t(28,_=g.debounce_timeout_ms),"pagefind_options"in g&&t(29,E=g.pagefind_options),"merge_index"in g&&t(30,b=g.merge_index),"trigger_search_term"in g&&t(23,T=g.trigger_search_term),"translations"in g&&t(7,M=g.translations),"autofocus"in g&&t(8,y=g.autofocus),"sort"in g&&t(31,X=g.sort),"selected_filters"in g&&t(0,V=g.selected_filters)},n.$$.update=()=>{if(n.$$.dirty[0]&8388608)e:T&&(t(9,H=T),t(23,T=""));if(n.$$.dirty[0]&513)e:Ar(H,V)},[V,a,o,f,d,h,u,M,y,H,O,W,ft,bn,dt,ht,Cn,mt,ce,Sn,kr,Mn,Hr,T,c,l,i,p,_,E,b,X,wr,Fr,Nr,zr,Or,jr]}var En=class extends q{constructor(e){super(),Y(this,e,po,ho,G,{base_path:25,page_size:26,reset_styles:1,show_images:2,show_sub_results:3,excerpt_length:24,process_result:4,process_term:27,show_empty_filters:5,open_filters:6,debounce_timeout_ms:28,pagefind_options:29,merge_index:30,trigger_search_term:23,translations:7,autofocus:8,sort:31,selected_filters:0},null,[-1,-1])}},Tr=En;var Rn;try{document?.currentScript&&document.currentScript.tagName.toUpperCase()==="SCRIPT"&&(Rn=new URL(document.currentScript.src).pathname.match(/^(.*\/)(?:pagefind-)?ui.js.*$/)[1])}catch{Rn="/pagefind/"}var _t=class{constructor(e){this._pfs=null;let t=e.element??"[data-pagefind-ui]",r=e.bundlePath??Rn,s=e.pageSize??5,l=e.resetStyles??!0,i=e.showImages??!0,a=e.showSubResults??!1,o=e.excerptLength??0,f=e.processResult??null,c=e.processTerm??null,d=e.showEmptyFilters??!0,p=e.openFilters??[],h=e.debounceTimeoutMs??300,u=e.mergeIndex??[],_=e.translations??[],E=e.autofocus??!1,b=e.sort??null;delete e.element,delete e.bundlePath,delete e.pageSize,delete e.resetStyles,delete e.showImages,delete e.showSubResults,delete e.excerptLength,delete e.processResult,delete e.processTerm,delete e.showEmptyFilters,delete e.openFilters,delete e.debounceTimeoutMs,delete e.mergeIndex,delete e.translations,delete e.autofocus,delete e.sort;let 
T=t instanceof HTMLElement?t:document.querySelector(t);T?this._pfs=new Tr({target:T,props:{base_path:r,page_size:s,reset_styles:l,show_images:i,show_sub_results:a,excerpt_length:o,process_result:f,process_term:c,show_empty_filters:d,open_filters:p,debounce_timeout_ms:h,merge_index:u,translations:_,autofocus:E,sort:b,pagefind_options:e}}):console.error(`Pagefind UI couldn't find the selector ${t}`)}triggerSearch(e){this._pfs.$$set({trigger_search_term:e})}triggerFilters(e){let t={};for(let[r,s]of Object.entries(e))if(Array.isArray(s))for(let l of s)t[`${r}:${l}`]=!0;else t[`${r}:${s}`]=!0;this._pfs.$$set({selected_filters:t})}destroy(){this._pfs.$destroy()}};window.PagefindUI=_t;})();
diff --git a/docs/public/_pagefind/pagefind.en_1755c4f2e4.pf_meta b/docs/public/_pagefind/pagefind.en_1755c4f2e4.pf_meta
new file mode 100644
index 0000000..ad00c4e
Binary files /dev/null and b/docs/public/_pagefind/pagefind.en_1755c4f2e4.pf_meta differ
diff --git a/docs/public/_pagefind/pagefind.en_9982ae0cad.pf_meta b/docs/public/_pagefind/pagefind.en_9982ae0cad.pf_meta
new file mode 100644
index 0000000..d496d1c
Binary files /dev/null and b/docs/public/_pagefind/pagefind.en_9982ae0cad.pf_meta differ
diff --git a/docs/public/_pagefind/pagefind.js b/docs/public/_pagefind/pagefind.js
new file mode 100644
index 0000000..54e2f7c
--- /dev/null
+++ b/docs/public/_pagefind/pagefind.js
@@ -0,0 +1,6 @@
+const pagefind_version="1.4.0";let wasm_bindgen;(function(){const __exports={};let script_src;if(typeof document!=='undefined'&&document.currentScript!==null){script_src=new URL("UNHANDLED",location.href).toString()}let wasm=undefined;let WASM_VECTOR_LEN=0;let cachedUint8Memory0=null;function getUint8Memory0(){if(cachedUint8Memory0===null||cachedUint8Memory0.byteLength===0){cachedUint8Memory0=new Uint8Array(wasm.memory.buffer)}return cachedUint8Memory0}const cachedTextEncoder=(typeof TextEncoder!=='undefined'?new TextEncoder('utf-8'):{encode:()=>{throw Error('TextEncoder not available')}});const encodeString=(typeof cachedTextEncoder.encodeInto==='function'?function(arg,view){return cachedTextEncoder.encodeInto(arg,view)}:function(arg,view){const buf=cachedTextEncoder.encode(arg);view.set(buf);return{read:arg.length,written:buf.length}});function passStringToWasm0(arg,malloc,realloc){if(realloc===undefined){const buf=cachedTextEncoder.encode(arg);const ptr=malloc(buf.length,1)>>>0;getUint8Memory0().subarray(ptr,ptr+buf.length).set(buf);WASM_VECTOR_LEN=buf.length;return ptr}let len=arg.length;let ptr=malloc(len,1)>>>0;const mem=getUint8Memory0();let offset=0;for(;offset0x7F)break;mem[ptr+offset]=code}if(offset!==len){if(offset!==0){arg=arg.slice(offset)}ptr=realloc(ptr,len,len=offset+arg.length*3,1)>>>0;const view=getUint8Memory0().subarray(ptr+offset,ptr+len);const ret=encodeString(arg,view);offset+=ret.written;ptr=realloc(ptr,len,offset,1)>>>0}WASM_VECTOR_LEN=offset;return ptr}let cachedInt32Memory0=null;function getInt32Memory0(){if(cachedInt32Memory0===null||cachedInt32Memory0.byteLength===0){cachedInt32Memory0=new Int32Array(wasm.memory.buffer)}return cachedInt32Memory0}const cachedTextDecoder=(typeof TextDecoder!=='undefined'?new TextDecoder('utf-8',{ignoreBOM:true,fatal:true}):{decode:()=>{throw Error('TextDecoder not available')}});if(typeof TextDecoder!=='undefined'){cachedTextDecoder.decode()};function getStringFromWasm0(ptr,len){ptr=ptr>>>0;return 
cachedTextDecoder.decode(getUint8Memory0().subarray(ptr,ptr+len))}__exports.request_indexes=function(ptr,query){let deferred2_0;let deferred2_1;try{const retptr=wasm.__wbindgen_add_to_stack_pointer(-16);const ptr0=passStringToWasm0(query,wasm.__wbindgen_malloc,wasm.__wbindgen_realloc);const len0=WASM_VECTOR_LEN;wasm.request_indexes(retptr,ptr,ptr0,len0);var r0=getInt32Memory0()[retptr/4+0];var r1=getInt32Memory0()[retptr/4+1];deferred2_0=r0;deferred2_1=r1;return getStringFromWasm0(r0,r1)}finally{wasm.__wbindgen_add_to_stack_pointer(16);wasm.__wbindgen_free(deferred2_0,deferred2_1,1)}};__exports.filters=function(ptr){let deferred1_0;let deferred1_1;try{const retptr=wasm.__wbindgen_add_to_stack_pointer(-16);wasm.filters(retptr,ptr);var r0=getInt32Memory0()[retptr/4+0];var r1=getInt32Memory0()[retptr/4+1];deferred1_0=r0;deferred1_1=r1;return getStringFromWasm0(r0,r1)}finally{wasm.__wbindgen_add_to_stack_pointer(16);wasm.__wbindgen_free(deferred1_0,deferred1_1,1)}};__exports.request_filter_indexes=function(ptr,filters){let deferred2_0;let deferred2_1;try{const retptr=wasm.__wbindgen_add_to_stack_pointer(-16);const ptr0=passStringToWasm0(filters,wasm.__wbindgen_malloc,wasm.__wbindgen_realloc);const len0=WASM_VECTOR_LEN;wasm.request_filter_indexes(retptr,ptr,ptr0,len0);var r0=getInt32Memory0()[retptr/4+0];var r1=getInt32Memory0()[retptr/4+1];deferred2_0=r0;deferred2_1=r1;return getStringFromWasm0(r0,r1)}finally{wasm.__wbindgen_add_to_stack_pointer(16);wasm.__wbindgen_free(deferred2_0,deferred2_1,1)}};__exports.enter_playground_mode=function(ptr){const ret=wasm.enter_playground_mode(ptr);return ret>>>0};__exports.request_all_filter_indexes=function(ptr){let deferred1_0;let deferred1_1;try{const retptr=wasm.__wbindgen_add_to_stack_pointer(-16);wasm.request_all_filter_indexes(retptr,ptr);var r0=getInt32Memory0()[retptr/4+0];var r1=getInt32Memory0()[retptr/4+1];deferred1_0=r0;deferred1_1=r1;return 
getStringFromWasm0(r0,r1)}finally{wasm.__wbindgen_add_to_stack_pointer(16);wasm.__wbindgen_free(deferred1_0,deferred1_1,1)}};function passArray8ToWasm0(arg,malloc){const ptr=malloc(arg.length*1,1)>>>0;getUint8Memory0().set(arg,ptr/1);WASM_VECTOR_LEN=arg.length;return ptr}__exports.init_pagefind=function(metadata_bytes){const ptr0=passArray8ToWasm0(metadata_bytes,wasm.__wbindgen_malloc);const len0=WASM_VECTOR_LEN;const ret=wasm.init_pagefind(ptr0,len0);return ret>>>0};__exports.search=function(ptr,query,filter,sort,exact){let deferred4_0;let deferred4_1;try{const retptr=wasm.__wbindgen_add_to_stack_pointer(-16);const ptr0=passStringToWasm0(query,wasm.__wbindgen_malloc,wasm.__wbindgen_realloc);const len0=WASM_VECTOR_LEN;const ptr1=passStringToWasm0(filter,wasm.__wbindgen_malloc,wasm.__wbindgen_realloc);const len1=WASM_VECTOR_LEN;const ptr2=passStringToWasm0(sort,wasm.__wbindgen_malloc,wasm.__wbindgen_realloc);const len2=WASM_VECTOR_LEN;wasm.search(retptr,ptr,ptr0,len0,ptr1,len1,ptr2,len2,exact);var r0=getInt32Memory0()[retptr/4+0];var r1=getInt32Memory0()[retptr/4+1];deferred4_0=r0;deferred4_1=r1;return getStringFromWasm0(r0,r1)}finally{wasm.__wbindgen_add_to_stack_pointer(16);wasm.__wbindgen_free(deferred4_0,deferred4_1,1)}};__exports.load_index_chunk=function(ptr,chunk_bytes){const ptr0=passArray8ToWasm0(chunk_bytes,wasm.__wbindgen_malloc);const len0=WASM_VECTOR_LEN;const ret=wasm.load_index_chunk(ptr,ptr0,len0);return ret>>>0};__exports.add_synthetic_filter=function(ptr,filter){const ptr0=passStringToWasm0(filter,wasm.__wbindgen_malloc,wasm.__wbindgen_realloc);const len0=WASM_VECTOR_LEN;const ret=wasm.add_synthetic_filter(ptr,ptr0,len0);return ret>>>0};__exports.set_ranking_weights=function(ptr,weights){const ptr0=passStringToWasm0(weights,wasm.__wbindgen_malloc,wasm.__wbindgen_realloc);const len0=WASM_VECTOR_LEN;const ret=wasm.set_ranking_weights(ptr,ptr0,len0);return ret>>>0};__exports.load_filter_chunk=function(ptr,chunk_bytes){const 
ptr0=passArray8ToWasm0(chunk_bytes,wasm.__wbindgen_malloc);const len0=WASM_VECTOR_LEN;const ret=wasm.load_filter_chunk(ptr,ptr0,len0);return ret>>>0};async function __wbg_load(module,imports){if(typeof Response==='function'&&module instanceof Response){if(typeof WebAssembly.instantiateStreaming==='function'){try{return await WebAssembly.instantiateStreaming(module,imports)}catch(e){if(module.headers.get('Content-Type')!='application/wasm'){console.warn("`WebAssembly.instantiateStreaming` failed because your server does not serve wasm with `application/wasm` MIME type. Falling back to `WebAssembly.instantiate` which is slower. Original error:\n",e)}else{throw e}}}const bytes=await module.arrayBuffer();return await WebAssembly.instantiate(bytes,imports)}else{const instance=await WebAssembly.instantiate(module,imports);if(instance instanceof WebAssembly.Instance){return{instance,module}}else{return instance}}}function __wbg_get_imports(){const imports={};imports.wbg={};return imports}function __wbg_init_memory(imports,maybe_memory){}function __wbg_finalize_init(instance,module){wasm=instance.exports;__wbg_init.__wbindgen_wasm_module=module;cachedInt32Memory0=null;cachedUint8Memory0=null;return wasm}function initSync(module){if(wasm!==undefined)return wasm;const imports=__wbg_get_imports();__wbg_init_memory(imports);if(!(module instanceof WebAssembly.Module)){module=new WebAssembly.Module(module)}const instance=new WebAssembly.Instance(module,imports);return __wbg_finalize_init(instance,module)}async function __wbg_init(input){if(wasm!==undefined)return wasm;if(typeof input==='undefined'&&typeof script_src!=='undefined'){input=script_src.replace(/\.js$/,'_bg.wasm')}const imports=__wbg_get_imports();if(typeof input==='string'||(typeof Request==='function'&&input instanceof Request)||(typeof URL==='function'&&input instanceof URL)){input=fetch(input)}__wbg_init_memory(imports);const{instance,module}=await __wbg_load(await input,imports);return 
__wbg_finalize_init(instance,module)}wasm_bindgen=Object.assign(__wbg_init,{initSync},__exports)})();var u8=Uint8Array;var u16=Uint16Array;var u32=Uint32Array;var fleb=new u8([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,0]);var fdeb=new u8([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0]);var clim=new u8([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]);var freb=function(eb,start){var b=new u16(31);for(var i2=0;i2<31;++i2){b[i2]=start+=1<>>1|(i&21845)<<1;x=(x&52428)>>>2|(x&13107)<<2;x=(x&61680)>>>4|(x&3855)<<4;rev[i]=((x&65280)>>>8|(x&255)<<8)>>>1}var x;var i;var hMap=function(cd,mb,r){var s=cd.length;var i2=0;var l=new u16(mb);for(;i2>>rvb]=sv}}}}else{co=new u16(s);for(i2=0;i2>>15-cd[i2]}}}return co};var flt=new u8(288);for(i=0;i<144;++i)flt[i]=8;var i;for(i=144;i<256;++i)flt[i]=9;var i;for(i=256;i<280;++i)flt[i]=7;var i;for(i=280;i<288;++i)flt[i]=8;var i;var fdt=new u8(32);for(i=0;i<32;++i)fdt[i]=5;var i;var flrm=hMap(flt,9,1);var fdrm=hMap(fdt,5,1);var max=function(a){var m=a[0];for(var i2=1;i2m)m=a[i2]}return m};var bits=function(d,p,m){var o=p/8|0;return(d[o]|d[o+1]<<8)>>(p&7)&m};var bits16=function(d,p){var o=p/8|0;return(d[o]|d[o+1]<<8|d[o+2]<<16)>>(p&7)};var shft=function(p){return(p+7)/8|0};var slc=function(v,s,e){if(s==null||s<0)s=0;if(e==null||e>v.length)e=v.length;var n=new(v.BYTES_PER_ELEMENT==2?u16:v.BYTES_PER_ELEMENT==4?u32:u8)(e-s);n.set(v.subarray(s,e));return n};var ec=["unexpected EOF","invalid block type","invalid length/literal","invalid distance","stream finished","no stream handler",,"no callback","invalid UTF-8 data","extra field too long","date not in range 1980-2099","filename too long","stream finishing","invalid zip data"];var err=function(ind,msg,nt){var e=new Error(msg||ec[ind]);e.code=ind;if(Error.captureStackTrace)Error.captureStackTrace(e,err);if(!nt)throw e;return e};var inflt=function(dat,buf,st){var sl=dat.length;if(!sl||st&&st.f&&!st.l)return buf||new u8(0);var noBuf=!buf||st;var 
noSt=!st||st.i;if(!st)st={};if(!buf)buf=new u8(sl*3);var cbuf=function(l2){var bl=buf.length;if(l2>bl){var nbuf=new u8(Math.max(bl*2,l2));nbuf.set(buf);buf=nbuf}};var final=st.f||0,pos=st.p||0,bt=st.b||0,lm=st.l,dm=st.d,lbt=st.m,dbt=st.n;var tbts=sl*8;do{if(!lm){final=bits(dat,pos,1);var type=bits(dat,pos+1,3);pos+=3;if(!type){var s=shft(pos)+4,l=dat[s-4]|dat[s-3]<<8,t=s+l;if(t>sl){if(noSt)err(0);break}if(noBuf)cbuf(bt+l);buf.set(dat.subarray(s,t),bt);st.b=bt+=l,st.p=pos=t*8,st.f=final;continue}else if(type==1)lm=flrm,dm=fdrm,lbt=9,dbt=5;else if(type==2){var hLit=bits(dat,pos,31)+257,hcLen=bits(dat,pos+10,15)+4;var tl=hLit+bits(dat,pos+5,31)+1;pos+=14;var ldt=new u8(tl);var clt=new u8(19);for(var i2=0;i2>>4;if(s<16){ldt[i2++]=s}else{var c=0,n=0;if(s==16)n=3+bits(dat,pos,3),pos+=2,c=ldt[i2-1];else if(s==17)n=3+bits(dat,pos,7),pos+=3;else if(s==18)n=11+bits(dat,pos,127),pos+=7;while(n--)ldt[i2++]=c}}var lt=ldt.subarray(0,hLit),dt=ldt.subarray(hLit);lbt=max(lt);dbt=max(dt);lm=hMap(lt,lbt,1);dm=hMap(dt,dbt,1)}else err(1);if(pos>tbts){if(noSt)err(0);break}}if(noBuf)cbuf(bt+131072);var lms=(1<>>4;pos+=c&15;if(pos>tbts){if(noSt)err(0);break}if(!c)err(2);if(sym<256)buf[bt++]=sym;else if(sym==256){lpos=pos,lm=null;break}else{var add=sym-254;if(sym>264){var i2=sym-257,b=fleb[i2];add=bits(dat,pos,(1<>>4;if(!d)err(3);pos+=d&15;var dt=fd[dsym];if(dsym>3){var b=fdeb[dsym];dt+=bits16(dat,pos)&(1<tbts){if(noSt)err(0);break}if(noBuf)cbuf(bt+131072);var end=bt+add;for(;bt>3&1)+(flg>>4&1);zs>0;zs-=!d[st++]);return st+(flg&2)};var gzl=function(d){var l=d.length;return(d[l-4]|d[l-3]<<8|d[l-2]<<16|d[l-1]<<24)>>>0};function gunzipSync(data,out){return inflt(data.subarray(gzs(data),-8),out||new u8(gzl(data)))}var td=typeof TextDecoder!="undefined"&&new TextDecoder();var tds=0;try{td.decode(et,{stream:true});tds=1}catch(e){}var gz_default=gunzipSync;var calculate_excerpt_region=(word_positions,excerpt_length)=>{if(word_positions.length===0){return 0}let words=[];for(const word of 
word_positions){words[word.location]=words[word.location]||0;words[word.location]+=word.balanced_score}if(words.length<=excerpt_length){return 0}let densest=words.slice(0,excerpt_length).reduce((partialSum,a)=>partialSum+a,0);let working_sum=densest;let densest_at=[0];for(let i2=0;i2densest){densest=working_sum;densest_at=[i2]}else if(working_sum===densest&&densest_at[densest_at.length-1]===i2-1){densest_at.push(i2)}}let midpoint=densest_at[Math.floor(densest_at.length/2)];return midpoint};var build_excerpt=(content,start,length,locations,not_before,not_from)=>{let is_zws_delimited=content.includes("\u200B");let fragment_words=[];if(is_zws_delimited){fragment_words=content.split("\u200B")}else{fragment_words=content.split(/[\r\n\s]+/g)}for(let word of locations){if(fragment_words[word]?.startsWith(``)){continue}fragment_words[word]=`${fragment_words[word]} `}let endcap=not_from??fragment_words.length;let startcap=not_before??0;if(endcap-startcapendcap){start=endcap-length}if(start{const anchors=fragment.anchors.filter((a)=>/h\d/i.test(a.element)&&a.text?.length&&/\S/.test(a.text)).sort((a,b)=>a.location-b.location);const results=[];let current_anchor_position=0;let current_anchor={title:fragment.meta["title"],url:fragment.url,weighted_locations:[],locations:[],excerpt:""};const add_result=(end_range)=>{if(current_anchor.locations.length){const relative_weighted_locations=current_anchor.weighted_locations.map((l)=>{return{weight:l.weight,balanced_score:l.balanced_score,location:l.location-current_anchor_position}});const excerpt_start=calculate_excerpt_region(relative_weighted_locations,desired_excerpt_length)+current_anchor_position;const excerpt_length=end_range?Math.min(end_range-excerpt_start,desired_excerpt_length):desired_excerpt_length;current_anchor.excerpt=build_excerpt(fragment.raw_content??"",excerpt_start,excerpt_length,current_anchor.locations,current_anchor_position,end_range);results.push(current_anchor)}};for(let word of 
fragment.weighted_locations){if(!anchors.length||word.location=anchors[0].location){next_anchor=anchors.shift()}let anchored_url=fragment.url;try{const url_is_fq=/^((https?:)?\/\/)/.test(anchored_url);if(url_is_fq){let fq_url=new URL(anchored_url);fq_url.hash=next_anchor.id;anchored_url=fq_url.toString()}else{if(!/^\//.test(anchored_url)){anchored_url=`/${anchored_url}`}let fq_url=new URL(`https://example.com${anchored_url}`);fq_url.hash=next_anchor.id;anchored_url=fq_url.toString().replace(/^https:\/\/example.com/,"")}}catch(e){console.error(`Pagefind: Couldn't process ${anchored_url} for a search result`)}current_anchor_position=next_anchor.location;current_anchor={title:next_anchor.text,url:anchored_url,anchor:next_anchor,weighted_locations:[word],locations:[word.location],excerpt:""}}}add_result(anchors[0]?.location);return results};var asyncSleep=async(ms=100)=>{return new Promise((r)=>setTimeout(r,ms))};var isBrowser=typeof window!=="undefined"&&typeof document!=="undefined";var PagefindInstance=class{constructor(opts={}){this.version=pagefind_version;this.backend=wasm_bindgen;this.decoder=new TextDecoder("utf-8");this.wasm=null;this.basePath=opts.basePath||"/pagefind/";this.primary=opts.primary||false;if(this.primary&&!opts.basePath){this.initPrimary()}if(/[^\/]$/.test(this.basePath)){this.basePath=`${this.basePath}/`}if(isBrowser&&window?.location?.origin&&this.basePath.startsWith(window.location.origin)){this.basePath=this.basePath.replace(window.location.origin,"")}this.baseUrl=opts.baseUrl||this.defaultBaseUrl();if(!/^(\/|https?:\/\/)/.test(this.baseUrl)){this.baseUrl=`/${this.baseUrl}`}this.indexWeight=opts.indexWeight??1;this.excerptLength=opts.excerptLength??30;this.mergeFilter=opts.mergeFilter??{};this.ranking=opts.ranking;this.highlightParam=opts.highlightParam??null;this.loaded_chunks={};this.loaded_filters={};this.loaded_fragments={};this.raw_ptr=null;this.searchMeta=null;this.languages=null}initPrimary(){if(isBrowser&&typeof 
import.meta.url!=="undefined"){let derivedBasePath=import.meta.url.match(/^(.*\/)pagefind.js.*$/)?.[1];if(derivedBasePath){this.basePath=derivedBasePath}else{console.warn(["Pagefind couldn't determine the base of the bundle from the import path. Falling back to the default.","Set a basePath option when initialising Pagefind to ignore this message."].join("\n"))}}}defaultBaseUrl(){let default_base=this.basePath.match(/^(.*\/)_?pagefind/)?.[1];return default_base||"/"}async options(options2){const opts=["basePath","baseUrl","indexWeight","excerptLength","mergeFilter","highlightParam","ranking"];for(const[k,v]of Object.entries(options2)){if(k==="mergeFilter"){let filters2=this.stringifyFilters(v);let ptr=await this.getPtr();this.raw_ptr=this.backend.add_synthetic_filter(ptr,filters2)}else if(k==="ranking"){await this.set_ranking(options2.ranking)}else if(opts.includes(k)){if(k==="basePath"&&typeof v==="string")this.basePath=v;if(k==="baseUrl"&&typeof v==="string")this.baseUrl=v;if(k==="indexWeight"&&typeof v==="number")this.indexWeight=v;if(k==="excerptLength"&&typeof v==="number")this.excerptLength=v;if(k==="mergeFilter"&&typeof v==="object")this.mergeFilter=v;if(k==="highlightParam"&&typeof v==="string")this.highlightParam=v}else{console.warn(`Unknown Pagefind option ${k}. 
Allowed options: [${opts.join(", ")}]`)}}}async enterPlaygroundMode(){let ptr=await this.getPtr();this.raw_ptr=this.backend.enter_playground_mode(ptr)}decompress(data,file="unknown file"){if(this.decoder.decode(data.slice(0,12))==="pagefind_dcd"){return data.slice(12)}data=gz_default(data);if(this.decoder.decode(data.slice(0,12))!=="pagefind_dcd"){console.error(`Decompressing ${file} appears to have failed: Missing signature`);return data}return data.slice(12)}async set_ranking(ranking){if(!ranking)return;let rankingWeights={term_similarity:ranking.termSimilarity??null,page_length:ranking.pageLength??null,term_saturation:ranking.termSaturation??null,term_frequency:ranking.termFrequency??null};let ptr=await this.getPtr();this.raw_ptr=this.backend.set_ranking_weights(ptr,JSON.stringify(rankingWeights))}async init(language,opts){await this.loadEntry();let index=this.findIndex(language);let lang_wasm=index.wasm?index.wasm:"unknown";this.loadedLanguage=language;let resources=[this.loadMeta(index.hash)];if(opts.load_wasm===true){resources.push(this.loadWasm(lang_wasm))}await Promise.all(resources);this.raw_ptr=this.backend.init_pagefind(new Uint8Array(this.searchMeta));if(Object.keys(this.mergeFilter)?.length){let filters2=this.stringifyFilters(this.mergeFilter);let ptr=await this.getPtr();this.raw_ptr=this.backend.add_synthetic_filter(ptr,filters2)}if(this.ranking){await this.set_ranking(this.ranking)}}async loadEntry(){try{let entry_response=await fetch(`${this.basePath}pagefind-entry.json?ts=${Date.now()}`);let entry_json=await entry_response.json();this.languages=entry_json.languages;this.loadedVersion=entry_json.version;this.includeCharacters=entry_json.include_characters??[];if(entry_json.version!==this.version){if(this.primary){console.warn(["Pagefind JS version doesn't match the version in your search index.",`Pagefind JS: ${this.version}. 
Pagefind index: ${entry_json.version}`,"If you upgraded Pagefind recently, you likely have a cached pagefind.js file.","If you encounter any search errors, try clearing your cache."].join("\n"))}else{console.warn(["Merging a Pagefind index from a different version than the main Pagefind instance.",`Main Pagefind JS: ${this.version}. Merged index (${this.basePath}): ${entry_json.version}`,"If you encounter any search errors, make sure that both sites are running the same version of Pagefind."].join("\n"))}}}catch(e){console.error(`Failed to load Pagefind metadata:
+${e?.toString()}`);throw new Error("Failed to load Pagefind metadata")}}findIndex(language){if(this.languages){let index=this.languages[language];if(index)return index;index=this.languages[language.split("-")[0]];if(index)return index;let topLang=Object.values(this.languages).sort((a,b)=>b.page_count-a.page_count);if(topLang[0])return topLang[0]}throw new Error("Pagefind Error: No language indexes found.")}async loadMeta(index){try{let compressed_resp=await fetch(`${this.basePath}pagefind.${index}.pf_meta`);let compressed_meta=await compressed_resp.arrayBuffer();this.searchMeta=this.decompress(new Uint8Array(compressed_meta),"Pagefind metadata")}catch(e){console.error(`Failed to load the meta index:
+${e?.toString()}`)}}async loadWasm(language){try{const wasm_url=`${this.basePath}wasm.${language}.pagefind`;let compressed_resp=await fetch(wasm_url);let compressed_wasm=await compressed_resp.arrayBuffer();const final_wasm=this.decompress(new Uint8Array(compressed_wasm),"Pagefind WebAssembly");if(!final_wasm){throw new Error("No WASM after decompression")}this.wasm=await this.backend(final_wasm)}catch(e){console.error(`Failed to load the Pagefind WASM:
+${e?.toString()}`);throw new Error(`Failed to load the Pagefind WASM:
+${e?.toString()}`)}}async _loadGenericChunk(url,method){try{let compressed_resp=await fetch(url);let compressed_chunk=await compressed_resp.arrayBuffer();let chunk=this.decompress(new Uint8Array(compressed_chunk),url);let ptr=await this.getPtr();this.raw_ptr=this.backend[method](ptr,chunk)}catch(e){console.error(`Failed to load the index chunk ${url}:
+${e?.toString()}`)}}async loadChunk(hash){if(!this.loaded_chunks[hash]){const url=`${this.basePath}index/${hash}.pf_index`;this.loaded_chunks[hash]=this._loadGenericChunk(url,"load_index_chunk")}return await this.loaded_chunks[hash]}async loadFilterChunk(hash){if(!this.loaded_filters[hash]){const url=`${this.basePath}filter/${hash}.pf_filter`;this.loaded_filters[hash]=this._loadGenericChunk(url,"load_filter_chunk")}return await this.loaded_filters[hash]}async _loadFragment(hash){let compressed_resp=await fetch(`${this.basePath}fragment/${hash}.pf_fragment`);let compressed_fragment=await compressed_resp.arrayBuffer();let fragment=this.decompress(new Uint8Array(compressed_fragment),`Fragment ${hash}`);return JSON.parse(new TextDecoder().decode(fragment))}async loadFragment(hash,weighted_locations=[],search_term){if(!this.loaded_fragments[hash]){this.loaded_fragments[hash]=this._loadFragment(hash)}let fragment=await this.loaded_fragments[hash];fragment.weighted_locations=weighted_locations;fragment.locations=weighted_locations.map((l)=>l.location);if(!fragment.raw_content){fragment.raw_content=fragment.content.replace(//g,">");fragment.content=fragment.content.replace(/\u200B/g,"")}if(!fragment.raw_url){fragment.raw_url=fragment.url}fragment.url=this.processedUrl(fragment.raw_url,search_term);const excerpt_start=calculate_excerpt_region(weighted_locations,this.excerptLength);fragment.excerpt=build_excerpt(fragment.raw_content,excerpt_start,this.excerptLength,fragment.locations);fragment.sub_results=calculate_sub_results(fragment,this.excerptLength);return fragment}fullUrl(raw){if(/^(https?:)?\/\//.test(raw)){return raw}return`${this.baseUrl}/${raw}`.replace(/\/+/g,"/").replace(/^(https?:\/)/,"$1/")}processedUrl(url,search_term){const normalized=this.fullUrl(url);if(this.highlightParam===null){return normalized}let individual_terms=search_term.split(/\s+/);try{let processed=new URL(normalized);for(const term of 
individual_terms){processed.searchParams.append(this.highlightParam,term)}return processed.toString()}catch(e){try{let processed=new URL(`https://example.com${normalized}`);for(const term of individual_terms){processed.searchParams.append(this.highlightParam,term)}return processed.toString().replace(/^https:\/\/example\.com/,"")}catch(e2){return normalized}}}async getPtr(){while(this.raw_ptr===null){await asyncSleep(50)}if(!this.raw_ptr){console.error("Pagefind: WASM Error (No pointer)");throw new Error("Pagefind: WASM Error (No pointer)")}return this.raw_ptr}stringifyFilters(obj={}){return JSON.stringify(obj)}stringifySorts(obj={}){let sorts=Object.entries(obj);for(let[sort,direction]of sorts){if(sorts.length>1){console.warn(`Pagefind was provided multiple sort options in this search, but can only operate on one. Using the ${sort} sort.`)}if(direction!=="asc"&&direction!=="desc"){console.warn(`Pagefind was provided a sort with unknown direction ${direction}. Supported: [asc, desc]`)}return`${sort}:${direction}`}return``}async filters(){let ptr=await this.getPtr();let filters2=this.backend.request_all_filter_indexes(ptr);let filter_array=JSON.parse(filters2);if(Array.isArray(filter_array)){let filter_chunks=filter_array.filter((v)=>v).map((chunk)=>this.loadFilterChunk(chunk));await Promise.all([...filter_chunks])}ptr=await this.getPtr();let results=this.backend.filters(ptr);return JSON.parse(results)}async preload(term,options2={}){await this.search(term,{...options2,preload:true})}async search(term,options2={}){options2={verbose:false,filters:{},sort:{},...options2};const log=(str)=>{if(options2.verbose)console.log(str)};log(`Starting search on ${this.basePath}`);let start=Date.now();let ptr=await this.getPtr();let filter_only=term===null;term=term??"";let exact_search=/^\s*".+"\s*$/.test(term);if(exact_search){log(`Running an exact search`)}let trueLanguage=null;try{trueLanguage=Intl.getCanonicalLocales(this.loadedLanguage)[0]}catch(err2){}const 
term_chunks=[];let segments;if(trueLanguage&&typeof Intl.Segmenter!=="undefined"){const segmenter=new Intl.Segmenter(trueLanguage,{granularity:"grapheme"});segments=[...segmenter.segment(term)].map(({segment})=>segment)}else{segments=[...term]}for(const segment of segments){if(this.includeCharacters?.includes(segment)){term_chunks.push(segment)}else if(!/^\p{Pd}|\p{Pe}|\p{Pf}|\p{Pi}|\p{Po}|\p{Ps}$/u.test(segment)){term_chunks.push(segment.toLocaleLowerCase())}}term=term_chunks.join("").replace(/\s{2,}/g," ").trim();log(`Normalized search term to ${term}`);if(!term?.length&&!filter_only){return{results:[],unfilteredResultCount:0,filters:{},totalFilters:{},timings:{preload:Date.now()-start,search:Date.now()-start,total:Date.now()-start}}}let sort_list=this.stringifySorts(options2.sort);log(`Stringified sort to ${sort_list}`);const filter_list=this.stringifyFilters(options2.filters);log(`Stringified filters to ${filter_list}`);let index_resp=this.backend.request_indexes(ptr,term);let index_array=JSON.parse(index_resp);let filter_resp=this.backend.request_filter_indexes(ptr,filter_list);let filter_array=JSON.parse(filter_resp);let chunks=index_array.filter((v)=>v).map((chunk)=>this.loadChunk(chunk));let filter_chunks=filter_array.filter((v)=>v).map((chunk)=>this.loadFilterChunk(chunk));await Promise.all([...chunks,...filter_chunks]);log(`Loaded necessary chunks to run search`);if(options2.preload){log(`Preload \u2014 bailing out of search operation now.`);return null}ptr=await this.getPtr();let searchStart=Date.now();let result=this.backend.search(ptr,term,filter_list,sort_list,exact_search);log(`Got the raw search result: ${result}`);let{filtered_counts,total_counts,results,unfiltered_total,search_keywords}=JSON.parse(result);let resultsInterface=results.map((result2)=>{let weighted_locations=result2.l.map((l)=>{let loc={weight:l.w/24,balanced_score:l.s,location:l.l};if(l.v){loc.verbose={word_string:l.v.ws,length_bonus:l.v.lb}}return loc});let 
locations=weighted_locations.map((l)=>l.location);let res={id:result2.p,score:result2.s*this.indexWeight,words:locations,data:async()=>await this.loadFragment(result2.p,weighted_locations,term)};if(result2.params){res.params={document_length:result2.params.dl,average_page_length:result2.params.apl,total_pages:result2.params.tp}}if(result2.scores){res.scores=result2.scores.map((r)=>{return{search_term:r.w,idf:r.idf,saturating_tf:r.b_tf,raw_tf:r.r_tf,pagefind_tf:r.p_tf,score:r.s,params:{weighted_term_frequency:r.params.w_tf,pages_containing_term:r.params.pct,length_bonus:r.params.lb}}})}return res});const searchTime=Date.now()-searchStart;const realTime=Date.now()-start;log(`Found ${results.length} result${results.length == 1 ? "" : "s"} for "${term}" in ${Date.now() - searchStart}ms (${Date.now() - start}ms realtime)`);let response={results:resultsInterface,unfilteredResultCount:unfiltered_total,filters:filtered_counts,totalFilters:total_counts,timings:{preload:realTime-searchTime,search:searchTime,total:realTime}};if(search_keywords){response.search_keywords=search_keywords}return response}};var Pagefind=class{constructor(options2={}){this.backend=wasm_bindgen;this.primaryLanguage="unknown";this.searchID=0;this.primary=new PagefindInstance({...options2,primary:true});this.instances=[this.primary];this.init(options2?.language)}async options(options2){await this.primary.options(options2)}async enterPlaygroundMode(){await this.primary.enterPlaygroundMode()}async init(overrideLanguage){if(isBrowser&&document?.querySelector){const langCode=document.querySelector("html")?.getAttribute("lang")||"unknown";this.primaryLanguage=langCode.toLocaleLowerCase()}await this.primary.init(overrideLanguage?overrideLanguage:this.primaryLanguage,{load_wasm:true})}async mergeIndex(indexPath,options2={}){if(this.primary.basePath.startsWith(indexPath)){console.warn(`Skipping mergeIndex ${indexPath} that appears to be the same as the primary index (${this.primary.basePath})`);return}let 
newInstance=new PagefindInstance({primary:false,basePath:indexPath});this.instances.push(newInstance);while(this.primary.wasm===null){await asyncSleep(50)}await newInstance.init(options2.language||this.primaryLanguage,{load_wasm:false});delete options2["language"];await newInstance.options(options2)}mergeFilters(filters2){const merged={};for(const searchFilter of filters2){for(const[filterKey,values]of Object.entries(searchFilter)){if(!merged[filterKey]){merged[filterKey]=values;continue}else{const filter=merged[filterKey];for(const[valueKey,count]of Object.entries(values)){filter[valueKey]=(filter[valueKey]||0)+count}}}}return merged}async filters(){let filters2=await Promise.all(this.instances.map((i2)=>i2.filters()));return this.mergeFilters(filters2)}async preload(term,options2={}){await Promise.all(this.instances.map((i2)=>i2.preload(term,options2)))}async debouncedSearch(term,options2,debounceTimeoutMs){const thisSearchID=++this.searchID;this.preload(term,options2);await asyncSleep(debounceTimeoutMs);if(thisSearchID!==this.searchID){return null}const searchResult=await this.search(term,options2);if(thisSearchID!==this.searchID){return null}return searchResult}async search(term,options2={}){let search2=await Promise.all(this.instances.map((i2)=>i2.search(term,options2)));const filters2=this.mergeFilters(search2.map((s)=>s.filters));const totalFilters=this.mergeFilters(search2.map((s)=>s.totalFilters));const results=search2.map((s)=>s.results).flat().sort((a,b)=>b.score-a.score);const timings=search2.map((s)=>s.timings);const unfilteredResultCount=search2.reduce((sum,s)=>sum+s.unfilteredResultCount,0);let response={results,unfilteredResultCount,filters:filters2,totalFilters,timings};if(search2[0].search_keywords){response.search_keywords=search2[0].search_keywords}return response}};var pagefind=void 0;var initial_options=void 0;var init_pagefind=()=>{if(!pagefind){pagefind=new Pagefind(initial_options??{})}};var options=async(new_options)=>{if(pagefind){await 
pagefind.options(new_options)}else{initial_options=new_options}};var init=async()=>{init_pagefind()};var destroy=async()=>{pagefind=void 0;initial_options=void 0};var mergeIndex=async(indexPath,options2)=>{init_pagefind();return await pagefind.mergeIndex(indexPath,options2)};var search=async(term,options2)=>{init_pagefind();return await pagefind.search(term,options2)};var debouncedSearch=async(term,options2,debounceTimeoutMs=300)=>{init_pagefind();return await pagefind.debouncedSearch(term,options2,debounceTimeoutMs)};var preload=async(term,options2)=>{init_pagefind();return await pagefind.preload(term,options2)};var filters=async()=>{init_pagefind();return await pagefind.filters()};export{debouncedSearch,destroy,filters,init,mergeIndex,options,preload,search}
\ No newline at end of file
diff --git a/docs/public/_pagefind/wasm.en.pagefind b/docs/public/_pagefind/wasm.en.pagefind
new file mode 100644
index 0000000..e49ad34
Binary files /dev/null and b/docs/public/_pagefind/wasm.en.pagefind differ
diff --git a/docs/public/_pagefind/wasm.unknown.pagefind b/docs/public/_pagefind/wasm.unknown.pagefind
new file mode 100644
index 0000000..e3f5520
Binary files /dev/null and b/docs/public/_pagefind/wasm.unknown.pagefind differ
diff --git a/docs/public/llms-full.txt b/docs/public/llms-full.txt
new file mode 100644
index 0000000..55b6329
--- /dev/null
+++ b/docs/public/llms-full.txt
@@ -0,0 +1,121 @@
+# AltStack Docs — Complete Page Index
+
+> The World's First Sovereign Infrastructure Engine. Self-hosting guides that actually work.
+
+AltStack Docs (https://docs.thealtstack.com) is the documentation hub for The AltStack — a curated directory of 400+ open source alternatives to expensive proprietary software. Every guide includes tested Docker Compose configs, honest verdicts, and zero filler.
+
+---
+
+## Home
+- https://docs.thealtstack.com
+
+## Why These Docs Exist
+- https://docs.thealtstack.com/why
+
+---
+
+## Quick Start
+
+- What is Self-Hosting: https://docs.thealtstack.com/quick-start/what-is-self-hosting
+- Your First Deployment: https://docs.thealtstack.com/quick-start/first-deployment
+- Choosing a Server: https://docs.thealtstack.com/quick-start/choosing-a-server
+- Setting Up a Reverse Proxy: https://docs.thealtstack.com/quick-start/reverse-proxy
+- The AltStack Starter Kit: https://docs.thealtstack.com/quick-start/starter-kit
+
+---
+
+## Deploy Guides (65+ Tools)
+
+- Activepieces: https://docs.thealtstack.com/deploy/activepieces
+- Affine: https://docs.thealtstack.com/deploy/affine
+- Akaunting: https://docs.thealtstack.com/deploy/akaunting
+- AppFlowy: https://docs.thealtstack.com/deploy/appflowy
+- Appwrite: https://docs.thealtstack.com/deploy/appwrite
+- Authentik: https://docs.thealtstack.com/deploy/authentik
+- Bitwarden: https://docs.thealtstack.com/deploy/bitwarden
+- Cal.com: https://docs.thealtstack.com/deploy/calcom
+- Chaskiq: https://docs.thealtstack.com/deploy/chaskiq
+- Coder: https://docs.thealtstack.com/deploy/coder
+- Continue.dev: https://docs.thealtstack.com/deploy/continue-dev
+- Coolify: https://docs.thealtstack.com/deploy/coolify
+- DeepSeek: https://docs.thealtstack.com/deploy/deepseek
+- Documenso: https://docs.thealtstack.com/deploy/documenso
+- Dokku: https://docs.thealtstack.com/deploy/dokku
+- ERPNext: https://docs.thealtstack.com/deploy/erpnext
+- Flux: https://docs.thealtstack.com/deploy/flux
+- FreeCAD: https://docs.thealtstack.com/deploy/freecad
+- Gemma: https://docs.thealtstack.com/deploy/gemma
+- GIMP: https://docs.thealtstack.com/deploy/gimp
+- GlitchTip: https://docs.thealtstack.com/deploy/glitchtip
+- GPT4All: https://docs.thealtstack.com/deploy/gpt4all
+- Hunyuan Video: https://docs.thealtstack.com/deploy/hunyuan-video
+- Jitsi Meet: https://docs.thealtstack.com/deploy/jitsi-meet
+- Jitsu: https://docs.thealtstack.com/deploy/jitsu
+- Kdenlive: https://docs.thealtstack.com/deploy/kdenlive
+- KeePassXC: https://docs.thealtstack.com/deploy/keepassxc
+- Keycloak: https://docs.thealtstack.com/deploy/keycloak
+- Krita: https://docs.thealtstack.com/deploy/krita
+- LibreCAD: https://docs.thealtstack.com/deploy/librecad
+- Listmonk: https://docs.thealtstack.com/deploy/listmonk
+- Llama: https://docs.thealtstack.com/deploy/llama
+- Matomo: https://docs.thealtstack.com/deploy/matomo
+- Mattermost: https://docs.thealtstack.com/deploy/mattermost
+- Mautic: https://docs.thealtstack.com/deploy/mautic
+- Medusa: https://docs.thealtstack.com/deploy/medusa
+- Metabase: https://docs.thealtstack.com/deploy/metabase
+- MinIO: https://docs.thealtstack.com/deploy/minio
+- Mistral: https://docs.thealtstack.com/deploy/mistral
+- Mixpost: https://docs.thealtstack.com/deploy/mixpost
+- Mochi-1: https://docs.thealtstack.com/deploy/mochi-1
+- n8n: https://docs.thealtstack.com/deploy/n8n
+- Odoo: https://docs.thealtstack.com/deploy/odoo
+- Ollama: https://docs.thealtstack.com/deploy/ollama
+- ONLYOFFICE: https://docs.thealtstack.com/deploy/onlyoffice
+- OrangeHRM: https://docs.thealtstack.com/deploy/orangehrm
+- Outline: https://docs.thealtstack.com/deploy/outline
+- Penpot: https://docs.thealtstack.com/deploy/penpot
+- Plane: https://docs.thealtstack.com/deploy/plane
+- Plausible: https://docs.thealtstack.com/deploy/plausible
+- PocketBase: https://docs.thealtstack.com/deploy/pocketbase
+- Postal: https://docs.thealtstack.com/deploy/postal
+- PostHog: https://docs.thealtstack.com/deploy/posthog
+- Qwen: https://docs.thealtstack.com/deploy/qwen
+- Rocket.Chat: https://docs.thealtstack.com/deploy/rocketchat
+- SigNoz: https://docs.thealtstack.com/deploy/signoz
+- Stable Diffusion: https://docs.thealtstack.com/deploy/stable-diffusion
+- Supabase: https://docs.thealtstack.com/deploy/supabase
+- Superset: https://docs.thealtstack.com/deploy/superset
+- Tabby: https://docs.thealtstack.com/deploy/tabby
+- Taiga: https://docs.thealtstack.com/deploy/taiga
+- Twenty: https://docs.thealtstack.com/deploy/twenty
+- Uptime Kuma: https://docs.thealtstack.com/deploy/uptime-kuma
+- Vaultwarden: https://docs.thealtstack.com/deploy/vaultwarden
+- Zammad: https://docs.thealtstack.com/deploy/zammad
+
+---
+
+## Curated Stacks
+
+- Bootstrapper Stack: https://docs.thealtstack.com/stacks/bootstrapper
+- Designer Stack: https://docs.thealtstack.com/stacks/designer
+- DevOps Stack: https://docs.thealtstack.com/stacks/devops
+- Privacy Stack: https://docs.thealtstack.com/stacks/privacy
+- AI-First Stack: https://docs.thealtstack.com/stacks/ai-first
+
+---
+
+## Concepts
+
+- Docker Basics: https://docs.thealtstack.com/concepts/docker-basics
+- Reverse Proxies: https://docs.thealtstack.com/concepts/reverse-proxies
+- SSL/TLS: https://docs.thealtstack.com/concepts/ssl-tls
+- Backups: https://docs.thealtstack.com/concepts/backups
+
+---
+
+## Related
+
+- Main Directory (400+ tools): https://thealtstack.com
+- AI Model Matrix: https://thealtstack.com/ai
+- Savings Calculator: https://thealtstack.com/calculator
+- About: https://thealtstack.com/about
diff --git a/docs/public/llms.txt b/docs/public/llms.txt
new file mode 100644
index 0000000..4cdc525
--- /dev/null
+++ b/docs/public/llms.txt
@@ -0,0 +1,41 @@
+# AltStack Docs
+
+> The World's First Sovereign Infrastructure Engine. Self-hosting guides that actually work.
+
+AltStack Docs (https://docs.thealtstack.com) provides step-by-step deployment guides for 65+ open source tools. Every guide includes tested Docker Compose configs, honest verdicts, and zero filler.
+
+## Sections
+
+- Quick Start: https://docs.thealtstack.com/quick-start
+- Deploy Guides (65+ tools): https://docs.thealtstack.com/deploy
+- Curated Stacks: https://docs.thealtstack.com/stacks
+- Concepts: https://docs.thealtstack.com/concepts
+- Why These Docs Exist: https://docs.thealtstack.com/why
+
+## Quick Start Path
+
+- What is Self-Hosting: https://docs.thealtstack.com/quick-start/what-is-self-hosting
+- First Deployment: https://docs.thealtstack.com/quick-start/first-deployment
+- Choosing a Server: https://docs.thealtstack.com/quick-start/choosing-a-server
+- Reverse Proxy Setup: https://docs.thealtstack.com/quick-start/reverse-proxy
+- Starter Kit: https://docs.thealtstack.com/quick-start/starter-kit
+
+## Curated Stacks
+
+- Bootstrapper Stack: https://docs.thealtstack.com/stacks/bootstrapper
+- Designer Stack: https://docs.thealtstack.com/stacks/designer
+- DevOps Stack: https://docs.thealtstack.com/stacks/devops
+- Privacy Stack: https://docs.thealtstack.com/stacks/privacy
+- AI-First Stack: https://docs.thealtstack.com/stacks/ai-first
+
+## Concepts
+
+- Docker Basics: https://docs.thealtstack.com/concepts/docker-basics
+- Reverse Proxies: https://docs.thealtstack.com/concepts/reverse-proxies
+- SSL/TLS: https://docs.thealtstack.com/concepts/ssl-tls
+- Backups: https://docs.thealtstack.com/concepts/backups
+
+## Optional
+
+- Full tool list: https://docs.thealtstack.com/llms-full.txt
+- Main directory: https://thealtstack.com
diff --git a/docs/public/robots.txt b/docs/public/robots.txt
new file mode 100644
index 0000000..fb4daff
--- /dev/null
+++ b/docs/public/robots.txt
@@ -0,0 +1,4 @@
+User-agent: *
+Allow: /
+
+Sitemap: https://docs.thealtstack.com/sitemap.xml
diff --git a/docs/tailwind.config.js b/docs/tailwind.config.js
new file mode 100644
index 0000000..bd49f08
--- /dev/null
+++ b/docs/tailwind.config.js
@@ -0,0 +1,18 @@
+const path = require('path')
+
+
+/** @type {import('tailwindcss').Config} */
+module.exports = {
+ content: [
+ path.join(__dirname, 'app/**/*.{js,jsx,ts,tsx,md,mdx}'),
+ path.join(__dirname, 'content/**/*.{js,jsx,ts,tsx,md,mdx}'),
+ path.join(__dirname, 'components/**/*.{js,jsx,ts,tsx,md,mdx}'),
+ path.join(__dirname, 'mdx-components.tsx'),
+ path.join(__dirname, 'node_modules/nextra-theme-docs/dist/**/*.{js,jsx,ts,tsx}'),
+ ],
+ theme: {
+ extend: {},
+ },
+ plugins: [],
+ darkMode: 'class',
+}
diff --git a/docs/tsconfig.json b/docs/tsconfig.json
new file mode 100644
index 0000000..f5be63b
--- /dev/null
+++ b/docs/tsconfig.json
@@ -0,0 +1,42 @@
+{
+ "compilerOptions": {
+ "target": "ES2017",
+ "lib": [
+ "dom",
+ "dom.iterable",
+ "esnext"
+ ],
+ "allowJs": true,
+ "skipLibCheck": true,
+ "strict": false,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "preserve",
+ "incremental": true,
+ "plugins": [
+ {
+ "name": "next"
+ }
+ ],
+ "paths": {
+ "@/*": [
+ "./*"
+ ]
+ }
+ },
+ "include": [
+ "**/*.mdx",
+ "**/*.ts",
+ "**/*.tsx",
+ "next-env.d.ts",
+ ".next/types/**/*.ts",
+ ".next/dev/types/**/*.ts"
+ ],
+ "exclude": [
+ "node_modules"
+ ]
+}