commit 2a0ac1b1072ca8cb723817f06680a8c0a8c7ea92 Author: AltStack Bot Date: Wed Feb 25 22:36:27 2026 +0530 Initialize public data and docs repository diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2226e7a --- /dev/null +++ b/.gitignore @@ -0,0 +1,35 @@ +# Dependencies +node_modules +/.pnp +.pnp.js + +# Testing +/coverage + +# Next.js +.next/ +out/ + +# Production +/build + +# Misc +.DS_Store +*.pem +*.key + +# Debug +npm-debug.log* +yarn-debug.log* +pnpm-debug.log* + +# Local env files +.env*.local +*.bak +*_keys.json +cors.json + +# Vercel build artifacts +.vercel/output/ +.vercel/builders/ +build.log diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..af45c66 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,36 @@ +# Contributing to The Alt Stack Data & Docs 🥞 + +Thank you for your interest in contributing! Our community helps maintain the accuracy and quality of our open source data and documentation. + +## Types of Contributions + +1. **New Tools:** Adding an alternative to our dataset. +2. **Data Fixes:** Correcting URLs, pricing, descriptions, or pros/cons. +3. **Deployment Guides:** Writing or updating guides in `docs/app/deploy/`. + +## 1. Modifying Data (`/data/`) + +Our core data is stored in `data/tools.json`. + +1. Find the parent SaaS tool (e.g., "Slack") in the JSON structure. +2. Add or modify the alternative under the `alternatives` array. +3. Ensure you follow the structure defined in `data/schema/types.ts`. +4. Run validation (if applicable locally) before committing. + +## 2. Modifying Documentation (`/docs/`) + +Our documentation is built with Next.js and Nextra. All pages are under `docs/app/`. + +1. Navigate to the appropriate folder (e.g., `docs/app/deploy` for guides). +2. Create or edit the `.mdx` file. +3. If creating a new page, make sure to add it to the adjacent `_meta.ts` file so it appears in the sidebar! + +## Pull Request Process + +1. 
Fork the repository and create your feature branch: `git checkout -b fix/name-of-tool-data` +2. Make your targeted changes. **Keep PRs small** (e.g., fix one tool, don't change 50 things at once). +3. Commit your changes with a descriptive message: `fix(data): update RocketChat pricing link` +4. Open a Pull Request against our `main` branch. +5. A maintainer will review your PR. We may request changes or ask clarifying questions. + +By contributing to this repository, you agree that your data and documentation contributions will be licensed under the CC BY 4.0 license. diff --git a/README.md b/README.md new file mode 100644 index 0000000..488a4b2 --- /dev/null +++ b/README.md @@ -0,0 +1,39 @@ +# The Alt Stack Data & Docs 🥞 + +Welcome to the public repository for **The Alt Stack** content and data! + +This repository contains: +1. **The Alt Stack Dataset** (`/data`): Our curated JSON data of open source alternatives to popular SaaS products. +2. **The Documentation Site** (`/docs`): The source code for [docs.thealtstack.com](https://docs.thealtstack.com) containing 60+ deployment guides, concepts, and more. + +If you are looking for the main application (the UI, comparison engine, etc.), please note that the core application is closed-source. We open-source the data and documentation so the community can contribute to keeping the alternative software ecosystem accurate and well-documented. + +## 🤝 Contributing + +We welcome community contributions! This is the fastest way to get a new tool added or a deployment guide updated. + +Before submitting a pull request, please read our [Contributing Guidelines](CONTRIBUTING.md). + +### What you can contribute: +- **New Tools:** Submit an addition to `data/tools.json`. +- **Data Corrections:** Fix broken links, update pricing, or correct pros/cons. +- **Deployment Guides:** Write or update a self-hosting guide in `docs/app/deploy/`. +- **Typo Fixes:** Help us keep the documentation clean. 
+ +### What NOT to contribute here: +- Feature requests for the main application UI. +- Bug reports for the closed-source platform (use the contact form on the main site). + +## 🗄️ Working with the Data + +Our core dataset lives in `data/tools.json` and follows the TypeScript schema defined in `data/schema/types.ts`. + +If you're building a project that references our data, you are welcome to consume it directly from this repository! + +## 📄 License + +This repository uses a dual-license model: +* **Documentation & Data** (`/docs/**/*.mdx`, `/data/**/*.json`): Creative Commons Attribution 4.0 International ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/)) +* **Scripts & Code** (`/scripts`, `.ts`/`.js` files): [Apache License 2.0](LICENSE) + +*All tool logos in `assets/logos/` are the property of their respective trademark holders and are used for identification purposes only.* diff --git a/assets/logos/1password.svg b/assets/logos/1password.svg new file mode 100644 index 0000000..4057040 --- /dev/null +++ b/assets/logos/1password.svg @@ -0,0 +1 @@ +1Password \ No newline at end of file diff --git a/assets/logos/appwrite.svg b/assets/logos/appwrite.svg new file mode 100644 index 0000000..f223307 --- /dev/null +++ b/assets/logos/appwrite.svg @@ -0,0 +1 @@ +Appwrite \ No newline at end of file diff --git a/assets/logos/auth0.svg b/assets/logos/auth0.svg new file mode 100644 index 0000000..85ac633 --- /dev/null +++ b/assets/logos/auth0.svg @@ -0,0 +1 @@ +Auth0 \ No newline at end of file diff --git a/assets/logos/autocad.svg b/assets/logos/autocad.svg new file mode 100644 index 0000000..d1c4e2a --- /dev/null +++ b/assets/logos/autocad.svg @@ -0,0 +1 @@ +Autodesk \ No newline at end of file diff --git a/assets/logos/bitwarden.svg b/assets/logos/bitwarden.svg new file mode 100644 index 0000000..65b8513 --- /dev/null +++ b/assets/logos/bitwarden.svg @@ -0,0 +1 @@ +Bitwarden \ No newline at end of file diff --git a/assets/logos/calcom.svg 
b/assets/logos/calcom.svg new file mode 100644 index 0000000..a24d70a --- /dev/null +++ b/assets/logos/calcom.svg @@ -0,0 +1 @@ +Cal.com \ No newline at end of file diff --git a/assets/logos/calendly.svg b/assets/logos/calendly.svg new file mode 100644 index 0000000..a7d771e --- /dev/null +++ b/assets/logos/calendly.svg @@ -0,0 +1 @@ +Calendly \ No newline at end of file diff --git a/assets/logos/codespaces.svg b/assets/logos/codespaces.svg new file mode 100644 index 0000000..775adcc --- /dev/null +++ b/assets/logos/codespaces.svg @@ -0,0 +1 @@ +GitHub \ No newline at end of file diff --git a/assets/logos/confluence.svg b/assets/logos/confluence.svg new file mode 100644 index 0000000..2daccf2 --- /dev/null +++ b/assets/logos/confluence.svg @@ -0,0 +1 @@ +Confluence \ No newline at end of file diff --git a/assets/logos/dashlane.svg b/assets/logos/dashlane.svg new file mode 100644 index 0000000..16e0fbb --- /dev/null +++ b/assets/logos/dashlane.svg @@ -0,0 +1 @@ +Dashlane \ No newline at end of file diff --git a/assets/logos/datadog.svg b/assets/logos/datadog.svg new file mode 100644 index 0000000..245bfb5 --- /dev/null +++ b/assets/logos/datadog.svg @@ -0,0 +1 @@ +Datadog \ No newline at end of file diff --git a/assets/logos/falcon.svg b/assets/logos/falcon.svg new file mode 100644 index 0000000..17066fd --- /dev/null +++ b/assets/logos/falcon.svg @@ -0,0 +1 @@ +Technology Innovation Institute diff --git a/assets/logos/figma.svg b/assets/logos/figma.svg new file mode 100644 index 0000000..8b2e765 --- /dev/null +++ b/assets/logos/figma.svg @@ -0,0 +1 @@ +Figma \ No newline at end of file diff --git a/assets/logos/firebase.svg b/assets/logos/firebase.svg new file mode 100644 index 0000000..f77fafc --- /dev/null +++ b/assets/logos/firebase.svg @@ -0,0 +1 @@ +Firebase \ No newline at end of file diff --git a/assets/logos/flux.svg b/assets/logos/flux.svg new file mode 100644 index 0000000..035d74a --- /dev/null +++ b/assets/logos/flux.svg @@ -0,0 +1 @@ +Flux diff --git 
a/assets/logos/freecad.svg b/assets/logos/freecad.svg new file mode 100644 index 0000000..936fe68 --- /dev/null +++ b/assets/logos/freecad.svg @@ -0,0 +1 @@ +FreeCAD \ No newline at end of file diff --git a/assets/logos/gemma.svg b/assets/logos/gemma.svg new file mode 100644 index 0000000..802a934 --- /dev/null +++ b/assets/logos/gemma.svg @@ -0,0 +1 @@ +Gemma diff --git a/assets/logos/gimp.svg b/assets/logos/gimp.svg new file mode 100644 index 0000000..db0b2e6 --- /dev/null +++ b/assets/logos/gimp.svg @@ -0,0 +1 @@ +GIMP \ No newline at end of file diff --git a/assets/logos/github-copilot.svg b/assets/logos/github-copilot.svg new file mode 100644 index 0000000..8716d6f --- /dev/null +++ b/assets/logos/github-copilot.svg @@ -0,0 +1 @@ +GitHub Copilot \ No newline at end of file diff --git a/assets/logos/google-analytics.svg b/assets/logos/google-analytics.svg new file mode 100644 index 0000000..d5a9dcd --- /dev/null +++ b/assets/logos/google-analytics.svg @@ -0,0 +1 @@ +Google Analytics \ No newline at end of file diff --git a/assets/logos/grok.svg b/assets/logos/grok.svg new file mode 100644 index 0000000..ac7c94c --- /dev/null +++ b/assets/logos/grok.svg @@ -0,0 +1 @@ +Grok diff --git a/assets/logos/hootsuite.svg b/assets/logos/hootsuite.svg new file mode 100644 index 0000000..098fb8d --- /dev/null +++ b/assets/logos/hootsuite.svg @@ -0,0 +1 @@ +Hootsuite \ No newline at end of file diff --git a/assets/logos/intercom.svg b/assets/logos/intercom.svg new file mode 100644 index 0000000..0a5b74c --- /dev/null +++ b/assets/logos/intercom.svg @@ -0,0 +1 @@ +Intercom \ No newline at end of file diff --git a/assets/logos/jira.svg b/assets/logos/jira.svg new file mode 100644 index 0000000..961caaf --- /dev/null +++ b/assets/logos/jira.svg @@ -0,0 +1 @@ +Jira \ No newline at end of file diff --git a/assets/logos/jitsi-meet.svg b/assets/logos/jitsi-meet.svg new file mode 100644 index 0000000..673bbbd --- /dev/null +++ b/assets/logos/jitsi-meet.svg @@ -0,0 +1 @@ +Jitsi \ No 
newline at end of file diff --git a/assets/logos/kdenlive.svg b/assets/logos/kdenlive.svg new file mode 100644 index 0000000..0392016 --- /dev/null +++ b/assets/logos/kdenlive.svg @@ -0,0 +1 @@ +Kdenlive \ No newline at end of file diff --git a/assets/logos/keepassxc.svg b/assets/logos/keepassxc.svg new file mode 100644 index 0000000..f679938 --- /dev/null +++ b/assets/logos/keepassxc.svg @@ -0,0 +1 @@ +KeePassXC \ No newline at end of file diff --git a/assets/logos/krita.svg b/assets/logos/krita.svg new file mode 100644 index 0000000..ca5590d --- /dev/null +++ b/assets/logos/krita.svg @@ -0,0 +1 @@ +Krita \ No newline at end of file diff --git a/assets/logos/mailchimp.svg b/assets/logos/mailchimp.svg new file mode 100644 index 0000000..0bfa8f5 --- /dev/null +++ b/assets/logos/mailchimp.svg @@ -0,0 +1 @@ +MailChimp \ No newline at end of file diff --git a/assets/logos/mailgun.svg b/assets/logos/mailgun.svg new file mode 100644 index 0000000..7253b76 --- /dev/null +++ b/assets/logos/mailgun.svg @@ -0,0 +1 @@ +Mailgun \ No newline at end of file diff --git a/assets/logos/matomo.svg b/assets/logos/matomo.svg new file mode 100644 index 0000000..bcff636 --- /dev/null +++ b/assets/logos/matomo.svg @@ -0,0 +1 @@ +Matomo \ No newline at end of file diff --git a/assets/logos/mattermost.svg b/assets/logos/mattermost.svg new file mode 100644 index 0000000..0e5fc0b --- /dev/null +++ b/assets/logos/mattermost.svg @@ -0,0 +1 @@ +Mattermost \ No newline at end of file diff --git a/assets/logos/meta.svg b/assets/logos/meta.svg new file mode 100644 index 0000000..3ec38d1 --- /dev/null +++ b/assets/logos/meta.svg @@ -0,0 +1 @@ +Meta diff --git a/assets/logos/minio.svg b/assets/logos/minio.svg new file mode 100644 index 0000000..0a4e35d --- /dev/null +++ b/assets/logos/minio.svg @@ -0,0 +1 @@ +MinIO \ No newline at end of file diff --git a/assets/logos/mistral.svg b/assets/logos/mistral.svg new file mode 100644 index 0000000..7c5e604 --- /dev/null +++ b/assets/logos/mistral.svg @@ 
-0,0 +1 @@ +Mistral diff --git a/assets/logos/n8n.svg b/assets/logos/n8n.svg new file mode 100644 index 0000000..c15b9c7 --- /dev/null +++ b/assets/logos/n8n.svg @@ -0,0 +1 @@ +n8n \ No newline at end of file diff --git a/assets/logos/notion.svg b/assets/logos/notion.svg new file mode 100644 index 0000000..86ee23f --- /dev/null +++ b/assets/logos/notion.svg @@ -0,0 +1 @@ +Notion \ No newline at end of file diff --git a/assets/logos/odoo.svg b/assets/logos/odoo.svg new file mode 100644 index 0000000..8813c31 --- /dev/null +++ b/assets/logos/odoo.svg @@ -0,0 +1 @@ +Odoo \ No newline at end of file diff --git a/assets/logos/okta.svg b/assets/logos/okta.svg new file mode 100644 index 0000000..455851d --- /dev/null +++ b/assets/logos/okta.svg @@ -0,0 +1 @@ +Okta \ No newline at end of file diff --git a/assets/logos/ollama.svg b/assets/logos/ollama.svg new file mode 100644 index 0000000..ee042d0 --- /dev/null +++ b/assets/logos/ollama.svg @@ -0,0 +1 @@ +Ollama \ No newline at end of file diff --git a/assets/logos/onlyoffice.svg b/assets/logos/onlyoffice.svg new file mode 100644 index 0000000..4a609be --- /dev/null +++ b/assets/logos/onlyoffice.svg @@ -0,0 +1 @@ +ONLYOFFICE \ No newline at end of file diff --git a/assets/logos/penpot.svg b/assets/logos/penpot.svg new file mode 100644 index 0000000..0ac8fba --- /dev/null +++ b/assets/logos/penpot.svg @@ -0,0 +1 @@ +Penpot \ No newline at end of file diff --git a/assets/logos/phi.svg b/assets/logos/phi.svg new file mode 100644 index 0000000..584d7b5 --- /dev/null +++ b/assets/logos/phi.svg @@ -0,0 +1 @@ +Azure diff --git a/assets/logos/plane.svg b/assets/logos/plane.svg new file mode 100644 index 0000000..b8b0100 --- /dev/null +++ b/assets/logos/plane.svg @@ -0,0 +1 @@ +Plane \ No newline at end of file diff --git a/assets/logos/plausible.svg b/assets/logos/plausible.svg new file mode 100644 index 0000000..b70c5ef --- /dev/null +++ b/assets/logos/plausible.svg @@ -0,0 +1 @@ +Plausible Analytics \ No newline at end of file 
diff --git a/assets/logos/pocketbase.svg b/assets/logos/pocketbase.svg new file mode 100644 index 0000000..005f76d --- /dev/null +++ b/assets/logos/pocketbase.svg @@ -0,0 +1 @@ +PocketBase \ No newline at end of file diff --git a/assets/logos/posthog.svg b/assets/logos/posthog.svg new file mode 100644 index 0000000..445259b --- /dev/null +++ b/assets/logos/posthog.svg @@ -0,0 +1 @@ +PostHog \ No newline at end of file diff --git a/assets/logos/quickbooks.svg b/assets/logos/quickbooks.svg new file mode 100644 index 0000000..39f1009 --- /dev/null +++ b/assets/logos/quickbooks.svg @@ -0,0 +1 @@ +QuickBooks \ No newline at end of file diff --git a/assets/logos/qwen.svg b/assets/logos/qwen.svg new file mode 100644 index 0000000..24d11f3 --- /dev/null +++ b/assets/logos/qwen.svg @@ -0,0 +1 @@ +Qwen diff --git a/assets/logos/rocket-chat.svg b/assets/logos/rocket-chat.svg new file mode 100644 index 0000000..334988c --- /dev/null +++ b/assets/logos/rocket-chat.svg @@ -0,0 +1 @@ +Rocket.Chat \ No newline at end of file diff --git a/assets/logos/sap.svg b/assets/logos/sap.svg new file mode 100644 index 0000000..b8c4f22 --- /dev/null +++ b/assets/logos/sap.svg @@ -0,0 +1 @@ +SAP \ No newline at end of file diff --git a/assets/logos/sentry.svg b/assets/logos/sentry.svg new file mode 100644 index 0000000..0b47057 --- /dev/null +++ b/assets/logos/sentry.svg @@ -0,0 +1 @@ +Sentry \ No newline at end of file diff --git a/assets/logos/shopify.svg b/assets/logos/shopify.svg new file mode 100644 index 0000000..3db6034 --- /dev/null +++ b/assets/logos/shopify.svg @@ -0,0 +1 @@ +Shopify \ No newline at end of file diff --git a/assets/logos/stability.svg b/assets/logos/stability.svg new file mode 100644 index 0000000..6276dc7 --- /dev/null +++ b/assets/logos/stability.svg @@ -0,0 +1 @@ +Stability diff --git a/assets/logos/supabase.svg b/assets/logos/supabase.svg new file mode 100644 index 0000000..b9760e7 --- /dev/null +++ b/assets/logos/supabase.svg @@ -0,0 +1 @@ +Supabase \ No newline 
at end of file diff --git a/assets/logos/typeform.svg b/assets/logos/typeform.svg new file mode 100644 index 0000000..23ba814 --- /dev/null +++ b/assets/logos/typeform.svg @@ -0,0 +1 @@ +Typeform \ No newline at end of file diff --git a/assets/logos/zapier.svg b/assets/logos/zapier.svg new file mode 100644 index 0000000..2a2c899 --- /dev/null +++ b/assets/logos/zapier.svg @@ -0,0 +1 @@ +Zapier \ No newline at end of file diff --git a/assets/logos/zendesk.svg b/assets/logos/zendesk.svg new file mode 100644 index 0000000..0849b82 --- /dev/null +++ b/assets/logos/zendesk.svg @@ -0,0 +1 @@ +Zendesk \ No newline at end of file diff --git a/assets/logos/zoom.svg b/assets/logos/zoom.svg new file mode 100644 index 0000000..238e6ce --- /dev/null +++ b/assets/logos/zoom.svg @@ -0,0 +1 @@ +Zoom \ No newline at end of file diff --git a/data/category_editorial.json b/data/category_editorial.json new file mode 100644 index 0000000..a55f728 --- /dev/null +++ b/data/category_editorial.json @@ -0,0 +1,226 @@ +{ + "Communication": { + "industry": [ + "Team communication is the backbone of every modern organization. Whether your team is remote, hybrid, or co-located, the tools you choose to communicate shape how quickly decisions get made, how aligned people stay, and ultimately, how fast you can ship. A poorly configured messaging setup leads to scattered conversations, missed updates, and the kind of context-switching that quietly erodes productivity over weeks and months.", + "The dominant players in this space have built their products around a simple bet: once a team adopts a communication platform, switching costs are high enough to justify annual price increases. And for the most part, that bet has paid off — Slack's per-seat pricing has become one of the most debated line items in SaaS budgets, especially at companies north of 50 employees. 
Microsoft Teams bundles itself into the Microsoft 365 suite, making the true cost harder to isolate but no less real.", + "What's changed in recent years is the maturity of open-source alternatives. Self-hosted messaging platforms now offer threaded conversations, video calling, file sharing, integrations with CI/CD pipelines, and the kind of compliance certifications (SOC2, HIPAA) that used to be exclusive to enterprise vendors. The real unlock isn't just cost savings — it's the ability to own your communication infrastructure the same way you own your code." + ], + "oss_benefits_title": "Why Open-Source Communication Tools Make Sense", + "oss_benefits": [ + "**Full data sovereignty** — every message, file, and call recording stays on your infrastructure. Essential for regulated industries, government contracts, and teams handling sensitive IP.", + "**No per-seat pricing** — most open-source communication platforms charge nothing regardless of team size. Even those with enterprise tiers charge a fraction of proprietary alternatives.", + "**Deep customization** — white-label the interface, build custom integrations, modify notification behavior, or integrate directly with your internal tools via open APIs.", + "**Community-driven roadmaps** — feature development is driven by actual users, not product managers optimizing for upsell opportunities." + ] + }, + "AI Models": { + "industry": [ + "Large language models have become foundational infrastructure for a growing number of companies — powering everything from customer support automation and code generation to legal document analysis and creative workflows. 
The pace of improvement has been relentless: models that would have been considered state-of-the-art six months ago are routinely surpassed by newer releases that are both more capable and more efficient to run.", + "For most of 2023 and 2024, the landscape was dominated by proprietary APIs — OpenAI's GPT series and Anthropic's Claude set the benchmarks, and building on top of them was the path of least resistance. But the economics of API-based inference don't scale well. At production volumes, per-token costs can easily reach five or six figures monthly. And once you factor in data privacy requirements, latency constraints, and the operational risk of depending on a single vendor, the calculus shifts significantly toward self-hosted alternatives.", + "The open-weight movement has delivered models that genuinely compete with proprietary offerings across most practical benchmarks. Meta's Llama series, DeepSeek's reasoning models, Mistral's efficient architectures, and Qwen's multilingual capabilities have collectively proven that you don't need to pay per token to get frontier-level performance. The remaining gap, which narrows with each release cycle, is increasingly a matter of fine-tuning and deployment infrastructure rather than raw model quality." + ], + "oss_benefits_title": "The Case for Open-Weight Models", + "oss_benefits": [ + "**Zero marginal cost at scale** — once you've invested in inference hardware, every additional query is essentially free. This transforms the economics of AI-powered features from variable cost to fixed cost.", + "**Complete data privacy** — your prompts, context, and outputs never leave your network. 
Critical for healthcare, finance, legal, and any business handling PII.", + "**Full customization** — fine-tune on your domain data, adjust system prompts without restrictions, modify tokenizers, or quantize for your specific hardware profile.", + "**No vendor lock-in** — swap models as better ones emerge without rewriting integration code. Most open-weight models converge on compatible APIs and inference formats." + ] + }, + "AI Runners": { + "industry": [ + "Running AI models locally has gone from a hobbyist curiosity to a legitimate infrastructure choice. The shift was driven by three converging forces: rapidly improving open-weight models, dramatic reductions in quantization quality loss, and the rising cost consciousness around API-based inference. What was once a weekend experiment — getting a model to respond on your laptop — is now a production deployment pattern used by companies that need predictable costs and absolute data privacy.", + "The tooling around local inference has matured to match. Modern AI runners handle model downloading, quantization, context management, GPU memory allocation, and API serving with minimal configuration. Many offer OpenAI-compatible endpoints, which means existing application code that was built against commercial APIs can switch to self-hosted inference with a single URL change.", + "The hardware requirements have also dropped significantly. GGUF quantization, mixed-precision inference, and speculative decoding techniques mean that capable 7B-13B models run comfortably on consumer GPUs, and even larger 70B models are practical on workstation-grade hardware. For teams that don't need frontier-scale reasoning on every query, local inference is now the more economical choice by a wide margin." + ], + "oss_benefits_title": "Why Self-Hosted Inference Matters", + "oss_benefits": [ + "**Predictable, fixed costs** — no per-token billing, no usage spikes, no surprise invoices. 
Your inference cost is your hardware amortization, period.", + "**Complete privacy and compliance** — prompts and outputs never leave your premises. Non-negotiable for legal, medical, and classified workloads.", + "**Latency control** — local inference eliminates network round-trips. Sub-100ms time-to-first-token is achievable for many model sizes.", + "**Model flexibility** — swap, fine-tune, or quantize models to match your exact performance-cost trade-off without waiting for a vendor to add support." + ] + }, + "Monitoring": { + "industry": [ + "Monitoring is one of those infrastructure investments that only gets appreciated after something goes wrong. Every minute of undetected downtime translates directly to lost revenue, eroded user trust, and cascading failures that are exponentially harder to debug the longer they persist. For modern web applications with distributed architectures, monitoring isn't optional — it's the difference between catching a degradation at 2% error rate versus discovering it at 20% when customers start complaining on social media.", + "The commercial monitoring landscape has consolidated around a few major players — Datadog, New Relic, and Splunk — that offer comprehensive platforms with enterprise features. But their pricing models, which typically scale with data ingestion volume, create a perverse incentive: the more you instrument your application (which you should), the more you pay. Teams routinely find themselves reducing log verbosity or sampling metrics just to stay within budget, which defeats the purpose of monitoring in the first place.", + "Open-source monitoring has matured to the point where self-hosted stacks built on Prometheus, Grafana, and OpenTelemetry can match commercial platforms in capability. The trade-off is operational overhead — you're responsible for keeping the monitoring infrastructure itself running. 
But for teams with the DevOps muscle to maintain it, the cost savings at scale are substantial, and the absence of data ingestion limits means you can instrument without compromise." + ], + "oss_benefits_title": "Why Open-Source Monitoring Wins at Scale", + "oss_benefits": [ + "**No data ingestion limits** — instrument everything without worrying about per-GB pricing that punishes thoroughness.", + "**Full stack visibility** — combine metrics, logs, traces, and alerting in a single self-hosted stack with complete control over retention policies.", + "**Community-maintained integrations** — Prometheus exporters exist for virtually every database, message queue, web server, and cloud service you're running.", + "**Customizable alerting** — define alert rules, escalation policies, and notification channels that match your operational workflow exactly." + ] + }, + "Analytics": { + "industry": [ + "Understanding how users interact with your product is fundamental to making good decisions — about which features to build, where friction exists, and what's actually driving growth. Analytics tooling has become ubiquitous, but the way most teams implement it creates a quiet tension between insight and privacy. Every pageview, click, and scroll event sent to a third-party analytics service is data about your users that lives on someone else's infrastructure.", + "Google Analytics dominated this space for over a decade by being free and comprehensive. But the shift to GA4, growing regulatory pressure from GDPR and CCPA, and increasing user awareness of tracking have created an opening for alternatives that respect privacy by default. The question has moved from 'Should we track user behavior?' to 'Can we understand our users without compromising their privacy?'", + "The answer, increasingly, is yes. 
Privacy-first analytics platforms — both commercial and open-source — have proven that you can get actionable insights from aggregate data without building individual user profiles, dropping cookies, or sending behavioral data to third-party ad networks. For many teams, the switch isn't just about compliance; it's about building trust with users who are increasingly aware of how their data is being used." + ], + "oss_benefits_title": "Why Privacy-First Analytics is the Future", + "oss_benefits": [ + "**GDPR compliant without banners** — no cookies means no consent dialogs interrupting your users' experience.", + "**Lightweight by design** — most open-source analytics scripts are under 5KB, versus 40-70KB for Google Analytics, directly improving page load times.", + "**Complete data ownership** — your analytics data stays on your servers. No data mining, no profile building, no third-party data sharing.", + "**Transparent methodology** — open-source means you can audit exactly how metrics are calculated and ensure the numbers are trustworthy." + ] + }, + "Backend as a Service": { + "industry": [ + "Building a backend from scratch — authentication, database, file storage, realtime subscriptions, serverless functions — is weeks or months of work before you ship a single user-facing feature. Backend-as-a-service platforms compress that timeline by providing these building blocks as managed services with SDKs for every major frontend framework. Firebase showed the industry what's possible; Supabase proved it could be done with open-source technology.", + "The trade-off with managed BaaS has always been control. Firebase's real-time database is fast to get started with but notoriously difficult to migrate away from. Pricing structures that charge per read/write operation create anxiety at scale. 
And for applications that need to comply with data residency requirements, the inability to choose where your data lives is a non-starter.", + "Self-hosted BaaS platforms have reached the point where the developer experience is genuinely comparable to managed alternatives. PostgreSQL-backed platforms like Supabase offer SQL access, row-level security, and realtime subscriptions. Appwrite and PocketBase provide complete backend stacks that deploy with a single Docker command. The infrastructure complexity that once made self-hosting impractical has been abstracted away by mature tooling." + ], + "oss_benefits_title": "Why Self-Hosted Backends Make Sense", + "oss_benefits": [ + "**Zero vendor lock-in** — your data lives in standard PostgreSQL or SQLite databases. Migration is a pg_dump away.", + "**Predictable costs** — no per-operation billing. Your costs scale with your infrastructure, not your traffic patterns.", + "**Data residency control** — deploy wherever compliance requires, from EU data centers to air-gapped environments.", + "**Full stack access** — extend functionality at the database level, not just through vendor-defined SDKs and rules." + ] + }, + "Project Management": { + "industry": [ + "Project management tools are the operating system for how teams plan, prioritize, and track work. Get it right and you have visibility into what's happening across the organization, clear ownership of tasks, and a shared source of truth for deadlines. Get it wrong — or outgrow your tooling — and you end up with scattered context across Slack threads, Google Docs, and someone's mental model of what 'in progress' means.", + "Jira has been the default choice in enterprise settings for years, but its complexity has become its own kind of cost. Teams routinely spend more time configuring workflows, maintaining custom fields, and navigating a UI that feels designed for administrators rather than the people doing the actual work. 
Linear's success proved there's massive demand for tools that are fast, opinionated, and pleasant to use — but Linear's pricing and closed-source nature aren't for everyone.", + "Open-source project management tools have learned from both extremes. The current generation offers clean, modern interfaces inspired by Linear's speed and simplicity, while providing the flexibility to customize workflows that Jira users expect. Self-hosting means your planning data — which often contains sensitive roadmap and strategy information — stays within your infrastructure." + ], + "oss_benefits_title": "Why Open-Source Project Management", + "oss_benefits": [ + "**No per-seat tax on growth** — add team members without budget conversations with finance.", + "**Sensitive roadmap data stays internal** — product strategy, timelines, and resource allocation don't leave your network.", + "**Customizable workflows** — modify issue types, states, and automation rules at the code level, not through limited configuration UIs.", + "**Integration freedom** — connect with your CI/CD, Slack, Git, and custom internal tools through open APIs." + ] + }, + "Security": { + "industry": [ + "Security infrastructure — password management, identity providers, authentication services, and encryption tooling — sits at the foundation of every application. A breach in any of these layers doesn't just affect one feature; it compromises the entire trust relationship with your users. The stakes are high enough that many teams default to commercial security products, reasoning that the cost of a vendor is trivial compared to the cost of a security incident.", + "But trusting a security vendor also means trusting their infrastructure, their access controls, their employee vetting, and their incident response. After high-profile breaches at LastPass and Okta, more teams are asking whether the convenience of managed security services justifies the concentration of risk. 
When your password vault or identity provider is a single vendor's cloud service, a compromise at that vendor becomes your compromise too.", + "Open-source security tools offer an alternative model: trust through transparency. When the source code is public, security researchers worldwide can audit it. When the data stays on your infrastructure, a vendor breach doesn't affect you. The trade-off is operational responsibility — but for teams that already manage their own infrastructure, self-hosting a password manager or identity provider is a natural extension." + ], + "oss_benefits_title": "Why Open-Source Security Infrastructure", + "oss_benefits": [ + "**Auditable code** — the source is public, reviewed by the community, and regularly audited by independent security researchers.", + "**Zero-knowledge architecture** — your secrets never leave your infrastructure. No vendor employees can access your vaults or tokens.", + "**Air-gap capability** — deploy in fully isolated environments when compliance or classification requirements demand it.", + "**No subscription for essential security** — password management and authentication shouldn't be a recurring cost per user." + ] + }, + "DevOps": { + "industry": [ + "The promise of DevOps was simple: developers should be able to deploy their code without filing tickets, waiting for ops teams, or configuring infrastructure by hand. Platforms like Heroku, Vercel, and Railway delivered on that promise beautifully — git push and your app is live. But the convenience comes with constraints: vendor-specific build systems, pricing that scales with compute time, and the nagging awareness that your deployment pipeline is someone else's product decision.", + "Self-hosted Platform-as-a-Service alternatives have closed the experience gap significantly. Tools like Coolify, Dokku, and CapRover provide the same git-push deployment workflow on your own servers, with the added benefit of running on hardware you control. 
The pricing model shifts from per-project or per-build-minute to a flat monthly VPS cost that supports as many projects as your server can handle.", + "For freelancers and small teams running multiple projects, the economics are especially compelling. A $20/month VPS running a self-hosted PaaS can handle workloads that would cost $200+ across managed platforms. For larger teams, the value proposition shifts toward control: custom build pipelines, deployment policies, and the ability to integrate with internal infrastructure that cloud PaaS providers don't support." + ], + "oss_benefits_title": "Why Self-Hosted Deployment Platforms", + "oss_benefits": [ + "**Flat infrastructure costs** — one server, unlimited projects. No per-app or per-build pricing.", + "**Full pipeline control** — customize build, test, and deployment steps without platform constraints.", + "**Infrastructure portability** — move between cloud providers or on-prem without rewriting deployment configurations.", + "**Multi-service support** — deploy databases, message queues, and background workers alongside your apps on the same platform." + ] + }, + "Productivity": { + "industry": [ + "Productivity software — document editors, note-taking apps, knowledge bases, wikis — is the digital workspace where ideas become artifacts. Notion's success proved that people want more than just a text editor; they want tools that can organize information spatially, link concepts together, and serve as both a writing surface and a lightweight database. But Notion and its commercial peers store every thought, draft, and internal document on their servers.", + "For companies, that means proprietary knowledge, strategic planning documents, and sensitive internal communications live on third-party infrastructure. For individuals, it means personal notes, journals, and creative work exist at the mercy of a subscription billing cycle. 
When your knowledge base is someone else's SaaS product, they have leverage over your most important asset: your accumulated knowledge.", + "Open-source productivity tools have evolved from basic Markdown editors into full workspace platforms. AppFlowy and AFFiNE offer Notion-like block editors with local-first architectures. Outline provides team knowledge bases with Slack integration. ONLYOFFICE delivers collaborative document editing that genuinely competes with Google Workspace. The common thread is data ownership — your documents, your server, your rules." + ], + "oss_benefits_title": "Why Open-Source Productivity Tools", + "oss_benefits": [ + "**Local-first architecture** — your data exists on your device first, synced on your terms. No internet required to access your own notes.", + "**No content lock-in** — export everything in standard formats. Your knowledge base shouldn't be trapped in a proprietary database.", + "**Collaborative without compromise** — real-time editing and sharing without routing every keystroke through a third-party server.", + "**Offline-capable** — work anywhere, sync when you're ready. Perfect for environments with intermittent connectivity." + ] + }, + "Design": { + "industry": [ + "Design tools shape how products look and feel. For the last decade, Adobe's Creative Suite and Figma have defined what professional design tooling looks like — and what it costs. Adobe's subscription model transformed perpetual licenses into recurring revenue, while Figma proved that browser-based collaboration could rival native application performance. Both are excellent tools. Both also represent significant ongoing costs and deep vendor dependencies.", + "The open-source design ecosystem has expanded beyond GIMP as the sole Photoshop alternative. Krita has become the tool of choice for digital painters and illustrators, with a brush engine that many artists prefer over Photoshop's. 
Penpot offers browser-based collaborative design with SVG-native output. Inkscape handles vector graphics with a feature set that covers 90% of what Illustrator does. Each has carved out a niche where it genuinely excels rather than trying to replicate commercial tools feature-for-feature.", + "For teams considering a switch, the question isn't whether open-source design tools are 'good enough' — several are genuinely better for specific workflows. The question is whether your existing asset libraries, plugins, and team workflows can adapt. The answer, increasingly, is yes." + ], + "oss_benefits_title": "Why Open-Source Design Tools", + "oss_benefits": [ + "**No subscription treadmill** — professional design capability without monthly fees that increase every year.", + "**Standard file formats** — SVG, PNG, PSD, and OpenRaster support means your assets aren't locked into one vendor's format.", + "**Extensible through plugins** — customize your workflow with community-built extensions, scripts, and brush packs.", + "**Cross-platform freedom** — run the same tool on Linux, macOS, and Windows without feature disparity." + ] + }, + "CRM": { + "industry": [ + "Customer relationship management sits at the heart of revenue operations. Every interaction — from first touchpoint through closed deal to ongoing account management — flows through the CRM. That centrality is exactly why CRM vendors can charge premium prices: once your sales process, reporting, and integrations are built around a platform, the switching cost feels enormous.", + "Salesforce perfected this dynamic. Its ecosystem of apps, consultants, and certifications creates gravitational pull that's hard to escape. HubSpot offered a friendlier on-ramp but follows the same playbook: free tier to get you in, premium features to keep you paying. For growing companies, CRM costs can quietly become one of the largest line items in the tools budget.", + "Open-source CRM alternatives approach the problem differently. 
Platforms like Twenty and Odoo offer modern interfaces with full control over your customer data. The functionality gap has narrowed — pipeline management, email tracking, activity logging, and reporting are all available. What's changed is the recognition that customer data is too strategically important to store on someone else's servers." + ], + "oss_benefits_title": "Why Open-Source CRM", + "oss_benefits": [ + "**Your customer data, your servers** — sales intelligence and customer communications are among the most sensitive data a company has.", + "**No per-seat sales tax** — add SDRs, AEs, and CSMs without budget negotiations for each headcount.", + "**Deep customization** — modify deal stages, fields, automations, and reporting at the code level.", + "**Integration on your terms** — connect to your email, calendar, and internal tools without marketplace surcharges." + ] + }, + "Marketing": { + "industry": [ + "Marketing technology — email automation, newsletter platforms, campaign management, and transactional email — has become a critical layer in how businesses communicate with their audiences. The volume of email sent by companies has grown year over year, and with it, the bills from platforms like Mailchimp, SendGrid, and HubSpot. What starts as $50/month for a small list can grow to thousands as your subscriber base expands.", + "The economics of email marketing have a unique quirk: the value of your list compounds over time, but so does the cost of maintaining it on a managed platform. Switching providers means migrating subscriber data, rebuilding templates, re-verifying domains, and potentially losing engagement history. This lock-in is subtle but expensive — many teams continue paying premium prices simply because migration is daunting.", + "Self-hosted email and marketing tools fundamentally change this equation. Platforms like Listmonk can handle millions of subscribers on a single server. Mautic provides marketing automation comparable to HubSpot. 
Postal handles transactional email delivery at scale. The infrastructure cost is a fraction of managed alternatives, and the data — your subscriber lists, engagement metrics, and campaign history — stays entirely within your control." + ], + "oss_benefits_title": "Why Self-Hosted Marketing Infrastructure", + "oss_benefits": [ + "**Scale without per-subscriber pricing** — your list can grow to millions without your bill growing proportionally.", + "**Full deliverability control** — manage your own IP reputation, DKIM, SPF, and DMARC settings.", + "**No data sharing with ad platforms** — your subscriber data isn't being used to train ad targeting models.", + "**Campaign data ownership** — engagement metrics, A/B test results, and audience segments stay on your infrastructure." + ] + }, + "Support": { + "industry": [ + "Customer support tooling directly impacts how quickly and effectively you can help your users. The experience of submitting a ticket, chatting with support, or reading documentation shapes perception of your entire product. Zendesk and Intercom have set the baseline for what teams expect from support platforms, but their per-agent pricing means that scaling your support team scales your tooling costs linearly.", + "The support space has also seen significant feature creep in pricing. Chatbots, knowledge bases, analytics dashboards, and multichannel inboxes are increasingly gated behind higher-tier plans. Teams frequently find themselves paying for an 'enterprise' plan not because they need enterprise features, but because the one specific capability they need was strategically placed in that tier.", + "Open-source helpdesk platforms offer the core functionality — ticketing, live chat, knowledge bases, and multichannel support — without per-agent pricing or feature gating. Tools like Zammad and Chaskiq provide mature platforms that can be self-hosted and customized to match your support workflow exactly." 
+ ], + "oss_benefits_title": "Why Open-Source Support Platforms", + "oss_benefits": [ + "**No per-agent pricing** — scale your support team without scaling your tooling costs.", + "**Omnichannel without upsells** — email, chat, social, and phone support in one platform, included by default.", + "**Complete conversation history ownership** — support interactions contain valuable product feedback. Keep that data accessible on your terms.", + "**Customizable workflows** — build escalation rules, SLA tracking, and routing logic that matches your specific support process." + ] + }, + "Automation": { + "industry": [ + "Workflow automation connects the tools your team already uses, eliminating the repetitive manual work that consumes hours every week. When a new lead fills out a form, automation can create a CRM record, send a welcome email, notify the sales team, and update a spreadsheet — all without human intervention. Zapier and Make have made this accessible to non-technical users, but at $20-50+ per month for serious usage, the cost adds up.", + "The real cost of managed automation isn't just the subscription — it's the per-task or per-operation pricing that makes complex workflows expensive. A workflow that triggers 10,000 times per month might cost $100+ on Zapier. Run that same workflow on a self-hosted platform like n8n, and the cost is whatever you're already paying for your server.", + "Open-source automation platforms have reached the point where they offer comparable visual builders, similar integration libraries, and the added benefit of running custom code nodes for anything the pre-built integrations don't cover. For technical teams, the ability to add JavaScript or Python logic directly into a workflow is a significant advantage over the more constrained no-code approaches." 
+ ], + "oss_benefits_title": "Why Open-Source Automation", + "oss_benefits": [ + "**No per-execution pricing** — run workflows as often as needed without counting operations.", + "**Code when you need it** — drop into JavaScript or Python for custom logic that no-code builders can't handle.", + "**Data stays local** — sensitive business data flowing through automation workflows doesn't leave your infrastructure.", + "**Custom integrations** — build connectors for internal APIs that managed platforms will never support." + ] + }, + "E-commerce": { + "industry": [ + "E-commerce platforms are the foundation of online retail. Shopify has made launching a store remarkably simple, but that simplicity comes with trade-offs: transaction fees on every sale, limited customization depth, and a dependency on Shopify's infrastructure for your entire business. When the platform decides to change its API, adjust its pricing, or deprecate a feature you rely on, you adapt or you scramble.", + "The headless commerce movement has shifted the conversation from 'which all-in-one platform' to 'which best-of-breed components.' By decoupling the storefront from the commerce engine, teams can use any frontend framework while plugging into a commerce backend for product management, orders, payments, and fulfillment. Open-source headless platforms like Medusa.js make this architecture accessible without enterprise licensing fees.", + "For brands that have outgrown template-based storefronts or need multi-market support, self-hosted commerce infrastructure offers the flexibility to build exactly the shopping experience their customers expect, without the constraints and costs of managed platforms." 
+ ], + "oss_benefits_title": "Why Open-Source Commerce", + "oss_benefits": [ + "**No transaction fees** — zero percent commission on every sale, regardless of volume.", + "**Complete storefront control** — build any frontend experience without template limitations.", + "**Multi-region and multi-currency** — handle international commerce without per-market licensing fees.", + "**Plugin architecture** — extend functionality with community-built modules for payments, fulfillment, and CMS integrations." + ] + } +} \ No newline at end of file diff --git a/data/schema/types.ts b/data/schema/types.ts new file mode 100644 index 0000000..9157a5c --- /dev/null +++ b/data/schema/types.ts @@ -0,0 +1,66 @@ +export interface DeploymentConfig { + image: string; + port: number; + env?: { key: string; value: string }[]; + volumes?: string[]; + command?: string; + local_path?: string; + type?: 'docker-compose' | 'dockerfile'; +} + +export interface Tool { + slug: string; + name: string; + category: string; + is_open_source: boolean; + description: string; + website: string; + github_repo?: string; + stars?: number; + description_long?: string; + pros?: string[]; + cons?: string[]; + min_cost?: number; + avg_monthly_cost?: number; // Estimated self-hosting or SaaS cost + pricing_model?: 'Free' | 'Freemium' | 'Paid' | 'Paid (Subscription)' | 'Paid (One-time)'; + has_free_trial?: boolean; + self_hostable?: boolean; + license?: string; + language?: string; + tags?: string[]; + alternatives?: string[]; + last_commit?: string; + logo_url?: string; + affiliate_url?: string; + referral_url?: string; // New field for specific referral links + deployment?: DeploymentConfig; + hardware_req?: string; // e.g., "16GB VRAM", "CPU only", "Cloud API" + hosting_type?: 'cloud' | 'self-hosted' | 'both'; // How the tool/model is accessed + + ai_metadata?: { + vram_inference_gb?: number; // Recommended VRAM for inference (FP16/BF16) + context_window_tokens?: number; // Max sequence length + 
parameters_total_b?: number; // Billion parameters + parameters_active_b?: number; // For MoE models + is_multimodal?: boolean; + }; +} + +export interface Stack { + id: string; + name: string; + emoji: string; + tagline: string; + description: string; + monthlySaved: number; + + // Ordered list of tools with specific roles in this stack + tools: { + category: string; // e.g. "The Database", "Authentication" + toolSlug: string; + }[]; + + // SEO + seo_title?: string; + seo_description?: string; +} diff --git a/data/seo.ts b/data/seo.ts new file mode 100644 index 0000000..9870c49 --- /dev/null +++ b/data/seo.ts @@ -0,0 +1,34 @@ +import { Tool } from '../app/types'; +import toolsData from './tools.json'; + +const tools = toolsData as Tool[]; + +export interface VsPair { + slug: string; // "slack-vs-mattermost" + proprietaryTool: Tool; + opensourceTool: Tool; +} + +export function generateVsPairs(): VsPair[] { + const pairs: VsPair[] = []; + + // Find all proprietary tools + const proprietaryTools = tools.filter(t => !t.is_open_source); + + proprietaryTools.forEach(propTool => { + if (!propTool.alternatives) return; + + propTool.alternatives.forEach(altSlug => { + const altTool = tools.find(t => t.slug === altSlug); + if (altTool) { + pairs.push({ + slug: `${propTool.slug}-vs-${altTool.slug}`, + proprietaryTool: propTool, + opensourceTool: altTool + }); + } + }); + }); + + return pairs; +} diff --git a/data/stacks.ts b/data/stacks.ts new file mode 100644 index 0000000..901e6d6 --- /dev/null +++ b/data/stacks.ts @@ -0,0 +1,90 @@ +import { Stack } from '../app/types'; + +export const STACKS: Stack[] = [ + { + id: 'bootstrapper', + name: 'The Bootstrapper Stack', + emoji: '🚀', + tagline: 'Launch for $0/mo', + description: 'Everything you need to build, ship, and manage a SaaS product without spending a dime on software. 
Perfect for solo founders and early-stage startups.', + monthlySaved: 310, + tools: [ + { category: 'Database & Auth', toolSlug: 'supabase' }, + { category: 'Project Mgmt', toolSlug: 'plane' }, + { category: 'Communication', toolSlug: 'rocketchat' }, + { category: 'Deployment', toolSlug: 'coolify' }, + { category: 'Analytics', toolSlug: 'plausible' }, + { category: 'Design', toolSlug: 'penpot' } + ], + seo_title: 'The Bootstrapper Stack - Build SaaS for Free', + seo_description: 'The ultimate open-source stack for solo founders. Database, Auth, DevOps, and Design tools that cost $0/mo.' + }, + { + id: 'designer', + name: 'The Designer Stack', + emoji: '🎨', + tagline: 'Ditch Creative Cloud', + description: 'Professional design tools that rival Adobe. From UI/UX prototyping to photo editing and digital art — all open source, all free.', + monthlySaved: 110, + tools: [ + { category: 'UI/UX Design', toolSlug: 'penpot' }, + { category: 'Photo Editing', toolSlug: 'gimp' }, + { category: 'Digital Art', toolSlug: 'krita' }, + { category: 'Knowledge Base', toolSlug: 'appflowy' } + ], + seo_title: 'Open Source Design Stack - Adobe Alternatives', + seo_description: 'Free, professional design tools to replace Adobe Creative Cloud. Penpot, GIMP, Krita, and more.' + }, + { + id: 'ai-first', + name: 'The AI-First Stack', + emoji: '🤖', + tagline: 'Own your AI', + description: 'Run powerful AI locally. No API keys, no usage limits, no data leaving your machine. LLMs, image generation, and code completion — all self-hosted.', + monthlySaved: 69, + tools: [ + { category: 'LLM Inference', toolSlug: 'llama' }, + { category: 'Coding Model', toolSlug: 'deepseek' }, + { category: 'Image Generation', toolSlug: 'stable-diffusion' }, + { category: 'IDE Assistant', toolSlug: 'continue-dev' }, + { category: 'Autocomplete', toolSlug: 'tabby' } + ], + seo_title: 'Local AI Stack - Self-Hosted LLMs & Tools', + seo_description: 'Run AI locally with this curated stack. 
Llama 3, Stable Diffusion, and coding assistants that respect your privacy.' + }, + { + id: 'devops', + name: 'The DevOps Stack', + emoji: '⚙️', + tagline: 'Self-host everything', + description: 'From backend to hosting to monitoring — deploy and manage your entire infrastructure on your own terms. Zero vendor lock-in.', + monthlySaved: 375, + tools: [ + { category: 'Backend as a Service', toolSlug: 'supabase' }, + { category: 'PaaS (Vercel Alt)', toolSlug: 'coolify' }, + { category: 'Git Deployment', toolSlug: 'dokku' }, + { category: 'Web Analytics', toolSlug: 'plausible' }, + { category: 'Product Analytics', toolSlug: 'posthog' } + ], + seo_title: 'Open Source DevOps Stack - Self-Hosted PaaS', + seo_description: 'Deploy like a pro with tools like Coolify, Dokku, and Supabase. The ultimate self-hosted infrastructure stack.' + }, + { + id: 'privacy', + name: 'The Privacy Stack', + emoji: '🔒', + tagline: 'Zero data leaks', + description: 'Every tool runs on your infrastructure. Your data never touches a third-party server. For teams and individuals who take privacy seriously.', + monthlySaved: 185, + tools: [ + { category: 'Password Manager', toolSlug: 'bitwarden' }, + { category: 'Team Chat', toolSlug: 'mattermost' }, + { category: 'Video Calls', toolSlug: 'jitsi' }, + { category: 'Analytics', toolSlug: 'matomo' }, + { category: 'Notes & Docs', toolSlug: 'appflowy' }, + { category: 'Knowledge Base', toolSlug: 'affine' } + ], + seo_title: 'Privacy-First Software Stack - Secure Alternatives', + seo_description: 'A 100% self-hostable stack for maximum privacy. Bitwarden, Mattermost, Jitsi, and more.' 
+ } +]; diff --git a/data/tools-min.json b/data/tools-min.json new file mode 100644 index 0000000..66849ca --- /dev/null +++ b/data/tools-min.json @@ -0,0 +1,1653 @@ +[ + { + "slug": "firebase", + "name": "Firebase", + "category": "Backend as a Service", + "is_open_source": false, + "description": "Google's app development platform.", + "logo_url": "/logos/firebase.svg", + "pricing_model": "Paid/Freemium", + "avg_monthly_cost": 25, + "alternatives": [ + "supabase", + "appwrite", + "pocketbase" + ], + "tags": [ + "Cloud", + "Database", + "Auth" + ], + "website": "https://firebase.google.com" + }, + { + "slug": "supabase", + "name": "Supabase", + "category": "Backend as a Service", + "is_open_source": true, + "description": "The Postgres development platform. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications.", + "logo_url": "/logos/supabase.svg", + "alternatives": [], + "tags": [ + "Database", + "Realtime", + "Postgres" + ], + "license": "Apache License 2.0", + "website": "https://supabase.com" + }, + { + "slug": "appwrite", + "name": "Appwrite", + "category": "Backend as a Service", + "is_open_source": true, + "description": "Appwrite® - complete cloud infrastructure for your web, mobile and AI apps. 
Including Auth, Databases, Storage, Functions, Messaging, Hosting, Realtime and more", + "logo_url": "/logos/appwrite.svg", + "alternatives": [], + "tags": [ + "Database", + "Auth", + "Self-Hosted" + ], + "license": "BSD 3-Clause \"New\" or \"Revised\" License", + "website": "https://appwrite.io" + }, + { + "slug": "pocketbase", + "name": "PocketBase", + "category": "Backend as a Service", + "is_open_source": true, + "description": "Open Source realtime backend in 1 file", + "logo_url": "/logos/pocketbase.svg", + "alternatives": [], + "tags": [], + "license": "MIT License", + "website": "https://pocketbase.io" + }, + { + "slug": "salesforce", + "name": "Salesforce", + "category": "CRM", + "is_open_source": false, + "description": "The world's #1 CRM.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=salesforce.com", + "pricing_model": "Paid", + "avg_monthly_cost": 25, + "alternatives": [ + "odoo", + "erpnext" + ], + "tags": [], + "website": "https://salesforce.com" + }, + { + "slug": "slack", + "name": "Slack", + "category": "Communication", + "is_open_source": false, + "description": "Team communication platform.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=slack.com", + "pricing_model": "Paid/Freemium", + "avg_monthly_cost": 12, + "alternatives": [ + "mattermost", + "rocketchat" + ], + "tags": [], + "website": "https://slack.com" + }, + { + "slug": "mattermost", + "name": "Mattermost", + "category": "Communication", + "is_open_source": true, + "description": "Mattermost is an open source platform for secure collaboration across the entire software development lifecycle..", + "logo_url": "/logos/mattermost.svg", + "alternatives": [], + "tags": [], + "license": "Other", + "website": "https://mattermost.com" + }, + { + "slug": "rocketchat", + "name": "Rocket.Chat", + "category": "Communication", + "is_open_source": true, + "description": "The Secure CommsOS™ for mission-critical operations", + "logo_url": 
"https://www.google.com/s2/favicons?sz=128&domain=rocket.chat",
+    "alternatives": [],
+    "tags": [],
+    "license": "Other",
+    "website": "https://rocket.chat"
+  },
+  {
+    "slug": "jira",
+    "name": "Jira",
+    "category": "Project Management",
+    "is_open_source": false,
+    "description": "Issue tracking and project management tool.",
+    "logo_url": "/logos/jira.svg",
+    "pricing_model": "Paid",
+    "avg_monthly_cost": 15,
+    "alternatives": [
+      "plane",
+      "taiga"
+    ],
+    "tags": [],
+    "website": "https://www.atlassian.com/software/jira"
+  },
+  {
+    "slug": "plane",
+    "name": "Plane",
+    "category": "Project Management",
+    "is_open_source": true,
+    "description": "🔥🔥🔥 Open-source Jira, Linear, Monday, and ClickUp alternative. Plane is a modern project management platform to manage tasks, sprints, docs, and triage.",
+    "logo_url": "/logos/plane.svg",
+    "alternatives": [],
+    "tags": [],
+    "license": "GNU Affero General Public License v3.0",
+    "website": "https://plane.so"
+  },
+  {
+    "slug": "taiga",
+    "name": "Taiga",
+    "category": "Project Management",
+    "is_open_source": true,
+    "description": "Free and open-source agile project management platform supporting Scrum and Kanban workflows.",
+    "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=taiga.io",
+    "alternatives": [],
+    "tags": [],
+    "license": "Mozilla Public License 2.0",
+    "website": "https://taiga.io"
+  },
+  {
+    "slug": "zoom",
+    "name": "Zoom",
+    "category": "Communication",
+    "is_open_source": false,
+    "description": "Video conferencing platform, cloud phone, webinars, and chat.",
+    "logo_url": "/logos/zoom.svg",
+    "pricing_model": "Paid/Freemium",
+    "avg_monthly_cost": 15,
+    "alternatives": [
+      "jitsi-meet"
+    ],
+    "tags": [],
+    "website": "https://zoom.us"
+  },
+  {
+    "slug": "jitsi-meet",
+    "name": "Jitsi Meet",
+    "category": "Communication",
+    "is_open_source": true,
+    "description": "Jitsi Meet - Secure, Simple and Scalable Video Conferences that you use as a standalone app or embed in your web application.",
+    "logo_url": "/logos/jitsi-meet.svg",
+    "alternatives": [],
+    "tags": [],
+    "license": 
"Apache License 2.0",
+    "website": "https://jitsi.org"
+  },
+  {
+    "slug": "photoshop",
+    "name": "Adobe Photoshop",
+    "category": "Design",
+    "is_open_source": false,
+    "description": "Industry standard image editing software.",
+    "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.adobe.com",
+    "pricing_model": "Paid (Monthly)",
+    "avg_monthly_cost": 60,
+    "alternatives": [
+      "gimp",
+      "krita"
+    ],
+    "tags": [],
+    "website": "https://www.adobe.com/products/photoshop.html"
+  },
+  {
+    "slug": "gimp",
+    "name": "GIMP",
+    "category": "Design",
+    "is_open_source": true,
+    "description": "Free and open-source image editor for photo retouching, image composition, and image authoring.",
+    "logo_url": "/logos/gimp.svg",
+    "alternatives": [],
+    "tags": [],
+    "license": "Other",
+    "website": "https://www.gimp.org"
+  },
+  {
+    "slug": "krita",
+    "name": "Krita",
+    "category": "Design",
+    "is_open_source": true,
+    "description": "Krita is a free and open source cross-platform application that offers an end-to-end solution for creating digital art files from scratch built on the KDE and Qt frameworks.",
+    "logo_url": "/logos/krita.svg",
+    "alternatives": [],
+    "tags": [],
+    "license": "GNU General Public License v3.0",
+    "website": "https://krita.org"
+  },
+  {
+    "slug": "figma",
+    "name": "Figma",
+    "category": "Design",
+    "is_open_source": false,
+    "description": "Collaborative interface design tool.",
+    "logo_url": "/logos/figma.svg",
+    "pricing_model": "Freemium/Paid",
+    "avg_monthly_cost": 15,
+    "alternatives": [
+      "penpot"
+    ],
+    "tags": [],
+    "website": "https://www.figma.com"
+  },
+  {
+    "slug": "penpot",
+    "name": "Penpot",
+    "category": "Design",
+    "is_open_source": true,
+    "description": "Penpot: The open-source design tool for design and code collaboration",
+    "logo_url": "/logos/penpot.svg",
+    "alternatives": [],
+    "tags": [],
+    "license": "Mozilla Public License 2.0",
+    "website": "https://penpot.app"
+  },
+  {
+    "slug": "notion",
+    "name": "Notion",
+    "category": "Productivity",
+    
"is_open_source": false, + "description": "All-in-one workspace.", + "logo_url": "/logos/notion.svg", + "pricing_model": "Freemium/Paid", + "avg_monthly_cost": 10, + "alternatives": [ + "appflowy", + "affine" + ], + "tags": [], + "website": "https://www.notion.so" + }, + { + "slug": "appflowy", + "name": "AppFlowy", + "category": "Productivity", + "is_open_source": true, + "description": "Bring projects, wikis, and teams together with AI. AppFlowy is the AI collaborative workspace where you achieve more without losing control of your data. The leading open source Notion alternative.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.appflowy.io", + "alternatives": [], + "tags": [], + "license": "GNU Affero General Public License v3.0", + "website": "https://www.appflowy.io" + }, + { + "slug": "affine", + "name": "AFFiNE", + "category": "Productivity", + "is_open_source": true, + "description": "There can be more than Notion and Miro. AFFiNE(pronounced [ə‘fain]) is a next-gen knowledge base that brings planning, sorting and creating all together. Privacy first, open-source, customizable and ready to use. 
", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=affine.pro", + "alternatives": [], + "tags": [], + "license": "Other", + "website": "https://affine.pro" + }, + { + "slug": "google-analytics", + "name": "Google Analytics", + "category": "Analytics", + "is_open_source": false, + "description": "Web analytics service.", + "logo_url": "/logos/google-analytics.svg", + "pricing_model": "Free/Paid", + "avg_monthly_cost": 150, + "alternatives": [ + "plausible", + "posthog", + "matomo" + ], + "tags": [], + "website": "https://analytics.google.com" + }, + { + "slug": "plausible", + "name": "Plausible", + "category": "Analytics", + "is_open_source": true, + "description": "Simple, open source, lightweight and privacy-friendly web analytics alternative to Google Analytics.", + "logo_url": "/logos/plausible.svg", + "alternatives": [], + "tags": [ + "Analytics", + "Privacy", + "GDPR" + ], + "license": "GNU Affero General Public License v3.0", + "website": "https://plausible.io" + }, + { + "slug": "posthog", + "name": "PostHog", + "category": "Analytics", + "is_open_source": true, + "description": "🦔 PostHog is an all-in-one developer platform for building successful products. We offer product analytics, web analytics, session replay, error tracking, feature flags, experimentation, surveys, data warehouse, a CDP, and an AI product assistant to help debug your code, ship features faster, and keep all your usage and customer data in one stack.", + "logo_url": "/logos/posthog.svg", + "alternatives": [], + "tags": [], + "license": "Other", + "website": "https://posthog.com" + }, + { + "slug": "matomo", + "name": "Matomo", + "category": "Analytics", + "is_open_source": true, + "description": "Empowering People Ethically 🚀 — Matomo is hiring! Join us → https://matomo.org/jobs Matomo is the leading open-source alternative to Google Analytics, giving you complete control and built-in privacy. Easily collect, visualise, and analyse data from websites & apps. 
Star us on GitHub ⭐️ – Pull Requests welcome! ", + "logo_url": "/logos/matomo.svg", + "alternatives": [], + "tags": [], + "license": "GNU General Public License v3.0", + "website": "https://matomo.org" + }, + { + "slug": "1password", + "name": "1Password", + "category": "Security", + "is_open_source": false, + "description": "Password manager.", + "logo_url": "/logos/1password.svg", + "pricing_model": "Paid", + "avg_monthly_cost": 8, + "alternatives": [ + "bitwarden", + "keepassxc" + ], + "tags": [], + "website": "https://1password.com" + }, + { + "slug": "bitwarden", + "name": "Bitwarden", + "category": "Security", + "is_open_source": true, + "description": "Bitwarden infrastructure/backend (API, database, Docker, etc).", + "logo_url": "/logos/bitwarden.svg", + "alternatives": [], + "tags": [], + "license": "Other", + "website": "https://bitwarden.com" + }, + { + "slug": "keepassxc", + "name": "KeePassXC", + "category": "Security", + "is_open_source": true, + "description": "KeePassXC is a cross-platform community-driven port of the Windows application “KeePass Password Safe”.", + "logo_url": "/logos/keepassxc.svg", + "alternatives": [], + "tags": [], + "license": "Other", + "website": "https://keepassxc.org" + }, + { + "slug": "heroku", + "name": "Heroku", + "category": "DevOps", + "is_open_source": false, + "description": "Platform as a service.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=heroku.com", + "pricing_model": "Paid", + "avg_monthly_cost": 8, + "alternatives": [ + "coolify", + "dokku" + ], + "tags": [], + "website": "https://heroku.com" + }, + { + "slug": "coolify", + "name": "Coolify", + "category": "DevOps", + "is_open_source": true, + "description": "An open-source, self-hostable PaaS alternative to Vercel, Heroku & Netlify that lets you easily deploy static sites, databases, full-stack applications and 280+ one-click services on your own servers.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coolify.io", + 
"alternatives": [], + "tags": [ + "DevOps", + "PaaS", + "Self-Hosted" + ], + "license": "Apache License 2.0", + "website": "https://coolify.io" + }, + { + "slug": "sap", + "name": "SAP S/4HANA", + "category": "ERP", + "is_open_source": false, + "description": "The world leader in enterprise resource planning software.", + "logo_url": "/logos/sap.svg", + "pricing_model": "Paid (Enterprise)", + "avg_monthly_cost": 100, + "alternatives": [ + "odoo", + "erpnext" + ], + "tags": [], + "website": "https://www.sap.com" + }, + { + "slug": "odoo", + "name": "Odoo", + "category": "ERP", + "is_open_source": true, + "description": "A suite of open source business apps: CRM, eCommerce, accounting, manufacturing, warehouse, and more.", + "logo_url": "/logos/odoo.svg", + "alternatives": [], + "tags": [], + "license": "LGPL-3.0", + "website": "https://www.odoo.com" + }, + { + "slug": "erpnext", + "name": "ERPNext", + "category": "ERP", + "is_open_source": true, + "description": "A free and open-source integrated Enterprise Resource Planning (ERP) software.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=erpnext.com", + "alternatives": [], + "tags": [], + "license": "GNU General Public License v3.0", + "website": "https://erpnext.com" + }, + { + "slug": "autocad", + "name": "AutoCAD", + "category": "CAD", + "is_open_source": false, + "description": "Professional computer-aided design (CAD) and drafting software.", + "logo_url": "/logos/autocad.svg", + "pricing_model": "Paid (Subscription)", + "avg_monthly_cost": 75, + "alternatives": [ + "librecad", + "freecad" + ], + "tags": [], + "website": "https://www.autodesk.com/products/autocad" + }, + { + "slug": "librecad", + "name": "LibreCAD", + "category": "CAD", + "is_open_source": true, + "description": "A mature, feature-rich 2D CAD application with a loyal user community.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=librecad.org", + "alternatives": [], + "tags": [], + "license": "GPLv2", + 
"website": "https://librecad.org" + }, + { + "slug": "freecad", + "name": "FreeCAD", + "category": "CAD", + "is_open_source": true, + "description": "A general-purpose parametric 3D CAD modeler and a BIM software application.", + "logo_url": "/logos/freecad.svg", + "alternatives": [], + "tags": [], + "license": "LGPLv2+", + "website": "https://www.freecad.org" + }, + { + "slug": "zapier", + "name": "Zapier", + "category": "Automation", + "is_open_source": false, + "description": "The pioneer in workflow automation for everyone.", + "logo_url": "/logos/zapier.svg", + "pricing_model": "Paid (Task-based)", + "avg_monthly_cost": 20, + "alternatives": [ + "n8n", + "activepieces" + ], + "tags": [], + "website": "https://zapier.com" + }, + { + "slug": "n8n", + "name": "n8n", + "category": "Automation", + "is_open_source": true, + "description": "Fair-code workflow automation tool. Easily automate tasks across different services.", + "logo_url": "/logos/n8n.svg", + "alternatives": [], + "tags": [], + "license": "Sustainable Use License", + "website": "https://n8n.io" + }, + { + "slug": "activepieces", + "name": "Activepieces", + "category": "Automation", + "is_open_source": true, + "description": "Open source alternative to Zapier. 
Automate your work with 200+ apps.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=activepieces.com", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://www.activepieces.com" + }, + { + "slug": "tableau", + "name": "Tableau", + "category": "Analytics", + "is_open_source": false, + "description": "Powerful data visualization and business intelligence platform.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tableau.com", + "pricing_model": "Paid (Seat-based)", + "avg_monthly_cost": 70, + "alternatives": [ + "metabase", + "superset" + ], + "tags": [], + "website": "https://www.tableau.com" + }, + { + "slug": "metabase", + "name": "Metabase", + "category": "Analytics", + "is_open_source": true, + "description": "The simplest, fastest way to get business intelligence and analytics throughout your company.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=metabase.com", + "alternatives": [], + "tags": [], + "license": "AGPLv3", + "website": "https://www.metabase.com" + }, + { + "slug": "superset", + "name": "Apache Superset", + "category": "Analytics", + "is_open_source": true, + "description": "Enterprise-ready business intelligence web application.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=superset.apache.org", + "alternatives": [], + "tags": [], + "license": "Apache 2.0", + "website": "https://superset.apache.org" + }, + { + "slug": "auth0", + "name": "Auth0", + "category": "Security", + "is_open_source": false, + "description": "The leading authentication and authorization platform.", + "logo_url": "/logos/auth0.svg", + "pricing_model": "Paid (MAU-based)", + "avg_monthly_cost": 23, + "alternatives": [ + "keycloak", + "authentik" + ], + "tags": [], + "website": "https://auth0.com" + }, + { + "slug": "keycloak", + "name": "Keycloak", + "category": "Security", + "is_open_source": true, + "description": "Open source identity and access management for modern 
applications and services.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=keycloak.org", + "alternatives": [], + "tags": [], + "license": "Apache 2.0", + "website": "https://www.keycloak.org" + }, + { + "slug": "okta", + "name": "Okta", + "category": "Security", + "is_open_source": false, + "description": "The World's Identity Company, providing enterprise-grade IAM.", + "logo_url": "/logos/okta.svg", + "pricing_model": "Paid (User-based)", + "avg_monthly_cost": 6, + "alternatives": [ + "authentik", + "keycloak" + ], + "tags": [], + "website": "https://okta.com" + }, + { + "slug": "authentik", + "name": "Authentik", + "category": "Security", + "is_open_source": true, + "description": "The overall-best open-source identity provider, focused on flexibility and versatility.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=goauthentik.io", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://goauthentik.io" + }, + { + "slug": "s3", + "name": "Amazon S3", + "category": "Cloud Infrastructure", + "is_open_source": false, + "description": "Object storage built to retrieve any amount of data from anywhere.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=aws.amazon.com", + "pricing_model": "Paid (Usage-based)", + "avg_monthly_cost": 23, + "alternatives": [ + "minio" + ], + "tags": [], + "website": "https://aws.amazon.com/s3" + }, + { + "slug": "minio", + "name": "MinIO", + "category": "Cloud Infrastructure", + "is_open_source": true, + "description": "High-performance, S3-compatible object storage for AI and enterprise data.", + "logo_url": "/logos/minio.svg", + "alternatives": [], + "tags": [], + "license": "AGPLv3", + "website": "https://min.io" + }, + { + "slug": "zendesk", + "name": "Zendesk", + "category": "Support", + "is_open_source": false, + "description": "The leader in customer service and engagement software.", + "logo_url": "/logos/zendesk.svg", + "pricing_model": "Paid 
(Agent-based)", + "avg_monthly_cost": 19, + "alternatives": [ + "zammad" + ], + "tags": [], + "website": "https://www.zendesk.com" + }, + { + "slug": "zammad", + "name": "Zammad", + "category": "Support", + "is_open_source": true, + "description": "A web-based, open source helpdesk/customer support system with many features.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=zammad.org", + "alternatives": [], + "tags": [], + "license": "AGPLv3", + "website": "https://zammad.org" + }, + { + "slug": "workday", + "name": "Workday", + "category": "HR", + "is_open_source": false, + "description": "Enterprise management cloud for finance and human resources.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=workday.com", + "pricing_model": "Paid (Enterprise)", + "avg_monthly_cost": 45, + "alternatives": [ + "orangehrm" + ], + "tags": [], + "website": "https://www.workday.com" + }, + { + "slug": "orangehrm", + "name": "OrangeHRM", + "category": "HR", + "is_open_source": true, + "description": "The world's most popular open source human resource management software.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=orangehrm.com", + "alternatives": [], + "tags": [], + "license": "GPLv2", + "website": "https://www.orangehrm.com" + }, + { + "slug": "m365", + "name": "Microsoft 365", + "category": "Productivity", + "is_open_source": false, + "description": "The world's most popular office suite and cloud collaboration platform.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=office.com", + "pricing_model": "Paid (Subscription)", + "avg_monthly_cost": 12, + "alternatives": [ + "onlyoffice", + "nextcloud" + ], + "tags": [], + "website": "https://www.office.com" + }, + { + "slug": "onlyoffice", + "name": "ONLYOFFICE", + "category": "Productivity", + "is_open_source": true, + "description": "Powerful online document editors for text, spreadsheets, and presentations. 
Highly compatible with MS Office.", + "logo_url": "/logos/onlyoffice.svg", + "alternatives": [], + "tags": [], + "license": "AGPLv3", + "website": "https://www.onlyoffice.com" + }, + { + "slug": "shopify", + "name": "Shopify", + "category": "E-commerce", + "is_open_source": false, + "description": "Commercial platform that allows anyone to set up an online store.", + "logo_url": "/logos/shopify.svg", + "pricing_model": "Paid (Subscription)", + "avg_monthly_cost": 39, + "alternatives": [ + "medusa", + "saleor" + ], + "tags": [], + "website": "https://www.shopify.com" + }, + { + "slug": "medusa", + "name": "Medusa.js", + "category": "E-commerce", + "is_open_source": true, + "description": "The open-source alternative to Shopify. Building blocks for digital commerce.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=medusajs.com", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://medusajs.com" + }, + { + "slug": "docusign", + "name": "DocuSign", + "category": "Legal", + "is_open_source": false, + "description": "The world's #1 way to sign electronically on practically any device, from almost anywhere, at any time.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=docusign.com", + "pricing_model": "Paid (Envelope-based)", + "avg_monthly_cost": 25, + "alternatives": [ + "documenso" + ], + "tags": [], + "website": "https://www.docusign.com" + }, + { + "slug": "documenso", + "name": "Documenso", + "category": "Legal", + "is_open_source": true, + "description": "The open-source DocuSign alternative. 
We aim to be the world's most trusted document signing platform.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=documenso.com", + "alternatives": [], + "tags": [], + "license": "AGPL-3.0", + "website": "https://documenso.com" + }, + { + "slug": "mailchimp", + "name": "Mailchimp", + "category": "Marketing", + "is_open_source": false, + "description": "All-in-one marketing platform that helps you manage and talk to your clients, customers, and other interested parties.", + "logo_url": "/logos/mailchimp.svg", + "pricing_model": "Paid (Contact-based)", + "avg_monthly_cost": 13, + "alternatives": [ + "listmonk", + "mautic" + ], + "tags": [], + "website": "https://mailchimp.com" + }, + { + "slug": "listmonk", + "name": "Listmonk", + "category": "Marketing", + "is_open_source": true, + "description": "High performance, self-hosted newsletter and mailing list manager with a modern dashboard.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=listmonk.app", + "alternatives": [], + "tags": [], + "license": "AGPL-3.0", + "website": "https://listmonk.app" + }, + { + "slug": "mautic", + "name": "Mautic", + "category": "Marketing", + "is_open_source": true, + "description": "World's largest open source marketing automation project.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mautic.org", + "alternatives": [], + "tags": [], + "license": "GPL-3.0", + "website": "https://www.mautic.org" + }, + { + "slug": "statuspage", + "name": "Statuspage", + "category": "Monitoring", + "is_open_source": false, + "description": "The best way to communicate status and downtime to your customers.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=atlassian.com", + "pricing_model": "Paid (Atlassian)", + "avg_monthly_cost": 29, + "alternatives": [ + "uptime-kuma" + ], + "tags": [], + "website": "https://www.atlassian.com/software/statuspage" + }, + { + "slug": "uptime-kuma", + "name": "Uptime Kuma", + "category": "Monitoring", + 
"is_open_source": true, + "description": "A fancy self-hosted monitoring tool.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=uptime.kuma.pet", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://uptime.kuma.pet" + }, + { + "slug": "datadog", + "name": "Datadog", + "category": "Monitoring", + "is_open_source": false, + "description": "Modern monitoring and security that gives you full visibility into your applications and infrastructure.", + "logo_url": "/logos/datadog.svg", + "pricing_model": "Paid (Usage-based)", + "avg_monthly_cost": 23, + "alternatives": [ + "signoz" + ], + "tags": [], + "website": "https://www.datadoghq.com" + }, + { + "slug": "signoz", + "name": "SigNoz", + "category": "Monitoring", + "is_open_source": true, + "description": "Open source observability platform. SigNoz helps developers monitor applications and troubleshoot problems.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=signoz.io", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://signoz.io" + }, + { + "slug": "typeform", + "name": "Typeform", + "category": "Productivity", + "is_open_source": false, + "description": "Build beautiful, interactive forms, surveys, quizzes, and something else entirely.", + "logo_url": "/logos/typeform.svg", + "pricing_model": "Paid (Response-based)", + "avg_monthly_cost": 25, + "alternatives": [ + "tally" + ], + "tags": [], + "website": "https://www.typeform.com" + }, + { + "slug": "tally", + "name": "Tally", + "category": "Productivity", + "is_open_source": false, + "description": "The simplest way to create forms. 
Tally is a new type of form builder that works like a doc.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tally.so", + "pricing_model": "Free/Paid", + "alternatives": [], + "tags": [ + "Forms", + "Surveys", + "No-code" + ], + "website": "https://tally.so" + }, + { + "slug": "confluence", + "name": "Confluence", + "category": "Productivity", + "is_open_source": false, + "description": "Your remote-friendly team workspace where knowledge and collaboration meet.", + "logo_url": "/logos/confluence.svg", + "pricing_model": "Paid (Atlassian)", + "avg_monthly_cost": 10, + "alternatives": [ + "outline" + ], + "tags": [], + "website": "https://www.atlassian.com/software/confluence" + }, + { + "slug": "outline", + "name": "Outline", + "category": "Productivity", + "is_open_source": true, + "description": "Fast, collaborative, knowledge base for your team built using React and Markdown.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=getoutline.com", + "alternatives": [], + "tags": [], + "license": "Other", + "website": "https://www.getoutline.com" + }, + { + "slug": "hootsuite", + "name": "Hootsuite", + "category": "Marketing", + "is_open_source": false, + "description": "Social media marketing and management dashboard.", + "logo_url": "/logos/hootsuite.svg", + "pricing_model": "Paid (Seat-based)", + "avg_monthly_cost": 49, + "alternatives": [ + "mixpost" + ], + "tags": [], + "website": "https://www.hootsuite.com" + }, + { + "slug": "mixpost", + "name": "Mixpost", + "category": "Marketing", + "is_open_source": true, + "description": "Self-hosted social media management software.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mixpost.app", + "alternatives": [], + "tags": [], + "license": "Other", + "website": "https://mixpost.app" + }, + { + "slug": "codespaces", + "name": "GitHub Codespaces", + "category": "DevOps", + "is_open_source": false, + "description": "Fast, cloud-hosted developer environments.", + "logo_url": 
"/logos/codespaces.svg", + "pricing_model": "Paid (Usage-based)", + "avg_monthly_cost": 15, + "alternatives": [ + "coder" + ], + "tags": [], + "website": "https://github.com/features/codespaces" + }, + { + "slug": "coder", + "name": "Coder", + "category": "DevOps", + "is_open_source": true, + "description": "Provision software development environments as code on your infrastructure.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coder.com", + "alternatives": [], + "tags": [], + "license": "AGPL-3.0", + "website": "https://coder.com" + }, + { + "slug": "quickbooks", + "name": "QuickBooks", + "category": "Financial", + "is_open_source": false, + "description": "Smart, simple online accounting software for small businesses.", + "logo_url": "/logos/quickbooks.svg", + "pricing_model": "Paid (Monthly Subscription)", + "avg_monthly_cost": 25, + "alternatives": [ + "akaunting", + "erpnext" + ], + "tags": [], + "website": "https://quickbooks.intuit.com" + }, + { + "slug": "akaunting", + "name": "Akaunting", + "category": "Financial", + "is_open_source": true, + "description": "Free and open source online accounting software for small businesses and freelancers.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=akaunting.com", + "alternatives": [], + "tags": [], + "license": "GPL-3.0", + "website": "https://akaunting.com" + }, + { + "slug": "premiere", + "name": "Adobe Premiere Pro", + "category": "Creative", + "is_open_source": false, + "description": "Industry-leading video editing software for film, TV, and the web.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=adobe.com", + "pricing_model": "Paid (Creative Cloud)", + "avg_monthly_cost": 35, + "alternatives": [ + "kdenlive" + ], + "tags": [], + "website": "https://www.adobe.com/products/premiere.html" + }, + { + "slug": "kdenlive", + "name": "Kdenlive", + "category": "Creative", + "is_open_source": true, + "description": "Open source video editing software based on 
the MLT Framework and KDE.", + "logo_url": "/logos/kdenlive.svg", + "alternatives": [], + "tags": [], + "license": "GPL-3.0", + "website": "https://kdenlive.org" + }, + { + "slug": "dashlane", + "name": "Dashlane", + "category": "Security", + "is_open_source": false, + "description": "Cloud-based password manager and digital wallet.", + "logo_url": "/logos/dashlane.svg", + "pricing_model": "Paid (Subscription)", + "avg_monthly_cost": 8, + "alternatives": [ + "vaultwarden", + "bitwarden" + ], + "tags": [], + "website": "https://www.dashlane.com" + }, + { + "slug": "vaultwarden", + "name": "Vaultwarden", + "category": "Security", + "is_open_source": true, + "description": "Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=bitwarden.com", + "alternatives": [], + "tags": [], + "license": "AGPL-3.0", + "website": "https://github.com/dani-garcia/vaultwarden" + }, + { + "slug": "pipedrive", + "name": "Pipedrive", + "category": "CRM", + "is_open_source": false, + "description": "Sales CRM & pipeline management software that helps you get more organized.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=pipedrive.com", + "pricing_model": "Paid (Seat-based)", + "avg_monthly_cost": 15, + "alternatives": [ + "twenty" + ], + "tags": [], + "website": "https://www.pipedrive.com" + }, + { + "slug": "twenty", + "name": "Twenty", + "category": "CRM", + "is_open_source": true, + "description": "A modern open-source CRM alternative to Salesforce and Pipedrive.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=twenty.com", + "alternatives": [], + "tags": [], + "license": "AGPL-3.0", + "website": "https://twenty.com" + }, + { + "slug": "sentry", + "name": "Sentry", + "category": "Monitoring", + "is_open_source": false, + "description": "Developer-first error tracking and performance monitoring.", + "logo_url": "/logos/sentry.svg", + "pricing_model": 
"Paid (Usage-based)", + "avg_monthly_cost": 26, + "alternatives": [ + "glitchtip" + ], + "tags": [], + "website": "https://sentry.io" + }, + { + "slug": "glitchtip", + "name": "GlitchTip", + "category": "Monitoring", + "is_open_source": true, + "description": "Open source error tracking that's compatible with Sentry SDKs.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=glitchtip.com", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://glitchtip.com" + }, + { + "slug": "calendly", + "name": "Calendly", + "category": "Productivity", + "is_open_source": false, + "description": "The modern scheduling platform that makes 'finding time' a breeze.", + "logo_url": "/logos/calendly.svg", + "pricing_model": "Paid (Seat-based)", + "avg_monthly_cost": 10, + "alternatives": [ + "calcom" + ], + "tags": [], + "website": "https://calendly.com" + }, + { + "slug": "calcom", + "name": "Cal.com", + "category": "Productivity", + "is_open_source": true, + "description": "The open-source Calendly alternative. 
Take control of your scheduling.", + "logo_url": "/logos/calcom.svg", + "alternatives": [], + "tags": [], + "license": "AGPL-3.0", + "website": "https://cal.com" + }, + { + "slug": "intercom", + "name": "Intercom", + "category": "Support", + "is_open_source": false, + "description": "The business messenger that builds real-time connections.", + "logo_url": "/logos/intercom.svg", + "pricing_model": "Paid (Seat-based)", + "avg_monthly_cost": 39, + "alternatives": [ + "chaskiq", + "chatwoot" + ], + "tags": [], + "website": "https://www.intercom.com" + }, + { + "slug": "chaskiq", + "name": "Chaskiq", + "category": "Support", + "is_open_source": true, + "description": "Open source conversational marketing platform alternative to Intercom and Drift.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=chaskiq.io", + "alternatives": [], + "tags": [], + "license": "GPL-3.0", + "website": "https://chaskiq.io" + }, + { + "slug": "mailgun", + "name": "Mailgun", + "category": "Marketing", + "is_open_source": false, + "description": "Electronic mail delivery service for developers.", + "logo_url": "/logos/mailgun.svg", + "pricing_model": "Paid (Usage-based)", + "avg_monthly_cost": 15, + "alternatives": [ + "postal" + ], + "tags": [], + "website": "https://www.mailgun.com" + }, + { + "slug": "postal", + "name": "Postal", + "category": "Marketing", + "is_open_source": true, + "description": "A fully featured open source mail delivery platform for incoming & outgoing e-mail.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=postalserver.io", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://postalserver.io" + }, + { + "slug": "segment", + "name": "Segment", + "category": "Marketing", + "is_open_source": false, + "description": "The leading customer data platform (CDP).", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=segment.com", + "pricing_model": "Paid (Usage-based)", + "avg_monthly_cost": 120, + 
"alternatives": [ + "jitsu" + ], + "tags": [], + "website": "https://segment.com" + }, + { + "slug": "jitsu", + "name": "Jitsu", + "category": "Marketing", + "is_open_source": true, + "description": "High-performance data collection platform and open-source Segment alternative.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jitsu.com", + "alternatives": [], + "tags": [], + "license": "MIT", + "website": "https://jitsu.com" + }, + { + "slug": "dokku", + "name": "Dokku", + "category": "DevOps", + "is_open_source": true, + "description": "A docker-powered PaaS that helps you build and manage the lifecycle of applications", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=dokku.com", + "alternatives": [], + "tags": [], + "license": "MIT License", + "website": "https://dokku.com" + }, + { + "slug": "chatgpt", + "name": "ChatGPT / OpenAI", + "category": "AI Models", + "is_open_source": false, + "description": "The leading commercial AI assistant and API platform (GPT-4o, o1).", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openai.com", + "pricing_model": "Paid/Freemium", + "avg_monthly_cost": 20, + "alternatives": [ + "llama", + "deepseek", + "mistral" + ], + "tags": [ + "AI", + "LLM", + "Chat" + ], + "website": "https://openai.com" + }, + { + "slug": "llama", + "name": "Meta Llama 3.1", + "category": "AI Models", + "is_open_source": true, + "description": "Meta's flagship open-weight model with 128K context. 
Supports 8B, 70B, and 405B parameters.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=meta.com", + "alternatives": [], + "tags": [ + "AI", + "LLM", + "128K Context" + ], + "license": "Llama 3.1 Community License", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 128000, + "parameters_total_b": 405, + "is_multimodal": false + }, + "website": "https://llama.meta.com" + }, + { + "slug": "deepseek", + "name": "DeepSeek-V3 / R1", + "category": "AI Models", + "is_open_source": true, + "description": "Powerful open-source models including V3 (671B) and R1 (Reasoning). Rivals GPT-4o and o1.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com", + "alternatives": [], + "tags": [ + "AI", + "LLM", + "Reasoning" + ], + "license": "MIT License", + "ai_metadata": { + "vram_inference_gb": 160, + "context_window_tokens": 128000, + "parameters_total_b": 671, + "parameters_active_b": 37, + "is_multimodal": false + }, + "website": "https://deepseek.com" + }, + { + "slug": "mistral", + "name": "Mistral Large 2", + "category": "AI Models", + "is_open_source": true, + "description": "Flagship 123B model from Mistral AI. 
Optimized for multilingual, reasoning, and coding tasks.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mistral.ai", + "alternatives": [], + "tags": [ + "AI", + "LLM", + "EU" + ], + "license": "Mistral Research License", + "ai_metadata": { + "vram_inference_gb": 80, + "context_window_tokens": 128000, + "parameters_total_b": 123, + "is_multimodal": false + }, + "website": "https://mistral.ai" + }, + { + "slug": "gemma", + "name": "Google Gemma 2", + "category": "AI Models", + "is_open_source": true, + "description": "Google's open-weight models (9B, 27B) with class-leading performance and efficient architecture.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=google.com", + "alternatives": [], + "tags": [ + "AI", + "LLM", + "Google" + ], + "license": "Gemma License", + "ai_metadata": { + "vram_inference_gb": 18, + "context_window_tokens": 8192, + "parameters_total_b": 27, + "is_multimodal": false + }, + "website": "https://ai.google.dev/gemma" + }, + { + "slug": "qwen", + "name": "Qwen 2.5", + "category": "AI Models", + "is_open_source": true, + "description": "Comprehensive LLM series from Alibaba Cloud, excelling in coding, math, and multilingual support.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=alibaba.com", + "alternatives": [], + "tags": [ + "AI", + "LLM", + "Coding" + ], + "license": "Apache License 2.0", + "ai_metadata": { + "vram_inference_gb": 40, + "context_window_tokens": 128000, + "parameters_total_b": 72, + "is_multimodal": false + }, + "website": "https://qwenlm.github.io" + }, + { + "slug": "midjourney", + "name": "Midjourney", + "category": "AI Image Generation", + "is_open_source": false, + "description": "Leading AI image generation tool, known for artistic and photorealistic outputs.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=midjourney.com", + "pricing_model": "Paid (Subscription)", + "avg_monthly_cost": 10, + "alternatives": [ + "stable-diffusion", + "flux" + ], 
+ "tags": [ + "AI", + "Image", + "Art" + ], + "website": "https://midjourney.com" + }, + { + "slug": "stable-diffusion", + "name": "Stable Diffusion 3.5", + "category": "AI Image Generation", + "is_open_source": true, + "description": "The latest open-weights image generation model from Stability AI, offering superior prompt adherence.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=stability.ai", + "alternatives": [], + "tags": [ + "AI", + "Image", + "Prompt Adherence" + ], + "license": "Stability Community License", + "website": "https://stability.ai" + }, + { + "slug": "mochi-1", + "name": "Mochi-1", + "category": "AI Video Generation", + "is_open_source": true, + "description": "High-fidelity open-weights video generation model from Genmo, rivaling closed-source alternatives.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=genmo.ai", + "alternatives": [], + "tags": [ + "AI", + "Video", + "Motion" + ], + "license": "Apache License 2.0", + "website": "https://www.genmo.ai" + }, + { + "slug": "hunyuan-video", + "name": "HunyuanVideo 1.5", + "category": "AI Video Generation", + "is_open_source": true, + "description": "Tencent's state-of-the-art open-source video generation model with 13B parameters.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tencent.com", + "alternatives": [], + "tags": [ + "AI", + "Video", + "HD" + ], + "license": "Apache License 2.0", + "website": "https://github.com/Tencent/HunyuanVideo" + }, + { + "slug": "flux", + "name": "FLUX", + "category": "AI Image Generation", + "is_open_source": true, + "description": "Next-gen open image generation model from Black Forest Labs. 
State-of-the-art quality rivaling Midjourney.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=blackforestlabs.ai", + "alternatives": [], + "tags": [ + "AI", + "Image", + "New" + ], + "license": "Apache License 2.0", + "website": "https://blackforestlabs.ai" + }, + { + "slug": "github-copilot", + "name": "GitHub Copilot", + "category": "AI Coding", + "is_open_source": false, + "description": "AI pair programmer by GitHub/OpenAI. Integrates into VS Code and JetBrains.", + "logo_url": "/logos/github-copilot.svg", + "pricing_model": "Paid (Subscription)", + "avg_monthly_cost": 10, + "alternatives": [ + "continue-dev", + "tabby" + ], + "tags": [ + "AI", + "Coding", + "IDE" + ], + "website": "https://github.com/features/copilot" + }, + { + "slug": "continue-dev", + "name": "Continue", + "category": "AI Coding", + "is_open_source": true, + "description": "Open-source AI code assistant for VS Code and JetBrains. Use any model (local or API).", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=continue.dev", + "alternatives": [], + "tags": [ + "AI", + "Coding", + "IDE" + ], + "license": "Apache License 2.0", + "website": "https://continue.dev" + }, + { + "slug": "tabby", + "name": "TabbyML", + "category": "AI Coding", + "is_open_source": true, + "description": "Self-hosted AI coding assistant. 
An open-source, self-hosted alternative to GitHub Copilot.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tabby.tabbyml.com", + "alternatives": [], + "tags": [ + "AI", + "Coding", + "Self-Hosted" + ], + "license": "Apache License 2.0", + "website": "https://tabby.tabbyml.com" + }, + { + "slug": "ollama", + "name": "Ollama", + "category": "AI Runners", + "is_open_source": true, + "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models locally.", + "logo_url": "/logos/ollama.svg", + "alternatives": [], + "tags": [ + "AI", + "Local", + "Runner" + ], + "license": "MIT License", + "website": "https://ollama.com" + }, + { + "slug": "open-webui", + "name": "Open WebUI", + "category": "AI Interfaces", + "is_open_source": true, + "description": "User-friendly WebUI for LLMs (Formerly Ollama WebUI). Supports Ollama and OpenAI-compatible APIs.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openwebui.com", + "alternatives": [], + "tags": [ + "AI", + "UI", + "Chat" + ], + "license": "MIT License", + "website": "https://openwebui.com" + }, + { + "slug": "jan", + "name": "Jan", + "category": "AI Interfaces", + "is_open_source": true, + "description": "Jan is an open source alternative to ChatGPT that runs 100% offline on your computer.", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jan.ai", + "alternatives": [], + "tags": [ + "AI", + "Desktop", + "Offline" + ], + "license": "AGPL-3.0", + "website": "https://jan.ai" + }, + { + "slug": "lm-studio", + "name": "LM Studio", + "category": "AI Runners", + "is_open_source": false, + "description": "Discover, download, and run local LLMs. 
Easy GUI for GGUF models.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=lmstudio.ai",
+ "pricing_model": "Free (Proprietary)",
+ "alternatives": [
+ "ollama",
+ "gpt4all"
+ ],
+ "tags": [
+ "AI",
+ "Desktop",
+ "GUI"
+ ],
+ "website": "https://lmstudio.ai"
+ },
+ {
+ "slug": "gpt4all",
+ "name": "GPT4All",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "description": "Run open-source LLMs locally on your CPU and GPU. No internet required.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=gpt4all.io",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Desktop",
+ "CPU"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://gpt4all.io"
+ },
+ {
+ "slug": "localai",
+ "name": "LocalAI",
+ "category": "AI Runners",
+ "is_open_source": true,
+ "description": "LocalAI, the free, Open Source OpenAI alternative. Drop-in replacement for OpenAI API.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=localai.io",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "API",
+ "Backend"
+ ],
+ "license": "MIT License",
+ "website": "https://localai.io"
+ },
+ {
+ "slug": "flowise",
+ "name": "Flowise",
+ "category": "AI Tools",
+ "is_open_source": true,
+ "description": "Drag & drop UI to build your customized LLM flow using LangChainJS.",
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=flowiseai.com",
+ "alternatives": [],
+ "tags": [
+ "AI",
+ "Low-Code",
+ "LangChain"
+ ],
+ "license": "Apache License 2.0",
+ "website": "https://flowiseai.com"
+ }
+]
\ No newline at end of file
diff --git a/data/tools.json b/data/tools.json
new file mode 100644
index 0000000..ca9a71b
--- /dev/null
+++ b/data/tools.json
@@ -0,0 +1,18080 @@
+[
+ {
+ "slug": "firebase",
+ "name": "Firebase",
+ "category": "Backend as a Service",
+ "is_open_source": false,
+ "pricing_model": "Paid/Freemium",
+ "website": "https://firebase.google.com",
+ "description": "Google's app development platform.",
+ "alternatives": [ 
+ "supabase", + "appwrite", + "pocketbase" + ], + "tags": [ + "Cloud", + "Database", + "Auth" + ], + "logo_url": "/logos/firebase.svg", + "avg_monthly_cost": 25, + "pros": [ + "Seamless Google ecosystem integration", + "Generous free tier (Spark plan)", + "Real-time database out of the box", + "Excellent mobile SDK support", + "Cloud Functions for serverless logic" + ], + "cons": [ + "Vendor lock-in to Google", + "Pricing can spike unpredictably at scale", + "Limited query capabilities vs SQL" + ] + }, + { + "slug": "supabase", + "name": "Supabase", + "category": "Backend as a Service", + "is_open_source": true, + "github_repo": "supabase/supabase", + "stars": 97401, + "website": "https://supabase.com", + "description": "The Postgres development platform. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications.", + "pros": [ + "Postgres under the hood", + "No vendor lock-in" + ], + "cons": [ + "Self-hosting can be complex" + ], + "last_commit": "2026-02-09T16:09:10Z", + "language": "TypeScript", + "license": "Apache License 2.0", + "tags": [ + "Database", + "Realtime", + "Postgres", + "AI" + ], + "logo_url": "/logos/supabase.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/supabase" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "appwrite", + "name": "Appwrite", + "category": "Backend as a Service", + "is_open_source": true, + "github_repo": "appwrite/appwrite", + "stars": 54727, + "website": "https://appwrite.io", + "description": "Appwrite® - complete cloud infrastructure for your web, mobile and AI apps. 
Including Auth, Databases, Storage, Functions, Messaging, Hosting, Realtime and more", + "pros": [ + "Self-hosted with a single Docker command", + "Modular architecture — use only what you need" + ], + "cons": [ + "Smaller ecosystem than Firebase or Supabase", + "Limited built-in analytics and reporting" + ], + "last_commit": "2026-02-09T16:12:32Z", + "language": "TypeScript", + "license": "BSD 3-Clause \"New\" or \"Revised\" License", + "tags": [ + "Database", + "Auth", + "Self-Hosted" + ], + "logo_url": "/logos/appwrite.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/appwrite" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "pocketbase", + "name": "PocketBase", + "category": "Backend as a Service", + "is_open_source": true, + "github_repo": "pocketbase/pocketbase", + "website": "https://pocketbase.io", + "description": "Open Source realtime backend in 1 file", + "pros": [ + "Ships as a single binary — no dependencies", + "Deploy anywhere in seconds with zero config", + "Embedded SQLite with realtime subscriptions" + ], + "cons": [ + "SQLite only (for now)" + ], + "stars": 55980, + "last_commit": "2026-02-01T08:09:48Z", + "language": "Go", + "license": "MIT License", + "logo_url": "/logos/pocketbase.svg", + "deployment": { + "image": "pocketbase/pocketbase:latest", + "port": 8090, + "volumes": [ + "./pb_data:/pb/pb_data" + ], + "command": "serve --http=0.0.0.0:8090", + "type": "docker-compose", + "local_path": "./.docker-deploy/pocketbase" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "salesforce", + "name": "Salesforce", + "category": "CRM", + "is_open_source": false, + "pricing_model": "Paid", + "avg_monthly_cost": 25, + "website": "https://salesforce.com", + "description": "The world's #1 CRM.", + "alternatives": [ + "odoo", + "erpnext" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=salesforce.com", + "pros": [ + "Industry-leading CRM platform", + 
"Massive app marketplace (AppExchange)",
+ "Highly customizable workflows",
+ "Enterprise-grade security and compliance"
+ ],
+ "cons": [
+ "Expensive per-seat licensing",
+ "Steep learning curve",
+ "Heavy and complex for small teams"
+ ]
+ },
+ {
+ "slug": "slack",
+ "name": "Slack",
+ "category": "Communication",
+ "is_open_source": false,
+ "pricing_model": "Paid/Freemium",
+ "website": "https://slack.com",
+ "description": "Team communication platform.",
+ "alternatives": [
+ "mattermost",
+ "rocketchat"
+ ],
+ "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=slack.com",
+ "avg_monthly_cost": 12,
+ "pros": [
+ "Best-in-class team communication UX",
+ "Huge integration ecosystem (2,000+ apps)",
+ "Powerful search across conversations",
+ "Thread-based discussions reduce noise"
+ ],
+ "cons": [
+ "Expensive at scale ($8.75+/user/mo)",
+ "Can become a constant distraction",
+ "Message history limits on free plan"
+ ]
+ },
+ {
+ "slug": "mattermost",
+ "name": "Mattermost",
+ "category": "Communication",
+ "is_open_source": true,
+ "github_repo": "mattermost/mattermost",
+ "website": "https://mattermost.com",
+ "description": "Mattermost is an open source platform for secure collaboration across the entire software development lifecycle.",
+ "pros": [
+ "Enterprise-grade security with SOC2 and HIPAA compliance",
+ "Granular access control and audit logging",
+ "Slack-compatible webhook and bot ecosystem"
+ ],
+ "cons": [
+ "Self-hosting maintenance"
+ ],
+ "stars": 35213,
+ "last_commit": "2026-02-09T16:03:54Z",
+ "language": "TypeScript",
+ "license": "Other",
+ "logo_url": "/logos/mattermost.svg",
+ "deployment": {
+ "type": "docker-compose",
+ "local_path": "./.docker-deploy/mattermost"
+ },
+ "referral_url": "https://m.do.co/c/2ed27757a361"
+ },
+ {
+ "slug": "rocketchat",
+ "name": "Rocket.Chat",
+ "category": "Communication",
+ "is_open_source": true,
+ "github_repo": "RocketChat/Rocket.Chat",
+ "website": "https://rocket.chat",
+ 
"description": "The Secure CommsOS™ for mission-critical operations", + "pros": [ + "Unified inbox with omnichannel support for live chat, email, and social", + "Highly customizable with white-labeling options", + "End-to-end encrypted messaging available" + ], + "cons": [ + "Resource intensive" + ], + "stars": 44546, + "last_commit": "2026-02-09T16:20:40Z", + "language": "TypeScript", + "license": "Other", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=rocket.chat", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/rocketchat" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "jira", + "name": "Jira", + "category": "Project Management", + "is_open_source": false, + "pricing_model": "Paid", + "avg_monthly_cost": 15, + "website": "https://www.atlassian.com/software/jira", + "description": "Issue tracking and project management tool.", + "alternatives": [ + "plane", + "taiga" + ], + "logo_url": "/logos/jira.svg", + "pros": [ + "Industry standard for project management", + "Deep Agile/Scrum/Kanban support", + "Powerful custom workflows and automation", + "Extensive integration ecosystem" + ], + "cons": [ + "Notoriously complex UI", + "Slow performance with large projects", + "Expensive for growing teams" + ] + }, + { + "slug": "plane", + "name": "Plane", + "category": "Project Management", + "is_open_source": true, + "github_repo": "makeplane/plane", + "website": "https://plane.so", + "description": "🔥🔥🔥 Open-source Jira, Linear, Monday, and ClickUp alternative. 
Plane is a modern project management platform to manage tasks, sprints, docs, and triage.", + "pros": [ + "Clean, modern interface inspired by Linear", + "Blazing fast — sub-100ms interactions", + "Built-in cycles, modules, and views" + ], + "cons": [ + "Still relatively new" + ], + "stars": 45490, + "last_commit": "2026-02-09T13:56:47Z", + "language": "TypeScript", + "license": "GNU Affero General Public License v3.0", + "logo_url": "/logos/plane.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/plane" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "taiga", + "name": "Taiga", + "category": "Project Management", + "is_open_source": true, + "github_repo": "taigaio/taiga-back", + "website": "https://taiga.io", + "description": null, + "pros": [ + "Beautiful, kanban and scrum boards with drag-and-drop", + "Full Agile toolkit: epics, sprints, user stories", + "Built-in wiki and project documentation" + ], + "cons": [ + "Complex setup" + ], + "stars": 807, + "last_commit": "2026-01-09T07:28:59Z", + "language": "Python", + "license": "Mozilla Public License 2.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=taiga.io", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/taiga" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "zoom", + "name": "Zoom", + "category": "Communication", + "is_open_source": false, + "pricing_model": "Paid/Freemium", + "avg_monthly_cost": 15, + "website": "https://zoom.us", + "description": "Video conferencing platform, cloud phone, webinars, and chat.", + "alternatives": [ + "jitsi-meet" + ], + "logo_url": "/logos/zoom.svg", + "pros": [ + "Reliable video quality even on poor connections", + "Easy to join without creating an account", + "Breakout rooms and webinar support for large events", + "Cross-platform with desktop, mobile, and web apps" + ], + "cons": [ + "Free plan limited to 40-minute meetings", + 
"Privacy concerns and past security issues", + "Zoom fatigue is real" + ] + }, + { + "slug": "jitsi-meet", + "name": "Jitsi Meet", + "category": "Communication", + "is_open_source": true, + "github_repo": "jitsi/jitsi-meet", + "website": "https://jitsi.org", + "description": "Jitsi Meet - Secure, Simple and Scalable Video Conferences that you use as a standalone app or embed in your web application.", + "pros": [ + "Join calls without creating an account", + "End-to-end encrypted video conferencing", + "Scales to hundreds of participants with Jitsi Videobridge" + ], + "cons": [ + "Performance on large calls" + ], + "stars": 28562, + "last_commit": "2026-02-09T12:49:10Z", + "language": "TypeScript", + "license": "Apache License 2.0", + "logo_url": "/logos/jitsi-meet.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/jitsi-meet" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "photoshop", + "name": "Adobe Photoshop", + "category": "Design", + "is_open_source": false, + "pricing_model": "Paid (Monthly)", + "avg_monthly_cost": 60, + "website": "https://www.adobe.com/products/photoshop.html", + "description": "Industry standard image editing software.", + "alternatives": [ + "gimp", + "krita" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.adobe.com", + "pros": [ + "Industry gold standard for image editing", + "Unmatched feature depth and precision", + "Huge plugin and template ecosystem", + "AI-powered generative fill and selection" + ], + "cons": [ + "Subscription-only pricing ($22.99/mo)", + "Steep learning curve for beginners", + "Resource-heavy — needs powerful hardware" + ] + }, + { + "slug": "gimp", + "name": "GIMP", + "category": "Design", + "is_open_source": true, + "github_repo": "GNOME/gimp", + "website": "https://www.gimp.org", + "description": "Read-only mirror of https://gitlab.gnome.org/GNOME/gimp", + "pros": [ + "Professional-grade photo editing tools rivaling 
Photoshop", + "Extensible with Python and Script-Fu plugins", + "Cross-platform with native support for PSD, TIFF, and RAW" + ], + "cons": [ + "Steep learning curve", + "Dated UI" + ], + "stars": 5960, + "last_commit": "2026-02-09T16:20:25Z", + "language": "C", + "license": "Other", + "logo_url": "/logos/gimp.svg", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "krita", + "name": "Krita", + "category": "Design", + "is_open_source": true, + "github_repo": "KDE/krita", + "website": "https://krita.org", + "description": "Krita is a free and open source cross-platform application that offers an end-to-end solution for creating digital art files from scratch built on the KDE and Qt frameworks.", + "pros": [ + "Modern brush engine with 100+ built-in presets", + "HDR painting and animation timeline support", + "Optimized for drawing tablets with pressure sensitivity" + ], + "cons": [ + "Less focused on photo manipulation" + ], + "stars": 9333, + "last_commit": "2026-02-09T13:47:56Z", + "language": "C++", + "license": "GNU General Public License v3.0", + "logo_url": "/logos/krita.svg", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "figma", + "name": "Figma", + "category": "Design", + "is_open_source": false, + "pricing_model": "Freemium/Paid", + "website": "https://www.figma.com", + "description": "Collaborative interface design tool.", + "alternatives": [ + "penpot" + ], + "logo_url": "/logos/figma.svg", + "avg_monthly_cost": 15, + "pros": [ + "Real-time multiplayer collaboration", + "Runs entirely in the browser", + "Excellent component and design system support", + "Free tier is generous for individuals" + ], + "cons": [ + "Owned by Adobe (future pricing concerns)", + "Offline support is limited", + "Performance with very large files can lag" + ] + }, + { + "slug": "penpot", + "name": "Penpot", + "category": "Design", + "is_open_source": true, + "github_repo": "penpot/penpot", + "website": "https://penpot.app", + 
"description": "Penpot: The open-source design tool for design and code collaboration", + "pros": [ + "Runs entirely in the browser — no desktop app needed", + "SVG-native design — exports are pixel-perfect at any scale", + "Real-time multiplayer collaboration" + ], + "cons": [ + "Newer ecosystem" + ], + "stars": 44155, + "last_commit": "2026-02-09T15:47:35Z", + "language": "Clojure", + "license": "Mozilla Public License 2.0", + "logo_url": "/logos/penpot.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/penpot" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "notion", + "name": "Notion", + "category": "Productivity", + "is_open_source": false, + "pricing_model": "Freemium/Paid", + "website": "https://www.notion.so", + "description": "All-in-one workspace.", + "alternatives": [ + "appflowy", + "affine" + ], + "logo_url": "/logos/notion.svg", + "avg_monthly_cost": 10, + "pros": [ + "All-in-one workspace (docs, wikis, databases)", + "Beautiful and intuitive interface", + "Powerful database views and relations", + "Great template gallery" + ], + "cons": [ + "Can be slow with large workspaces", + "Offline mode is unreliable", + "No true end-to-end encryption" + ] + }, + { + "slug": "appflowy", + "name": "AppFlowy", + "category": "Productivity", + "is_open_source": true, + "github_repo": "AppFlowy-IO/AppFlowy", + "website": "https://www.appflowy.io", + "description": "Bring projects, wikis, and teams together with AI. AppFlowy is the AI collaborative workspace where you achieve more without losing control of your data. 
The leading open source Notion alternative.", + "pros": [ + "Local-first architecture — your data never leaves your machine", + "Privacy-focused alternative to Notion", + "Built in Rust for native desktop performance" + ], + "cons": [ + "No web version (yet)" + ], + "stars": 68006, + "last_commit": "2026-01-28T09:20:38Z", + "language": "Dart", + "license": "GNU Affero General Public License v3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.appflowy.io", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/appflowy" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "affine", + "name": "AFFiNE", + "category": "Productivity", + "is_open_source": true, + "github_repo": "toeverything/AFFiNE", + "website": "https://affine.pro", + "description": "There can be more than Notion and Miro. AFFiNE(pronounced [ə‘fain]) is a next-gen knowledge base that brings planning, sorting and creating all together. Privacy first, open-source, customizable and ready to use. 
", + "pros": [ + "Modern block editor with Notion-like feel", + "Spatial canvas for whiteboarding and visual thinking", + "Hybrid local-first and cloud sync architecture" + ], + "cons": [ + "Still in beta" + ], + "stars": 62693, + "last_commit": "2026-02-09T11:16:50Z", + "language": "TypeScript", + "license": "Other", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=affine.pro", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/affine" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "google-analytics", + "name": "Google Analytics", + "category": "Analytics", + "is_open_source": false, + "pricing_model": "Free/Paid", + "website": "https://analytics.google.com", + "description": "Web analytics service.", + "alternatives": [ + "plausible", + "posthog", + "matomo" + ], + "logo_url": "/logos/google-analytics.svg", + "avg_monthly_cost": 150, + "pros": [ + "Industry-standard reporting with Google Ads and Search Console integration", + "Advanced audience segmentation and cohort analysis", + "Free tier handles up to 10M hits per month" + ], + "cons": [ + "Privacy concerns — data goes to Google", + "GA4 migration frustrated many users", + "Blocked by most ad blockers", + "Complex for beginners" + ] + }, + { + "slug": "plausible", + "name": "Plausible", + "category": "Analytics", + "is_open_source": true, + "github_repo": "plausible/analytics", + "website": "https://plausible.io", + "description": "Simple, open source, lightweight and privacy-friendly web analytics alternative to Google Analytics.", + "pros": [ + "Fully GDPR compliant with no cookies required", + "Lightweight script under 1KB — zero impact on page speed", + "Clean dashboard that shows what matters, nothing more" + ], + "cons": [ + "Limited advanced features" + ], + "stars": 24198, + "last_commit": "2026-02-09T16:20:52Z", + "language": "Elixir", + "license": "GNU Affero General Public License v3.0", + "tags": [ + "Analytics", + "Privacy", 
+ "GDPR" + ], + "logo_url": "/logos/plausible.svg", + "deployment": { + "image": "plausible/analytics:latest", + "port": 8000, + "env": [ + { + "key": "BASE_URL", + "value": "http://localhost:8000" + }, + { + "key": "SECRET_KEY_BASE", + "value": "REPLACE_WITH_RANDOM_STRING" + } + ], + "volumes": [ + "./plausible_db:/var/lib/clickhouse", + "./plausible_events:/var/lib/postgresql/data" + ], + "type": "docker-compose", + "local_path": "./.docker-deploy/plausible" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "posthog", + "name": "PostHog", + "category": "Analytics", + "is_open_source": true, + "github_repo": "PostHog/posthog", + "website": "https://posthog.com", + "description": "🦔 PostHog is an all-in-one developer platform for building successful products. We offer product analytics, web analytics, session replay, error tracking, feature flags, experimentation, surveys, data warehouse, a CDP, and an AI product assistant to help debug your code, ship features faster, and keep all your usage and customer data in one stack.", + "pros": [ + "Session recording with heatmaps and click tracking", + "Built-in feature flags, A/B testing, and surveys", + "Warehouse-native — query your data with SQL" + ], + "cons": [ + "Complex to self-host" + ], + "stars": 31181, + "last_commit": "2026-02-09T16:25:10Z", + "language": "Python", + "license": "Other", + "logo_url": "/logos/posthog.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/posthog" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "matomo", + "name": "Matomo", + "category": "Analytics", + "is_open_source": true, + "github_repo": "matomo-org/matomo", + "website": "https://matomo.org", + "description": "Empowering People Ethically 🚀 — Matomo is hiring! Join us → https://matomo.org/jobs Matomo is the leading open-source alternative to Google Analytics, giving you complete control and built-in privacy. 
Easily collect, visualise, and analyse data from websites & apps. Star us on GitHub ⭐️ – Pull Requests welcome! ", + "pros": [ + "Feature-rich analytics rivaling Google Analytics", + "GDPR and CCPA compliant out of the box", + "Heatmaps, session recordings, and funnel analysis included" + ], + "cons": [ + "UI feels dated" + ], + "stars": 21270, + "last_commit": "2026-02-09T15:36:30Z", + "language": "PHP", + "license": "GNU General Public License v3.0", + "logo_url": "/logos/matomo.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/matomo" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "1password", + "name": "1Password", + "category": "Security", + "is_open_source": false, + "pricing_model": "Paid", + "website": "https://1password.com", + "description": "Password manager.", + "alternatives": [ + "bitwarden", + "keepassxc" + ], + "logo_url": "/logos/1password.svg", + "avg_monthly_cost": 8, + "pros": [ + "Excellent cross-platform support", + "Travel Mode hides sensitive vaults", + "Watchtower alerts for compromised passwords", + "Family and team sharing built in" + ], + "cons": [ + "No free tier ($2.99/mo minimum)", + "Cloud-only — no self-hosting option", + "Subscription model with no lifetime option" + ] + }, + { + "slug": "bitwarden", + "name": "Bitwarden", + "category": "Security", + "is_open_source": true, + "github_repo": "bitwarden/server", + "website": "https://bitwarden.com", + "description": "Bitwarden infrastructure/backend (API, database, Docker, etc).", + "pros": [ + "Independently audited security with full transparency reports", + "Cross-platform apps for every OS, browser, and device", + "Organization vaults with fine-grained sharing controls" + ], + "cons": [ + "UI is functional but basic" + ], + "stars": 18027, + "last_commit": "2026-02-09T15:52:04Z", + "language": "C#", + "license": "Other", + "logo_url": "/logos/bitwarden.svg", + "deployment": { + "type": "docker-compose", + 
"local_path": "./.docker-deploy/bitwarden" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "keepassxc", + "name": "KeePassXC", + "category": "Security", + "is_open_source": true, + "github_repo": "keepassxreboot/keepassxc", + "website": "https://keepassxc.org", + "description": "KeePassXC is a cross-platform community-driven port of the Windows application “KeePass Password Safe”.", + "pros": [ + "Fully offline — database stored locally with AES-256 encryption", + "No cloud dependency — you control the sync method", + "Browser integration via KeePassXC-Browser extension" + ], + "cons": [ + "No automatic sync (requires Dropbox/Syncthing)" + ], + "stars": 25810, + "last_commit": "2026-01-18T15:46:48Z", + "language": "C++", + "license": "Other", + "logo_url": "/logos/keepassxc.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/keepassxc" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "heroku", + "name": "Heroku", + "category": "DevOps", + "is_open_source": false, + "pricing_model": "Paid", + "avg_monthly_cost": 8, + "website": "https://heroku.com", + "description": "Platform as a service.", + "alternatives": [ + "coolify", + "dokku" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=heroku.com", + "pros": [ + "Dead-simple deployment (git push)", + "Great for prototypes and MVPs", + "Managed Postgres included", + "Add-ons marketplace for common services" + ], + "cons": [ + "Eliminated free tier in 2022", + "Expensive at scale vs VPS", + "Limited container customization", + "Owned by Salesforce (less innovation)" + ] + }, + { + "slug": "coolify", + "name": "Coolify", + "category": "DevOps", + "is_open_source": true, + "github_repo": "coollabsio/coolify", + "website": "https://coolify.io", + "description": "An open-source, self-hostable PaaS alternative to Vercel, Heroku & Netlify that lets you easily deploy static sites, databases, full-stack applications and 280+ 
one-click services on your own servers.", + "pros": [ + "Polished, beautiful dashboard that rivals Vercel and Netlify", + "Deploy anything — Docker, static sites, databases, services", + "Automatic SSL, backups, and monitoring included" + ], + "cons": [ + "One-man project (mostly)" + ], + "stars": 50421, + "last_commit": "2026-02-09T16:01:12Z", + "language": "PHP", + "license": "Apache License 2.0", + "tags": [ + "DevOps", + "PaaS", + "Self-Hosted" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coolify.io", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/coolify" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "sap", + "name": "SAP S/4HANA", + "category": "ERP", + "is_open_source": false, + "pricing_model": "Paid (Enterprise)", + "avg_monthly_cost": 100, + "website": "https://www.sap.com", + "description": "The world leader in enterprise resource planning software.", + "alternatives": [ + "odoo", + "erpnext" + ], + "logo_url": "/logos/sap.svg", + "pros": [ + "Enterprise ERP market leader", + "Handles massive organizational complexity", + "Deep industry-specific solutions", + "Strong compliance and audit trails" + ], + "cons": [ + "Extremely expensive to implement", + "Implementation takes months to years", + "Requires specialized consultants", + "Overkill for SMBs" + ] + }, + { + "slug": "odoo", + "name": "Odoo", + "category": "ERP", + "is_open_source": true, + "github_repo": "odoo/odoo", + "stars": 48919, + "website": "https://www.odoo.com", + "description": "A suite of open source business apps: CRM, eCommerce, accounting, manufacturing, warehouse, and more.", + "pros": [ + "All-in-one suite covering CRM, HR, inventory, and accounting", + "Modular app marketplace with 30,000+ extensions", + "Dual licensing — Community (free) and Enterprise" + ], + "cons": [ + "Can be complex to customize", + "Enterprise features are paid" + ], + "last_commit": "2026-02-09T16:18:46Z", + "language": 
"Python", + "license": "LGPL-3.0", + "logo_url": "/logos/odoo.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/odoo" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "erpnext", + "name": "ERPNext", + "category": "ERP", + "is_open_source": true, + "github_repo": "frappe/erpnext", + "website": "https://erpnext.com", + "description": "A free and open-source integrated Enterprise Resource Planning (ERP) software.", + "pros": [ + "Fully open source", + "No licensing fees" + ], + "cons": [ + "Steep learning curve" + ], + "stars": 31635, + "last_commit": "2026-02-09T15:52:29Z", + "language": "Python", + "license": "GNU General Public License v3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=erpnext.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/erpnext" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "autocad", + "name": "AutoCAD", + "category": "CAD", + "is_open_source": false, + "pricing_model": "Paid (Subscription)", + "website": "https://www.autodesk.com/products/autocad", + "description": "Professional computer-aided design (CAD) and drafting software.", + "alternatives": [ + "librecad", + "freecad" + ], + "logo_url": "/logos/autocad.svg", + "avg_monthly_cost": 75, + "pros": [ + "Industry standard for CAD/engineering", + "Precise 2D and 3D modeling", + "Extensive library of tools and templates", + "Strong file format compatibility" + ], + "cons": [ + "Expensive subscription ($1,975/yr)", + "Steep learning curve", + "Resource-intensive — needs workstation hardware" + ] + }, + { + "slug": "librecad", + "name": "LibreCAD", + "category": "CAD", + "is_open_source": true, + "github_repo": "LibreCAD/LibreCAD", + "stars": 6500, + "website": "https://librecad.org", + "description": "A mature, feature-rich 2D CAD application with a loyal user community.", + "pros": [ + "Purpose-built lightweight 2D CAD application", + "Native DXF 
support for industry-standard file exchange", + "Cross-platform with minimal system requirements" + ], + "cons": [ + "2D only" + ], + "last_commit": "2026-02-05T10:00:00Z", + "language": "C++", + "license": "GPLv2", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=librecad.org", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "freecad", + "name": "FreeCAD", + "category": "CAD", + "is_open_source": true, + "github_repo": "FreeCAD/FreeCAD", + "stars": 21000, + "website": "https://www.freecad.org", + "description": "A general-purpose parametric 3D CAD modeler and a BIM software application.", + "pros": [ + "Full parametric 3D modeling with constraint-based sketcher", + "Extensible 3D capabilities for mechanical engineering, architecture, and BIM", + "Python scripting and macro system for automation" + ], + "cons": [ + "UI learning curve" + ], + "last_commit": "2026-02-08T14:00:00Z", + "language": "C++", + "license": "LGPLv2+", + "logo_url": "/logos/freecad.svg", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "zapier", + "name": "Zapier", + "category": "Automation", + "is_open_source": false, + "pricing_model": "Paid (Task-based)", + "website": "https://zapier.com", + "description": "The pioneer in workflow automation for everyone.", + "alternatives": [ + "n8n", + "activepieces" + ], + "logo_url": "/logos/zapier.svg", + "avg_monthly_cost": 20, + "pros": [ + "Connect 6,000+ apps without code", + "Easy visual workflow builder", + "Reliable trigger-based automation", + "Good for non-technical users" + ], + "cons": [ + "Gets expensive fast (per-task pricing)", + "Limited logic and branching on lower tiers", + "5-minute polling delay on some triggers" + ] + }, + { + "slug": "n8n", + "name": "n8n", + "category": "Automation", + "is_open_source": true, + "github_repo": "n8n-io/n8n", + "stars": 49000, + "website": "https://n8n.io", + "description": "Fair-code workflow automation tool. 
Easily automate tasks across different services.", + "pros": [ + "Self-hosted workflow automation with 400+ integrations", + "Visual node-based editor for complex multi-step workflows", + "JavaScript/Python code nodes for custom logic" + ], + "cons": [ + "Requires hosting knowledge" + ], + "last_commit": "2026-02-09T15:00:00Z", + "language": "TypeScript", + "license": "Sustainable Use License", + "logo_url": "/logos/n8n.svg", + "deployment": { + "image": "n8nio/n8n", + "port": 5678, + "env": [ + { + "key": "N8N_BASIC_AUTH_ACTIVE", + "value": "true" + }, + { + "key": "N8N_BASIC_AUTH_USER", + "value": "admin" + }, + { + "key": "N8N_BASIC_AUTH_PASSWORD", + "value": "password" + } + ], + "volumes": [ + "./n8n_data:/home/node/.n8n" + ], + "type": "docker-compose", + "local_path": "./.docker-deploy/n8n" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "activepieces", + "name": "Activepieces", + "category": "Automation", + "is_open_source": true, + "github_repo": "activepieces/activepieces", + "stars": 11000, + "website": "https://www.activepieces.com", + "description": "Open source alternative to Zapier. 
Automate your work with 200+ apps.", + "pros": [ + "Beginner-friendly UI with a low learning curve", + "Open-source and self-hostable with Docker", + "Growing library of community-built connectors" + ], + "cons": [ + "Smaller connector library than Zapier" + ], + "last_commit": "2026-02-09T16:00:00Z", + "language": "TypeScript", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=activepieces.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/activepieces" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "tableau", + "name": "Tableau", + "category": "Analytics", + "is_open_source": false, + "pricing_model": "Paid (Seat-based)", + "avg_monthly_cost": 70, + "website": "https://www.tableau.com", + "description": "Powerful data visualization and business intelligence platform.", + "alternatives": [ + "metabase", + "superset" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tableau.com", + "pros": [ + "Best-in-class data visualization", + "Drag-and-drop dashboard creation", + "Handles massive datasets well", + "Strong community and learning resources" + ], + "cons": [ + "Expensive licensing ($70+/user/mo)", + "Requires a data warehouse setup", + "Desktop app feels dated" + ] + }, + { + "slug": "metabase", + "name": "Metabase", + "category": "Analytics", + "is_open_source": true, + "github_repo": "metabase/metabase", + "stars": 38000, + "website": "https://www.metabase.com", + "description": "The simplest, fastest way to get business intelligence and analytics throughout your company.", + "pros": [ + "Extremely user friendly", + "Easy query builder" + ], + "cons": [ + "Advanced visualizations can be limited" + ], + "last_commit": "2026-02-09T14:30:00Z", + "language": "Clojure", + "license": "AGPLv3", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=metabase.com", + "deployment": { + "type": "docker-compose", + "local_path": 
"./.docker-deploy/metabase" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "superset", + "name": "Apache Superset", + "category": "Analytics", + "is_open_source": true, + "github_repo": "apache/superset", + "stars": 59000, + "website": "https://superset.apache.org", + "description": "Enterprise-ready business intelligence web application.", + "pros": [ + "Scaling to petabytes", + "Huge variety of charts" + ], + "cons": [ + "Complex configuration" + ], + "last_commit": "2026-02-09T12:00:00Z", + "language": "Python", + "license": "Apache 2.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=superset.apache.org", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/superset" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "auth0", + "name": "Auth0", + "category": "Security", + "is_open_source": false, + "pricing_model": "Paid (MAU-based)", + "website": "https://auth0.com", + "description": "The leading authentication and authorization platform.", + "alternatives": [ + "keycloak", + "authentik" + ], + "logo_url": "/logos/auth0.svg", + "avg_monthly_cost": 23, + "pros": [ + "Feature-rich authentication platform", + "Social login, MFA, and SSO out of the box", + "Extensive SDK support across languages", + "Rules and hooks for custom auth logic" + ], + "cons": [ + "Pricing jumps sharply after free tier", + "Can be complex to configure properly", + "Owned by Okta — consolidation concerns" + ] + }, + { + "slug": "keycloak", + "name": "Keycloak", + "category": "Security", + "is_open_source": true, + "github_repo": "keycloak/keycloak", + "stars": 23000, + "website": "https://www.keycloak.org", + "description": "Open source identity and access management for modern applications and services.", + "pros": [ + "Enterprise-standard identity provider supporting SAML and OIDC", + "Federated identity with social login and LDAP integration", + "Battle-tested by Red Hat in production 
environments" + ], + "cons": [ + "UI can be clunky", + "Heavy resource usage" + ], + "last_commit": "2026-02-09T16:30:00Z", + "language": "Java", + "license": "Apache 2.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=keycloak.org", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/keycloak" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "okta", + "name": "Okta", + "category": "Security", + "is_open_source": false, + "pricing_model": "Paid (User-based)", + "website": "https://okta.com", + "description": "The World's Identity Company, providing enterprise-grade IAM.", + "alternatives": [ + "authentik", + "keycloak" + ], + "logo_url": "/logos/okta.svg", + "avg_monthly_cost": 6, + "pros": [ + "Enterprise SSO and identity management leader", + "Strong security and compliance certifications", + "Universal directory for user management", + "Extensive pre-built integrations" + ], + "cons": [ + "Very expensive for small teams", + "Admin interface has a learning curve", + "Overkill for simple auth needs" + ] + }, + { + "slug": "authentik", + "name": "Authentik", + "category": "Security", + "is_open_source": true, + "github_repo": "goauthentik/authentik", + "stars": 15000, + "website": "https://goauthentik.io", + "description": "The overall-best open-source identity provider, focused on flexibility and versatility.", + "pros": [ + "Modern, intuitive admin interface with drag-and-drop flows", + "Easy customization of login pages and branding", + "Supports SAML, OAuth2, LDAP proxy, and SCIM" + ], + "cons": [ + "Smaller community than Keycloak" + ], + "last_commit": "2026-02-09T17:00:00Z", + "language": "Python", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=goauthentik.io", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/authentik" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "s3", + "name": "Amazon 
S3", + "category": "Cloud Infrastructure", + "is_open_source": false, + "pricing_model": "Paid (Usage-based)", + "website": "https://aws.amazon.com/s3", + "description": "Object storage built to retrieve any amount of data from anywhere.", + "alternatives": [ + "minio" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=aws.amazon.com", + "avg_monthly_cost": 23, + "pros": [ + "99.999999999% durability (11 nines)", + "Scales to virtually unlimited storage", + "Pay only for what you use", + "Industry standard — everything integrates with it" + ], + "cons": [ + "Egress costs can surprise you", + "Complex IAM/bucket policy configuration", + "Vendor lock-in to AWS ecosystem" + ] + }, + { + "slug": "minio", + "name": "MinIO", + "category": "Cloud Infrastructure", + "is_open_source": true, + "github_repo": "minio/minio", + "stars": 45000, + "website": "https://min.io", + "description": "High-performance, S3-compatible object storage for AI and enterprise data.", + "pros": [ + "S3-compatible API — drop-in replacement for AWS S3", + "Extremely fast object storage optimized for AI/ML workloads", + "Kubernetes-native with operator support" + ], + "cons": [ + "AGPL license can be strict" + ], + "last_commit": "2026-02-09T14:00:00Z", + "language": "Go", + "license": "AGPLv3", + "logo_url": "/logos/minio.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/minio" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "zendesk", + "name": "Zendesk", + "category": "Support", + "is_open_source": false, + "pricing_model": "Paid (Agent-based)", + "avg_monthly_cost": 19, + "website": "https://www.zendesk.com", + "description": "The leader in customer service and engagement software.", + "alternatives": [ + "zammad" + ], + "logo_url": "/logos/zendesk.svg", + "pros": [ + "Comprehensive customer support platform", + "Omnichannel support (email, chat, phone)", + "Powerful ticket management and routing", + "Large 
marketplace of integrations" + ], + "cons": [ + "Expensive per-agent pricing", + "UI can feel bloated and slow", + "Basic plans lack important features" + ] + }, + { + "slug": "zammad", + "name": "Zammad", + "category": "Support", + "is_open_source": true, + "github_repo": "zammad/zammad", + "stars": 5000, + "website": "https://zammad.org", + "description": "A web-based, open source helpdesk/customer support system with many features.", + "pros": [ + "Omnichannel helpdesk with email, phone, chat, and social media", + "Full-text search with Elasticsearch integration", + "Customizable ticket workflows and SLA management" + ], + "cons": [ + "Ruby hosting can be tricky" + ], + "last_commit": "2026-02-09T11:00:00Z", + "language": "Ruby", + "license": "AGPLv3", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=zammad.org", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/zammad" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "workday", + "name": "Workday", + "category": "HR", + "is_open_source": false, + "pricing_model": "Paid (Enterprise)", + "avg_monthly_cost": 45, + "website": "https://www.workday.com", + "description": "Enterprise management cloud for finance and human resources.", + "alternatives": [ + "orangehrm" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=workday.com", + "pros": [ + "Leading cloud HR and finance platform", + "Strong workforce analytics", + "Regular feature updates included", + "Built for enterprise compliance" + ], + "cons": [ + "Extremely expensive to implement", + "Long implementation timelines", + "Complex for smaller organizations" + ] + }, + { + "slug": "orangehrm", + "name": "OrangeHRM", + "category": "HR", + "is_open_source": true, + "github_repo": "orangehrm/orangehrm", + "stars": 1200, + "website": "https://www.orangehrm.com", + "description": "The world's most popular open source human resource management software.", + "pros": [ + 
"Comprehensive HR suite covering recruitment, leave, and performance", + "Highly customizable with module-based architecture", + "Employee self-service portal for time-off and expenses" + ], + "cons": [ + "UI feels a bit dated", + "Enterprise features are paid" + ], + "last_commit": "2026-02-09T10:00:00Z", + "language": "PHP", + "license": "GPLv2", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=orangehrm.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/orangehrm" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "m365", + "name": "Microsoft 365", + "category": "Productivity", + "is_open_source": false, + "pricing_model": "Paid (Subscription)", + "website": "https://www.office.com", + "description": "The world's most popular office suite and cloud collaboration platform.", + "alternatives": [ + "onlyoffice" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=office.com", + "avg_monthly_cost": 12, + "pros": [ + "Full productivity suite (Word, Excel, Teams)", + "Deep enterprise integration", + "1TB OneDrive storage included", + "Regular AI feature updates (Copilot)" + ], + "cons": [ + "Subscription fatigue — perpetual payments", + "Teams can be resource-heavy", + "Complex licensing tiers" + ] + }, + { + "slug": "onlyoffice", + "name": "ONLYOFFICE", + "category": "Productivity", + "is_open_source": true, + "github_repo": "ONLYOFFICE/DocumentServer", + "stars": 11000, + "website": "https://www.onlyoffice.com", + "description": "Powerful online document editors for text, spreadsheets, and presentations. 
Highly compatible with MS Office.", + "pros": [ + "Full-featured collaborative editing for docs, sheets, and slides", + "Drop-in MS Office compatibility with high-fidelity rendering", + "Self-hosted integration with Nextcloud, Seafile, and more" + ], + "cons": [ + "Self-hosting can be complex" + ], + "last_commit": "2026-02-09T15:30:00Z", + "language": "JavaScript", + "license": "AGPLv3", + "logo_url": "/logos/onlyoffice.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/onlyoffice" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "shopify", + "name": "Shopify", + "category": "E-commerce", + "is_open_source": false, + "pricing_model": "Paid (Subscription)", + "website": "https://www.shopify.com", + "description": "Commercial platform that allows anyone to set up an online store.", + "alternatives": [ + "medusa" + ], + "logo_url": "/logos/shopify.svg", + "avg_monthly_cost": 39, + "pros": [ + "Easiest way to start selling online", + "Beautiful themes and fast checkout", + "Apps for almost any e-commerce need", + "Handles payments, shipping, and taxes" + ], + "cons": [ + "Transaction fees unless using Shopify Payments", + "Monthly costs add up with apps", + "Limited customization vs self-hosted solutions" + ] + }, + { + "slug": "medusa", + "name": "Medusa.js", + "category": "E-commerce", + "is_open_source": true, + "github_repo": "medusajs/medusa", + "stars": 24000, + "website": "https://medusajs.com", + "description": "The open-source alternative to Shopify. 
Building blocks for digital commerce.", + "pros": [ + "Headless commerce with extreme flexibility for custom storefronts", + "Plugin architecture for payments, fulfillment, and CMS", + "Multi-region and multi-currency support built in" + ], + "cons": [ + "Requires developer knowledge" + ], + "last_commit": "2026-02-09T16:45:00Z", + "language": "TypeScript", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=medusajs.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/medusa" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "docusign", + "name": "DocuSign", + "category": "Legal", + "is_open_source": false, + "pricing_model": "Paid (Envelope-based)", + "website": "https://www.docusign.com", + "description": "The world's #1 way to sign electronically on practically any device, from almost anywhere, at any time.", + "alternatives": [ + "documenso" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=docusign.com", + "avg_monthly_cost": 25, + "pros": [ + "Industry standard for e-signatures", + "Legally binding in most countries", + "Workflow automation for document routing", + "Strong mobile experience" + ], + "cons": [ + "Expensive for occasional use", + "UI feels dated compared to competitors", + "Limited free tier" + ] + }, + { + "slug": "documenso", + "name": "Documenso", + "category": "Legal", + "is_open_source": true, + "github_repo": "documenso/documenso", + "stars": 8000, + "website": "https://documenso.com", + "description": "The open-source DocuSign alternative. 
We aim to be the world's most trusted document signing platform.", + "pros": [ + "Self-hosted digital signatures with full audit trail", + "Developer-friendly API and webhook integration", + "Beautiful, modern signing experience" + ], + "cons": [ + "Newer ecosystem" + ], + "last_commit": "2026-02-10T09:00:00Z", + "language": "TypeScript", + "license": "AGPL-3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=documenso.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/documenso" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mailchimp", + "name": "Mailchimp", + "category": "Marketing", + "is_open_source": false, + "pricing_model": "Paid (Contact-based)", + "website": "https://mailchimp.com", + "description": "All-in-one marketing platform that helps you manage and talk to your clients, customers, and other interested parties.", + "alternatives": [ + "listmonk", + "mautic" + ], + "logo_url": "/logos/mailchimp.svg", + "avg_monthly_cost": 13, + "pros": [ + "Beginner-friendly email marketing", + "Good free tier for small lists", + "Built-in landing page builder", + "Detailed campaign analytics" + ], + "cons": [ + "Pricing increases steeply with list size", + "Owned by Intuit (less indie-friendly)", + "Template editor is limiting" + ] + }, + { + "slug": "listmonk", + "name": "Listmonk", + "category": "Marketing", + "is_open_source": true, + "github_repo": "knadh/listmonk", + "stars": 19000, + "website": "https://listmonk.app", + "description": "High performance, self-hosted newsletter and mailing list manager with a modern dashboard.", + "pros": [ + "Handles millions of subscribers with blazing fast performance", + "Templating engine with rich media and personalization", + "Manages bounces, unsubscribes, and analytics automatically" + ], + "cons": [ + "No built-in sending (needs SMTP/SES)" + ], + "last_commit": "2026-02-05T12:00:00Z", + "language": "Go", + "license": "AGPL-3.0", + 
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=listmonk.app", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/listmonk" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mautic", + "name": "Mautic", + "category": "Marketing", + "is_open_source": true, + "github_repo": "mautic/mautic", + "stars": 7000, + "website": "https://www.mautic.org", + "description": "World's largest open source marketing automation project.", + "pros": [ + "Full marketing automation with CRM-grade contact management", + "Visual campaign builder with multi-channel triggers", + "Email, SMS, and social media campaign support" + ], + "cons": [ + "Complex setup and maintenance" + ], + "last_commit": "2026-02-09T18:00:00Z", + "language": "PHP", + "license": "GPL-3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mautic.org", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/mautic" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "statuspage", + "name": "Statuspage", + "category": "Monitoring", + "is_open_source": false, + "pricing_model": "Paid (Atlassian)", + "website": "https://www.atlassian.com/software/statuspage", + "description": "The best way to communicate status and downtime to your customers.", + "alternatives": [ + "uptime-kuma" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=atlassian.com", + "avg_monthly_cost": 29, + "pros": [ + "Clean, professional status pages", + "Integrated incident management", + "Email/SMS subscriber notifications", + "Atlassian ecosystem integration" + ], + "cons": [ + "Expensive for what it does ($29+/mo)", + "Limited customization options", + "Overkill if you just need a simple status page" + ] + }, + { + "slug": "uptime-kuma", + "name": "Uptime Kuma", + "category": "Monitoring", + "is_open_source": true, + "github_repo": "louislam/uptime-kuma", + "stars": 55000, + "website": 
"https://uptime.kuma.pet", + "description": "A fancy self-hosted monitoring tool.", + "pros": [ + "Beautiful, real-time monitoring dashboard", + "Multi-protocol support: HTTP, TCP, DNS, Docker, and more", + "Notification integrations with 90+ services including Slack, Discord, and Telegram" + ], + "cons": [ + "Self-hosted only (usually)" + ], + "last_commit": "2026-02-10T08:00:00Z", + "language": "JavaScript", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=uptime.kuma.pet", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/uptime-kuma" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "datadog", + "name": "Datadog", + "category": "Monitoring", + "is_open_source": false, + "pricing_model": "Paid (Usage-based)", + "website": "https://www.datadoghq.com", + "description": "Modern monitoring and security that gives you full visibility into your applications and infrastructure.", + "alternatives": [ + "signoz" + ], + "logo_url": "/logos/datadog.svg", + "avg_monthly_cost": 23, + "pros": [ + "Comprehensive observability platform", + "APM, logs, metrics in one place", + "Excellent dashboards and alerting", + "Supports 750+ integrations" + ], + "cons": [ + "Notoriously expensive at scale", + "Complex pricing model (per host, per GB)", + "Can become a significant budget item" + ] + }, + { + "slug": "signoz", + "name": "SigNoz", + "category": "Monitoring", + "is_open_source": true, + "github_repo": "signoz/signoz", + "stars": 18000, + "website": "https://signoz.io", + "description": "Open source observability platform. 
SigNoz helps developers monitor applications and troubleshoot problems.", + "pros": [ + "Unified metrics, traces, and logs in a single platform", + "OpenTelemetry native — no proprietary agents required", + "ClickHouse-powered for fast queries at scale" + ], + "cons": [ + "High resource usage (ClickHouse)" + ], + "last_commit": "2026-02-09T20:00:00Z", + "language": "Go", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=signoz.io", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/signoz" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "typeform", + "name": "Typeform", + "category": "Productivity", + "is_open_source": false, + "pricing_model": "Paid (Response-based)", + "website": "https://www.typeform.com", + "description": "Build beautiful, interactive forms, surveys, quizzes, and something else entirely.", + "alternatives": [ + "tally" + ], + "logo_url": "/logos/typeform.svg", + "avg_monthly_cost": 25, + "pros": [ + "Beautiful, conversational form experience", + "High completion rates vs traditional forms", + "Logic jumps and conditional flows", + "Great integrations (Zapier, webhooks)" + ], + "cons": [ + "Expensive for the response limits", + "Limited free tier (10 responses/mo)", + "Not ideal for complex multi-page forms" + ] + }, + { + "slug": "tally", + "name": "Tally", + "category": "Productivity", + "is_open_source": false, + "is_free_tier_generous": true, + "pricing_model": "Free/Paid", + "website": "https://tally.so", + "description": "The simplest way to create forms. 
Tally is a new type of form builder that works like a doc.", + "pros": [ + "Notion-like form building experience with no-code simplicity", + "Unlimited forms and responses on the free tier", + "Conditional logic, hidden fields, and payment collection" + ], + "cons": [ + "Wait, it's not open source (but highly OS-friendly community)" + ], + "tags": [ + "Forms", + "Surveys", + "No-code" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tally.so" + }, + { + "slug": "confluence", + "name": "Confluence", + "category": "Productivity", + "is_open_source": false, + "pricing_model": "Paid (Atlassian)", + "website": "https://www.atlassian.com/software/confluence", + "description": "Your remote-friendly team workspace where knowledge and collaboration meet.", + "alternatives": [ + "outline" + ], + "logo_url": "/logos/confluence.svg", + "avg_monthly_cost": 10, + "pros": [ + "Deep Jira integration for dev teams", + "Structured knowledge base with spaces", + "Templates for common documentation", + "Permissions and access control" + ], + "cons": [ + "Slow and bloated interface", + "Search is frustratingly poor", + "Editing experience lags behind Notion" + ] + }, + { + "slug": "outline", + "name": "Outline", + "category": "Productivity", + "is_open_source": true, + "github_repo": "outline/outline", + "stars": 24000, + "website": "https://www.getoutline.com", + "description": "Fast, collaborative, knowledge base for your team built using React and Markdown.", + "pros": [ + "Sub-second search across all documents", + "Beautifully designed editor with Markdown shortcuts", + "Integrates with Slack, Figma, and 20+ tools out of the box" + ], + "cons": [ + "Hard to self-host (complex storage requirements)" + ], + "last_commit": "2026-02-10T12:00:00Z", + "language": "TypeScript", + "license": "Other", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=getoutline.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/outline" + 
}, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "hootsuite", + "name": "Hootsuite", + "category": "Marketing", + "is_open_source": false, + "pricing_model": "Paid (Seat-based)", + "website": "https://www.hootsuite.com", + "description": "Social media marketing and management dashboard.", + "alternatives": [ + "mixpost" + ], + "logo_url": "/logos/hootsuite.svg", + "avg_monthly_cost": 49, + "pros": [ + "Manage multiple social accounts in one place", + "Post scheduling across platforms", + "Team collaboration and approval workflows", + "Analytics and reporting dashboard" + ], + "cons": [ + "Expensive plans ($99+/mo)", + "UI feels cluttered and dated", + "Free plan was eliminated" + ] + }, + { + "slug": "mixpost", + "name": "Mixpost", + "category": "Marketing", + "is_open_source": true, + "github_repo": "inovector/mixpost", + "stars": 3000, + "website": "https://mixpost.app", + "description": "Self-hosted social media management software.", + "pros": [ + "Own your data", + "No monthly subscription" + ], + "cons": [ + "Newer, fewer social connectors" + ], + "last_commit": "2026-02-01T15:00:00Z", + "language": "PHP", + "license": "Other", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mixpost.app", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/mixpost" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "codespaces", + "name": "GitHub Codespaces", + "category": "DevOps", + "is_open_source": false, + "pricing_model": "Paid (Usage-based)", + "website": "https://github.com/features/codespaces", + "description": "Fast, cloud-hosted developer environments.", + "alternatives": [ + "coder" + ], + "logo_url": "/logos/codespaces.svg", + "avg_monthly_cost": 15, + "pros": [ + "Full VS Code in the browser", + "Pre-configured dev environments", + "Instant onboarding for new contributors", + "Deep GitHub integration" + ], + "cons": [ + "Usage-based pricing adds up", + "Requires stable 
internet connection", + "Limited GPU/compute options" + ] + }, + { + "slug": "coder", + "name": "Coder", + "category": "DevOps", + "is_open_source": true, + "github_repo": "coder/coder", + "stars": 20000, + "website": "https://coder.com", + "description": "Provision software development environments as code on your infrastructure.", + "pros": [ + "Run dev environments on any infrastructure — cloud, on-prem, or hybrid", + "Self-hosted remote development with VS Code and JetBrains support", + "Ephemeral workspaces with Terraform-based provisioning" + ], + "cons": [ + "Requires K8s or Terraform knowledge" + ], + "last_commit": "2026-02-09T22:00:00Z", + "language": "Go", + "license": "AGPL-3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coder.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/coder" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "quickbooks", + "name": "QuickBooks", + "category": "Financial", + "is_open_source": false, + "pricing_model": "Paid (Monthly Subscription)", + "website": "https://quickbooks.intuit.com", + "description": "Smart, simple online accounting software for small businesses.", + "alternatives": [ + "akaunting", + "erpnext" + ], + "logo_url": "/logos/quickbooks.svg", + "avg_monthly_cost": 25, + "pros": [ + "Industry standard for small business accounting", + "Easy invoicing and expense tracking", + "Bank feed integration", + "Tax preparation features" + ], + "cons": [ + "Subscription pricing keeps increasing", + "Performance issues with large files", + "Limited multi-currency support" + ] + }, + { + "slug": "akaunting", + "name": "Akaunting", + "category": "Financial", + "is_open_source": true, + "github_repo": "akaunting/akaunting", + "stars": 12000, + "website": "https://akaunting.com", + "description": "Free and open source online accounting software for small businesses and freelancers.", + "pros": [ + "Modular app store", + "Multilingual and 
multicurrency" + ], + "cons": [ + "Some essential apps are paid" + ], + "last_commit": "2026-02-08T14:00:00Z", + "language": "PHP", + "license": "GPL-3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=akaunting.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/akaunting" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "premiere", + "name": "Adobe Premiere Pro", + "category": "Creative", + "is_open_source": false, + "pricing_model": "Paid (Creative Cloud)", + "website": "https://www.adobe.com/products/premiere.html", + "description": "Industry-leading video editing software for film, TV, and the web.", + "alternatives": [ + "kdenlive" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=adobe.com", + "avg_monthly_cost": 35, + "pros": [ + "Professional-grade video editing", + "Excellent integration with After Effects", + "Industry standard in film and media", + "AI-powered features (scene detection, auto-reframe)" + ], + "cons": [ + "Subscription-only ($22.99/mo)", + "Resource-intensive — needs powerful hardware", + "Steep learning curve" + ] + }, + { + "slug": "kdenlive", + "name": "Kdenlive", + "category": "Creative", + "is_open_source": true, + "github_repo": "KDE/kdenlive", + "stars": 3500, + "website": "https://kdenlive.org", + "description": "Open source video editing software based on the MLT Framework and KDE.", + "pros": [ + "Truly free forever", + "Powerful multi-track editing" + ], + "cons": [ + "UI can be intimidating for beginners" + ], + "last_commit": "2026-02-10T11:00:00Z", + "language": "C++", + "license": "GPL-3.0", + "logo_url": "/logos/kdenlive.svg", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "dashlane", + "name": "Dashlane", + "category": "Security", + "is_open_source": false, + "pricing_model": "Paid (Subscription)", + "website": "https://www.dashlane.com", + "description": "Cloud-based password manager and digital 
wallet.", + "alternatives": [ + "vaultwarden", + "bitwarden" + ], + "logo_url": "/logos/dashlane.svg", + "avg_monthly_cost": 8, + "pros": [ + "Clean, intuitive interface", + "Built-in VPN on premium plans", + "Dark web monitoring alerts", + "Secure sharing for teams" + ], + "cons": [ + "More expensive than competitors", + "Free tier limited to 25 passwords", + "Desktop app was discontinued" + ] + }, + { + "slug": "vaultwarden", + "name": "Vaultwarden", + "category": "Security", + "is_open_source": true, + "github_repo": "dani-garcia/vaultwarden", + "stars": 32000, + "website": "https://github.com/dani-garcia/vaultwarden", + "description": "Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs.", + "pros": [ + "Full Bitwarden API compatibility in a lightweight Rust binary", + "Runs on 50MB of RAM — perfect for Raspberry Pi or small VPS", + "Supports organizations, attachments, and Bitwarden Send" + ], + "cons": [ + "Third-party implementation (not security audited officially)" + ], + "last_commit": "2026-02-09T10:00:00Z", + "language": "Rust", + "license": "AGPL-3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=bitwarden.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/vaultwarden" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "pipedrive", + "name": "Pipedrive", + "category": "CRM", + "is_open_source": false, + "pricing_model": "Paid (Seat-based)", + "website": "https://www.pipedrive.com", + "description": "Sales CRM & pipeline management software that helps you get more organized.", + "alternatives": [ + "twenty" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=pipedrive.com", + "avg_monthly_cost": 15, + "pros": [ + "Simple, visual sales pipeline", + "Easy to set up and use", + "Good automation for follow-ups", + "Affordable entry-level pricing" + ], + "cons": [ + "Limited features vs Salesforce", + "Reporting could be more 
powerful", + "No free tier" + ] + }, + { + "slug": "twenty", + "name": "Twenty", + "category": "CRM", + "is_open_source": true, + "github_repo": "twentyhq/twenty", + "stars": 15000, + "website": "https://twenty.com", + "description": "A modern open-source CRM alternative to Salesforce and Pipedrive.", + "pros": [ + "Clean, Notion-like interface for CRM workflows", + "Deeply customizable data models and views", + "GraphQL API for flexible integrations" + ], + "cons": [ + "Still in early development" + ], + "last_commit": "2026-02-10T14:00:00Z", + "language": "TypeScript", + "license": "AGPL-3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=twenty.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/twenty" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "sentry", + "name": "Sentry", + "category": "Monitoring", + "is_open_source": false, + "pricing_model": "Paid (Usage-based)", + "website": "https://sentry.io", + "description": "Developer-first error tracking and performance monitoring.", + "alternatives": [ + "glitchtip" + ], + "logo_url": "/logos/sentry.svg", + "avg_monthly_cost": 26, + "pros": [ + "Best-in-class error tracking", + "Stack traces with source maps", + "Performance monitoring built in", + "Supports 100+ platforms and languages" + ], + "cons": [ + "Can be noisy without proper filtering", + "Pricing based on error volume", + "Self-hosting is complex" + ] + }, + { + "slug": "glitchtip", + "name": "GlitchTip", + "category": "Monitoring", + "is_open_source": true, + "github_repo": "glitchtip/glitchtip", + "stars": 3000, + "website": "https://glitchtip.com", + "description": "Open source error tracking that's compatible with Sentry SDKs.", + "pros": [ + "Sentry-compatible error tracking that simplifies self-hosting", + "Lightweight alternative requiring minimal server resources", + "Performance monitoring with transaction tracking" + ], + "cons": [ + "Less polished UI than 
Sentry" + ], + "last_commit": "2026-02-05T09:00:00Z", + "language": "Python", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=glitchtip.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/glitchtip" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "calendly", + "name": "Calendly", + "category": "Productivity", + "is_open_source": false, + "pricing_model": "Paid (Seat-based)", + "website": "https://calendly.com", + "description": "The modern scheduling platform that makes 'finding time' a breeze.", + "alternatives": [ + "calcom" + ], + "logo_url": "/logos/calendly.svg", + "avg_monthly_cost": 10, + "pros": [ + "Frictionless scheduling experience", + "Integrates with Google/Outlook calendars", + "Team scheduling and round-robin", + "Customizable booking pages" + ], + "cons": [ + "Free plan limited to one event type", + "Premium features locked behind $10+/mo", + "Branding on free tier" + ] + }, + { + "slug": "calcom", + "name": "Cal.com", + "category": "Productivity", + "is_open_source": true, + "github_repo": "calcom/cal.com", + "stars": 30000, + "website": "https://cal.com", + "description": "The open-source Calendly alternative. 
Take control of your scheduling.", + "pros": [ + "Self-hosted scheduling — no data leaves your server", + "Deeply extensible with a plugin architecture and API", + "Round-robin, collective, and managed event types" + ], + "cons": [ + "Can be overkill for simple use cases" + ], + "last_commit": "2026-02-10T07:00:00Z", + "language": "TypeScript", + "license": "AGPL-3.0", + "logo_url": "/logos/calcom.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/calcom" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "intercom", + "name": "Intercom", + "category": "Support", + "is_open_source": false, + "pricing_model": "Paid (Seat-based)", + "website": "https://www.intercom.com", + "description": "The business messenger that builds real-time connections.", + "alternatives": [ + "chaskiq" + ], + "logo_url": "/logos/intercom.svg", + "avg_monthly_cost": 39, + "pros": [ + "Best-in-class live chat and messaging", + "AI chatbot (Fin) handles common questions", + "Product tours and onboarding flows", + "Unified inbox for support" + ], + "cons": [ + "Very expensive ($74+/mo starting)", + "Pricing model is complex and confusing", + "Can be overkill for small teams" + ] + }, + { + "slug": "chaskiq", + "name": "Chaskiq", + "category": "Support", + "is_open_source": true, + "github_repo": "chaskiq/chaskiq", + "stars": 4000, + "website": "https://chaskiq.io", + "description": "Open source conversational marketing platform alternative to Intercom and Drift.", + "pros": [ + "Self-hosted customer messaging that replaces Intercom", + "Bot automation with visual workflow builder", + "Multi-channel support including web chat, email, and WhatsApp" + ], + "cons": [ + "Smaller community than Chatwoot" + ], + "last_commit": "2026-01-28T12:00:00Z", + "language": "Ruby", + "license": "GPL-3.0", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=chaskiq.io", + "deployment": { + "type": "docker-compose", + "local_path": 
"./.docker-deploy/chaskiq" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mailgun", + "name": "Mailgun", + "category": "Marketing", + "is_open_source": false, + "pricing_model": "Paid (Usage-based)", + "website": "https://www.mailgun.com", + "description": "Electronic mail delivery service for developers.", + "alternatives": [ + "postal" + ], + "logo_url": "/logos/mailgun.svg", + "avg_monthly_cost": 15, + "pros": [ + "Reliable transactional email delivery", + "Powerful email API and SMTP relay", + "Detailed delivery analytics", + "Good documentation" + ], + "cons": [ + "No visual email builder", + "Pricing increased significantly", + "Support quality has declined" + ] + }, + { + "slug": "postal", + "name": "Postal", + "category": "Marketing", + "is_open_source": true, + "github_repo": "postalserver/postal", + "stars": 15000, + "website": "https://postalserver.io", + "description": "A fully featured open source mail delivery platform for incoming & outgoing e-mail.", + "pros": [ + "High-performance mail delivery server built for throughput", + "Detailed delivery tracking with click and open analytics", + "IP pool management and DKIM/SPF configuration" + ], + "cons": [ + "Extremely complex to manage delivery (IP warm-up)" + ], + "last_commit": "2026-02-09T13:00:00Z", + "language": "Ruby", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=postalserver.io", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/postal" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "segment", + "name": "Segment", + "category": "Marketing", + "is_open_source": false, + "pricing_model": "Paid (Usage-based)", + "website": "https://segment.com", + "description": "The leading customer data platform (CDP).", + "alternatives": [ + "jitsu" + ], + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=segment.com", + "avg_monthly_cost": 120, + "pros": [ + "Single API for 
all analytics tools", + "Customer data platform (CDP) capabilities", + "200+ destination integrations", + "Clean data pipeline management" + ], + "cons": [ + "Extremely expensive ($120+/mo to start)", + "Complex to set up properly", + "Overkill for simple tracking needs" + ] + }, + { + "slug": "jitsu", + "name": "Jitsu", + "category": "Marketing", + "is_open_source": true, + "github_repo": "jitsucom/jitsu", + "stars": 5000, + "website": "https://jitsu.com", + "description": "High-performance data collection platform and open-source Segment alternative.", + "pros": [ + "Unlimited data volume", + "Real-time data streaming" + ], + "cons": [ + "Fewer destinations than Segment" + ], + "last_commit": "2026-02-10T16:00:00Z", + "language": "TypeScript", + "license": "MIT", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jitsu.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/jitsu" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "dokku", + "name": "Dokku", + "category": "DevOps", + "is_open_source": true, + "github_repo": "dokku/dokku", + "website": "https://dokku.com", + "description": "A docker-powered PaaS that helps you build and manage the lifecycle of applications", + "pros": [ + "Rock-solid stability — battle-tested since 2013", + "Heroku-compatible buildpacks and Procfile workflow", + "Zero-downtime deploys with simple git push" + ], + "cons": [ + "CLI driven" + ], + "stars": 31874, + "last_commit": "2026-02-09T15:40:31Z", + "language": "Shell", + "license": "MIT License", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=dokku.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/dokku" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "chatgpt", + "name": "ChatGPT / OpenAI", + "category": "AI Models", + "is_open_source": false, + "pricing_model": "Paid/Freemium", + "website": "https://openai.com", + "description": 
"The leading commercial AI assistant and API platform (GPT-4o, o1).", + "alternatives": [ + "llama", + "deepseek", + "mistral" + ], + "tags": [ + "AI", + "LLM", + "Chat" + ], + "hosting_type": "cloud", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openai.com", + "avg_monthly_cost": 20, + "pros": [ + "Most capable general-purpose AI assistant", + "Excellent at writing, coding, and reasoning", + "Plugin ecosystem and GPT store", + "Supports image, voice, and file inputs" + ], + "cons": [ + "$20/mo for GPT-4 access", + "Can hallucinate confidently", + "No self-hosting option", + "Data privacy concerns for sensitive info" + ] + }, + { + "slug": "llama", + "name": "Meta Llama 3.1", + "category": "AI Models", + "is_open_source": true, + "github_repo": "meta-llama/llama3", + "website": "https://llama.meta.com", + "description": "Meta's flagship open-weight model with 128K context. Supports 8B, 70B, and 405B parameters.", + "pros": [ + "Massive 128K token context window for long documents", + "Strong multilingual support across 8+ languages", + "SOTA 405B variant competing with GPT-4 at a fraction of the cost" + ], + "cons": [ + "405B requires massive hardware", + "Llama Community License" + ], + "stars": 65000, + "language": "Python", + "license": "Llama 3.1 Community License", + "tags": [ + "AI", + "LLM", + "128K Context" + ], + "hardware_req": "8GB VRAM (8B), 40GB+ VRAM (70B), 800GB+ VRAM (405B)", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 128000, + "parameters_total_b": 405, + "is_multimodal": false + }, + "logo_url": "/logos/meta.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/llama" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "deepseek", + "name": "DeepSeek-V3 / R1", + "category": "AI Models", + "is_open_source": true, + "github_repo": "deepseek-ai/DeepSeek-V3", + "website": "https://deepseek.com", + "description": "Powerful 
open-source models including V3 (671B) and R1 (Reasoning). Rivals GPT-4o and o1.", + "pros": [ + "State-of-the-art reasoning (R1)", + "Extremely cost efficient", + "MIT License (V3/R1)" + ], + "cons": [ + "Full model requires huge VRAM", + "Newer ecosystem" + ], + "stars": 110000, + "language": "Python", + "license": "MIT License", + "tags": [ + "AI", + "LLM", + "Reasoning" + ], + "alternatives": [ + "llama", + "mistral", + "qwen", + "deepseek-v3-1" + ], + "hardware_req": "8GB VRAM (Distilled), 160GB+ VRAM (Full)", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 160, + "context_window_tokens": 128000, + "parameters_total_b": 671, + "parameters_active_b": 37, + "is_multimodal": false + }, + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/deepseek" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mistral", + "name": "Mistral Large 2", + "category": "AI Models", + "is_open_source": true, + "github_repo": "mistralai/mistral-inference", + "website": "https://mistral.ai", + "description": "Flagship 123B model from Mistral AI. 
Optimized for multilingual, reasoning, and coding tasks.", + "pros": [ + "State-of-the-art performance per parameter on benchmarks", + "128K context window with function-calling support", + "Efficient Mixture-of-Experts architecture for fast inference" + ], + "cons": [ + "Mistral Research License", + "Requires high VRAM (80GB+)" + ], + "stars": 20000, + "language": "Python", + "license": "Mistral Research License", + "tags": [ + "AI", + "LLM", + "EU" + ], + "hardware_req": "80GB+ VRAM (FP16), 40GB+ (8-bit)", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 80, + "context_window_tokens": 128000, + "parameters_total_b": 123, + "is_multimodal": false + }, + "logo_url": "/logos/mistral.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/mistral" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "gemma", + "name": "Google Gemma 2", + "category": "AI Models", + "is_open_source": true, + "github_repo": "google/gemma-2", + "website": "https://ai.google.dev/gemma", + "description": "Google's open-weight models (9B, 27B) with class-leading performance and efficient architecture.", + "pros": [ + "Distilled for performance", + "Excellent 27B variant", + "Google AI ecosystem" + ], + "cons": [ + "8K context window", + "Gemma Terms of Use" + ], + "stars": 20000, + "language": "Python", + "license": "Gemma License", + "tags": [ + "AI", + "LLM", + "Google" + ], + "hardware_req": "8GB VRAM (9B), 24GB+ VRAM (27B)", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 18, + "context_window_tokens": 8192, + "parameters_total_b": 27, + "is_multimodal": false + }, + "logo_url": "/logos/gemma.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/gemma" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "qwen", + "name": "Qwen 2.5", + "category": "AI Models", + "is_open_source": true, + "github_repo": "QwenLM/Qwen2.5", + "website": 
"https://qwenlm.github.io", + "description": "Comprehensive LLM series from Alibaba Cloud, excelling in coding, math, and multilingual support.", + "pros": [ + "128K context window", + "Top-tier coding ability", + "Apache 2.0 (mostly)" + ], + "cons": [ + "72B requires significant VRAM" + ], + "stars": 50000, + "language": "Python", + "license": "Apache License 2.0", + "tags": [ + "AI", + "LLM", + "Coding" + ], + "hardware_req": "8GB VRAM (7B), 40GB+ VRAM (32B), 140GB+ VRAM (72B)", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 40, + "context_window_tokens": 128000, + "parameters_total_b": 72, + "is_multimodal": false + }, + "logo_url": "/logos/qwen.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/qwen" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "midjourney", + "name": "Midjourney", + "category": "AI Image Generation", + "is_open_source": false, + "pricing_model": "Paid (Subscription)", + "website": "https://midjourney.com", + "description": "Leading AI image generation tool, known for artistic and photorealistic outputs.", + "alternatives": [ + "stable-diffusion", + "flux" + ], + "tags": [ + "AI", + "Image", + "Art" + ], + "hosting_type": "cloud", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=midjourney.com", + "avg_monthly_cost": 10, + "pros": [ + "Best-in-class AI image generation quality", + "Stunning artistic and photorealistic outputs", + "Active community for inspiration", + "V6 handles text in images well" + ], + "cons": [ + "Discord-only interface (no standalone app)", + "No free tier ($10/mo minimum)", + "Limited control over exact outputs", + "No API for automation" + ] + }, + { + "slug": "stable-diffusion", + "name": "Stable Diffusion 3.5", + "category": "AI Image Generation", + "is_open_source": true, + "github_repo": "Stability-AI/sd3.5", + "website": "https://stability.ai", + "description": "The latest open-weights image generation model from 
Stability AI, offering superior prompt adherence.", + "pros": [ + "Run image generation entirely on your own GPU", + "Extensive community with thousands of fine-tuned models", + "ControlNet, inpainting, and img2img for precise creative control" + ], + "cons": [ + "Stability Community License", + "Requires 8GB+ VRAM" + ], + "stars": 10000, + "language": "Python", + "license": "Stability Community License", + "tags": [ + "AI", + "Image", + "Prompt Adherence" + ], + "hardware_req": "8GB VRAM (Medium), 16GB+ VRAM (Large)", + "hosting_type": "self-hosted", + "logo_url": "/logos/stability.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/stable-diffusion" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mochi-1", + "name": "Mochi-1", + "category": "AI Video Generation", + "is_open_source": true, + "github_repo": "genmoai/mochi1", + "website": "https://www.genmo.ai", + "description": "High-fidelity open-weights video generation model from Genmo, rivaling closed-source alternatives.", + "pros": [ + "Realistic motion", + "Adobe-like quality", + "Apache 2.0 license" + ], + "cons": [ + "Extreme hardware requirements", + "Memory intensive" + ], + "stars": 5000, + "language": "Python", + "license": "Apache License 2.0", + "tags": [ + "AI", + "Video", + "Motion" + ], + "hardware_req": "24GB VRAM (Minimal), 80GB VRAM (Recommended)", + "hosting_type": "both", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=genmo.ai", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "hunyuan-video", + "name": "HunyuanVideo 1.5", + "category": "AI Video Generation", + "is_open_source": true, + "github_repo": "Tencent/HunyuanVideo", + "website": "https://github.com/Tencent/HunyuanVideo", + "description": "Tencent's state-of-the-art open-source video generation model with 13B parameters.", + "pros": [ + "Native 720p output", + "Long sequences support", + "Stable and clean motion" + ], + "cons": [ + 
"High compute cost" + ], + "stars": 8000, + "language": "Python", + "license": "Apache License 2.0", + "tags": [ + "AI", + "Video", + "HD" + ], + "hardware_req": "14GB VRAM (v1.5/distilled), 45GB+ VRAM (Base)", + "hosting_type": "both", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tencent.com", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "flux", + "name": "FLUX", + "category": "AI Image Generation", + "is_open_source": true, + "github_repo": "black-forest-labs/flux", + "website": "https://blackforestlabs.ai", + "description": "Next-gen open image generation model from Black Forest Labs. State-of-the-art quality rivaling Midjourney.", + "pros": [ + "Outstanding image quality", + "Open weights available", + "Rapid community adoption" + ], + "cons": [ + "High VRAM requirement", + "Newer (less tooling)" + ], + "stars": 20000, + "language": "Python", + "license": "Apache License 2.0", + "tags": [ + "AI", + "Image", + "New" + ], + "hardware_req": "12GB+ VRAM (Schnell), 24GB+ (Dev)", + "hosting_type": "both", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=blackforestlabs.ai", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "github-copilot", + "name": "GitHub Copilot", + "category": "AI Coding", + "is_open_source": false, + "pricing_model": "Paid (Subscription)", + "website": "https://github.com/features/copilot", + "description": "AI pair programmer by GitHub/OpenAI. 
Integrates into VS Code and JetBrains.", + "alternatives": [ + "continue-dev", + "tabby" + ], + "tags": [ + "AI", + "Coding", + "IDE" + ], + "hosting_type": "cloud", + "logo_url": "/logos/github-copilot.svg", + "avg_monthly_cost": 10, + "pros": [ + "Best AI code completion in the market", + "Deep IDE integration (VS Code, JetBrains)", + "Understands project context", + "Copilot Chat for code explanations" + ], + "cons": [ + "$10/mo per user", + "Can suggest insecure or outdated patterns", + "Privacy concerns with code telemetry", + "Dependent on GitHub/Microsoft" + ] + }, + { + "slug": "continue-dev", + "name": "Continue", + "category": "AI Coding", + "is_open_source": true, + "github_repo": "continuedev/continue", + "website": "https://continue.dev", + "description": "Open-source AI code assistant for VS Code and JetBrains. Use any model (local or API).", + "pros": [ + "Highly customizable AI coding assistant — bring your own model", + "Works with VS Code and JetBrains natively", + "Context-aware with codebase indexing and retrieval" + ], + "cons": [ + "Requires model setup" + ], + "stars": 25000, + "language": "TypeScript", + "license": "Apache License 2.0", + "tags": [ + "AI", + "Coding", + "IDE", + "Self-Hosted" + ], + "hardware_req": "Depends on chosen model", + "hosting_type": "both", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=continue.dev", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "tabby", + "name": "TabbyML", + "category": "AI Coding", + "is_open_source": true, + "github_repo": "TabbyML/tabby", + "website": "https://tabby.tabbyml.com", + "description": "Self-hosted AI coding assistant. 
An open-source, self-hosted alternative to GitHub Copilot.", + "pros": [ + "Enterprise-ready self-hosted code completion", + "Supports multiple model backends including local GGUF", + "IDE extensions for VS Code, Vim, and IntelliJ" + ], + "cons": [ + "Needs GPU for best results" + ], + "stars": 25000, + "language": "Rust", + "license": "Apache License 2.0", + "tags": [ + "AI", + "Coding", + "Self-Hosted" + ], + "hardware_req": "8GB+ VRAM recommended", + "hosting_type": "self-hosted", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tabby.tabbyml.com", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/tabby" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "ollama", + "name": "Ollama", + "category": "AI Runners", + "is_open_source": true, + "github_repo": "ollama/ollama", + "website": "https://ollama.com", + "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models locally.", + "pros": [ + "Run any open model locally with a single command", + "OpenAI-compatible API for drop-in integration", + "Automatic model management with quantization support" + ], + "cons": [ + "Command line focused (needs UI)" + ], + "stars": 60000, + "language": "Go", + "license": "MIT License", + "tags": [ + "AI", + "Local", + "Runner" + ], + "hardware_req": "8GB+ RAM", + "hosting_type": "self-hosted", + "logo_url": "/logos/ollama.svg", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/ollama" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "open-webui", + "name": "Open WebUI", + "category": "AI Interfaces", + "is_open_source": true, + "github_repo": "open-webui/open-webui", + "website": "https://openwebui.com", + "description": "User-friendly WebUI for LLMs (Formerly Ollama WebUI). 
Supports Ollama and OpenAI-compatible APIs.", + "pros": [ + "ChatGPT-like UI", + "Multi-model chat", + "RAG support" + ], + "cons": [ + "Requires backend (like Ollama)" + ], + "stars": 15000, + "language": "Svelte", + "license": "MIT License", + "tags": [ + "AI", + "UI", + "Chat" + ], + "hosting_type": "self-hosted", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openwebui.com", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "jan", + "name": "Jan", + "category": "AI Interfaces", + "is_open_source": true, + "github_repo": "janhq/jan", + "website": "https://jan.ai", + "description": "Jan is an open source alternative to ChatGPT that runs 100% offline on your computer.", + "pros": [ + "Runs offline", + "Native app (no Docker)", + "Local model manager" + ], + "cons": [ + "Heavy resource usage" + ], + "stars": 18000, + "language": "TypeScript", + "license": "AGPL-3.0", + "tags": [ + "AI", + "Desktop", + "Offline" + ], + "hardware_req": "Apple Silicon or NVIDIA GPU", + "hosting_type": "self-hosted", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jan.ai", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "lm-studio", + "name": "LM Studio", + "category": "AI Runners", + "is_open_source": false, + "pricing_model": "Free (Proprietary)", + "website": "https://lmstudio.ai", + "description": "Discover, download, and run local LLMs. 
Easy GUI for GGUF models.", + "alternatives": [ + "ollama", + "gpt4all" + ], + "tags": [ + "AI", + "Desktop", + "GUI" + ], + "hardware_req": "Apple Silicon or NVIDIA/AMD GPU", + "hosting_type": "self-hosted", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=lmstudio.ai", + "referral_url": "https://m.do.co/c/2ed27757a361", + "pros": [ + "Run LLMs locally with a clean GUI", + "No cloud dependency — fully offline", + "Supports GGUF and other quantized formats", + "Built-in model discovery and download" + ], + "cons": [ + "Requires decent hardware (8GB+ RAM)", + "Closed source despite local-first approach", + "Limited compared to CLI tools like Ollama" + ] + }, + { + "slug": "gpt4all", + "name": "GPT4All", + "category": "AI Runners", + "is_open_source": true, + "github_repo": "nomic-ai/gpt4all", + "website": "https://gpt4all.io", + "description": "Run open-source LLMs locally on your CPU and GPU. No internet required.", + "pros": [ + "One-click desktop installer — no terminal needed", + "Built-in RAG for chatting with your local documents", + "Runs on CPU — no GPU required for basic models" + ], + "cons": [ + "Slower on CPU" + ], + "stars": 65000, + "language": "C++", + "license": "Apache License 2.0", + "tags": [ + "AI", + "Desktop", + "CPU" + ], + "hosting_type": "self-hosted", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=gpt4all.io", + "deployment": { + "type": "docker-compose", + "local_path": "./.docker-deploy/gpt4all" + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "localai", + "name": "LocalAI", + "category": "AI Runners", + "is_open_source": true, + "github_repo": "mudler/LocalAI", + "website": "https://localai.io", + "description": "LocalAI, the free, Open Source OpenAI alternative. 
Drop-in replacement for OpenAI API.", + "pros": [ + "OpenAI API compatible", + "Runs on consumer hardware", + "No GPU required" + ], + "cons": [ + "Configuration heavy" + ], + "stars": 20000, + "language": "Go", + "license": "MIT License", + "tags": [ + "AI", + "API", + "Backend" + ], + "hosting_type": "self-hosted", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=localai.io", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "flowise", + "name": "Flowise", + "category": "AI Tools", + "is_open_source": true, + "github_repo": "FlowiseAI/Flowise", + "website": "https://flowiseai.com", + "description": "Drag & drop UI to build your customized LLM flow using LangChainJS.", + "pros": [ + "Low-code", + "Visual builder", + "Rich integrations" + ], + "cons": [ + "Node.js dependency" + ], + "stars": 28000, + "language": "TypeScript", + "license": "Apache License 2.0", + "tags": [ + "AI", + "Low-Code", + "LangChain" + ], + "hosting_type": "self-hosted", + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=flowiseai.com", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "llama-4", + "name": "Meta Llama 4", + "category": "AI Models", + "is_open_source": true, + "github_repo": "meta-llama/llama4", + "website": "https://llama.meta.com", + "description": "The latest generation of Llama. 'Maverick' architecture with 256K context. 
The new standard for open weights.", + "pros": [ + "Next-gen Maverick architecture — faster and smarter than Llama 3", + "256K context window — double that of most competitors", + "Native multimodal support for images, video, and text" + ], + "cons": [ + "High VRAM for top tiers" + ], + "stars": 45000, + "language": "Python", + "license": "Llama Community License", + "tags": [ + "AI", + "LLM", + "2026", + "SOTA" + ], + "hardware_req": "12GB VRAM (Medium), 48GB+ VRAM (Large)", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 12, + "context_window_tokens": 256000, + "parameters_total_b": 65, + "is_multimodal": true + }, + "logo_url": "/logos/meta.svg", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "gemma-3", + "name": "Google Gemma 3", + "category": "AI Models", + "is_open_source": true, + "github_repo": "google/gemma-3", + "website": "https://ai.google.dev/gemma", + "description": "Gemma 3 (27B) delivers GPT-5 class performance on a single GPU. Optimized for reasoning and agents.", + "pros": [ + "Incredible 27B performance", + "Agent-centric design", + "JAX/PyTorch native" + ], + "cons": [ + "Limited to 27B size currently" + ], + "stars": 15000, + "language": "Python", + "license": "Gemma License", + "tags": [ + "AI", + "LLM", + "Google", + "2026" + ], + "hardware_req": "24GB VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 1000000, + "parameters_total_b": 27, + "is_multimodal": true + }, + "logo_url": "/logos/gemma.svg", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "qwen-3", + "name": "Qwen 3 (235B)", + "category": "AI Models", + "is_open_source": true, + "github_repo": "QwenLM/Qwen3", + "website": "https://qwenlm.github.io", + "description": "Massive 235B param model. 
The absolute king of coding and mathematics benchmarks in 2026.", + "pros": [ + "Unmatched coding performance", + "Excellent math/reasoning", + "MoE efficiency" + ], + "cons": [ + "Requires multi-GPU setup" + ], + "stars": 35000, + "language": "Python", + "license": "Apache License 2.0", + "tags": [ + "AI", + "LLM", + "Coding", + "MoE" + ], + "hardware_req": "140GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 140, + "context_window_tokens": 1000000, + "parameters_total_b": 235, + "is_multimodal": false + }, + "logo_url": "/logos/qwen.svg", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "deepseek-v3-1", + "name": "DeepSeek V3.1", + "category": "AI Models", + "is_open_source": true, + "github_repo": "deepseek-ai/DeepSeek-V3.1", + "website": "https://deepseek.com", + "description": "Refined V3 architecture with improved instruction following and reduced hallucination rates.", + "pros": [ + "API pricing 10-50x cheaper than GPT-4 equivalents", + "Open weights with full model access — no API lock-in", + "Top-tier reasoning that rivals closed-source frontier models" + ], + "cons": [ + "Complex serving stack" + ], + "stars": 120000, + "language": "Python", + "license": "MIT License", + "tags": [ + "AI", + "LLM", + "Reasoning" + ], + "alternatives": [ + "deepseek", + "llama", + "mistral", + "qwen" + ], + "hardware_req": "80GB VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 80, + "context_window_tokens": 128000, + "parameters_total_b": 685, + "is_multimodal": false + }, + "logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com", + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "llama-3-1-8b", + "name": "Llama 3.1 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "The latest 8B parameter model from Meta, optimized for efficiency and edge devices.", + "pros": [ + "Open Source", + "High 
Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 128000, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-3-1-70b", + "name": "Llama 3.1 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "A powerful 70B model by Meta, rivaling closed-source top-tier models.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "49GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 128000, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-3-1-405b", + "name": "Llama 3.1 405B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "Meta's massive 405B frontier-class open weights model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "284GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 284, + 
"context_window_tokens": 128000, + "parameters_total_b": 405, + "parameters_active_b": 405, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-3-8b", + "name": "Llama 3 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "Meta's highly capable 8B model, a standard for local LLM inference.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 8192, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-3-70b", + "name": "Llama 3 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "Meta's previous generation 70B heavy-hitter.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "49GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 8192, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-2-7b", + "name": "Llama 2 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "The classic 7B 
model that started the open-weight revolution.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-2-13b", + "name": "Llama 2 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "A balanced 13B model from the Llama 2 series.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-2-70b", + "name": "Llama 2 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "The largest Llama 2 model, widely used for fine-tuning.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "49GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + 
"vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "code-llama-7b", + "name": "Code Llama 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "Specialized coding model based on Llama 2.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 100000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "code-llama-13b", + "name": "Code Llama 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "Mid-sized specialized coding model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 100000, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "code-llama-34b", + "name": "Code Llama 34B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "Large 
coding model with excellent performance.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 100000, + "parameters_total_b": 34, + "parameters_active_b": 34, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "code-llama-70b", + "name": "Code Llama 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llama.meta.com", + "description": "The most powerful Code Llama variant.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Open Weights", + "AI", + "LLM", + "Meta" + ], + "hardware_req": "49GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 100000, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "mistral-7b-v0-3", + "name": "Mistral 7B v0.3", + "category": "AI Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "Updated 7B model from Mistral AI with extended vocabulary and function calling.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": 
"both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 32000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "mistral-nemo-12b", + "name": "Mistral Nemo 12B", + "category": "AI Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "A native 12B model built in collaboration with NVIDIA, fitting in 24GB VRAM.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral AI" + ], + "hardware_req": "8GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 128000, + "parameters_total_b": 12, + "parameters_active_b": 12, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "mixtral-8x7b", + "name": "Mixtral 8x7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "The first high-performance open sparse Mixture-of-Experts (MoE) model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral AI" + ], + "hardware_req": "33GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 33, + "context_window_tokens": 32000, + "parameters_total_b": 47, + "parameters_active_b": 47, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "mixtral-8x22b", + "name": "Mixtral 8x22B", + "category": "AI 
Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "A massive MoE model setting new standards for open weights.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral AI" + ], + "hardware_req": "99GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 99, + "context_window_tokens": 65000, + "parameters_total_b": 141, + "parameters_active_b": 141, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "codestral-22b", + "name": "Codestral 22B", + "category": "AI Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "Mistral's first dedicated code model, proficient in 80+ languages.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral AI" + ], + "hardware_req": "15GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 15, + "context_window_tokens": 32000, + "parameters_total_b": 22, + "parameters_active_b": 22, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mathstral-7b", + "name": "Mathstral 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "Specialized model for math and reasoning tasks.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral 
AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 32000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "ministral-3b", + "name": "Ministral 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "Mistral's efficient edge model for mobile and low-latency use cases.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral AI" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 128000, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "ministral-8b", + "name": "Ministral 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://mistral.ai", + "description": "A powerful edge model bridging the gap between small and medium LLMs.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Europe", + "Mistral AI" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 128000, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "qwen-2-5-0-5b", + "name": "Qwen 2.5 0.5B", + "category": "AI Models", + "is_open_source": true, + "website": 
"https://qwenlm.github.io", + "description": "Tiny but capable model for extreme edge analytics.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 32000, + "parameters_total_b": 0.5, + "parameters_active_b": 0.5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-1-5b", + "name": "Qwen 2.5 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Small footprint model punching above its weight.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 32000, + "parameters_total_b": 1.5, + "parameters_active_b": 1.5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-3b", + "name": "Qwen 2.5 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Balanced 3B model, great for mobile inference.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + 
"AI", + "Alibaba" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 32000, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-7b", + "name": "Qwen 2.5 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "The 7B workhorse of the Qwen 2.5 family, beating Llama 3.1 8B in many benchmarks.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 128000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-14b", + "name": "Qwen 2.5 14B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "A sweet-spot size for dual-GPU or high VRAM consumer cards.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "10GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 128000, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": 
"/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-32b", + "name": "Qwen 2.5 32B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Ideally sized for 24GB VRAM cards like the RTX 3090/4090.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "22GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 128000, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-72b", + "name": "Qwen 2.5 72B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Top-tier open weights model, consistently ranking high on leaderboards.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "50GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 50, + "context_window_tokens": 128000, + "parameters_total_b": 72, + "parameters_active_b": 72, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-coder-1-5b", + "name": "Qwen 2.5 Coder 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Tiny coding assistant.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU 
inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 32000, + "parameters_total_b": 1.5, + "parameters_active_b": 1.5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-coder-7b", + "name": "Qwen 2.5 Coder 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "State-of-the-art 7B coding model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 128000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-5-coder-32b", + "name": "Qwen 2.5 Coder 32B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Powerful coding model fitting in consumer hardware.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "22GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 128000, + "parameters_total_b": 32, + 
"parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-vl-7b", + "name": "Qwen 2 VL 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Vision-Language model capable of understanding images and video.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 32000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": true + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen-2-vl-72b", + "name": "Qwen 2 VL 72B", + "category": "AI Models", + "is_open_source": true, + "website": "https://qwenlm.github.io", + "description": "Massive Vision-Language model for complex visual reasoning.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Alibaba Cloud", + "Qwen", + "LLM", + "AI", + "Alibaba" + ], + "hardware_req": "50GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 50, + "context_window_tokens": 32000, + "parameters_total_b": 72, + "parameters_active_b": 72, + "is_multimodal": true + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "gemma-2-2b", + "name": "Gemma 2 2B", + "category": "AI Models", + "is_open_source": true, + "website": "https://ai.google.dev/gemma", + "description": "Efficient 2B model by 
Google, distilled for high performance.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Gemma", + "Google", + "LLM", + "Google DeepMind", + "AI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 8192, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "gemma-2-9b", + "name": "Gemma 2 9B", + "category": "AI Models", + "is_open_source": true, + "website": "https://ai.google.dev/gemma", + "description": "Google's powerful 9B open model, outperforming larger predecessors.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Gemma", + "Google", + "LLM", + "Google DeepMind", + "AI" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 8192, + "parameters_total_b": 9, + "parameters_active_b": 9, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "gemma-2-27b", + "name": "Gemma 2 27B", + "category": "AI Models", + "is_open_source": true, + "website": "https://ai.google.dev/gemma", + "description": "Large-scale open model from Google designed for complex reasoning.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Gemma", + "Google", + "LLM", + "Google DeepMind", + "AI" + ], + 
"hardware_req": "19GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 19, + "context_window_tokens": 8192, + "parameters_total_b": 27, + "parameters_active_b": 27, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "codegemma-2b", + "name": "CodeGemma 2B", + "category": "AI Models", + "is_open_source": true, + "website": "https://ai.google.dev/gemma", + "description": "Fast, lightweight code completion model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Gemma", + "Google", + "LLM", + "Google DeepMind", + "AI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 8192, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "codegemma-7b", + "name": "CodeGemma 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://ai.google.dev/gemma", + "description": "Instruction-tuned coding model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Gemma", + "Google", + "LLM", + "Google DeepMind", + "AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 8192, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "recurrentgemma-2b", + "name": "RecurrentGemma 2B", + 
"category": "AI Models", + "is_open_source": true, + "website": "https://ai.google.dev/gemma", + "description": "Gemma architecture with recurrent neural network efficiency.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Gemma", + "Google", + "LLM", + "Google DeepMind", + "AI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 8192, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "palette-2b", + "name": "Palette 2B", + "category": "AI Models", + "is_open_source": true, + "website": "https://ai.google.dev/gemma", + "description": "Specialized vision-language model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Gemma", + "Google", + "LLM", + "Google DeepMind", + "AI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 8192, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "phi-3-5-mini", + "name": "Phi 3.5 Mini", + "category": "AI Models", + "is_open_source": true, + "website": "https://azure.microsoft.com/en-us/products/phi", + "description": "Latest lightweight powerhouse from Microsoft, beating many larger models.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": 
"Open Weights", + "tags": [ + "Phi", + "AI", + "LLM", + "Microsoft" + ], + "hardware_req": "3GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 128000, + "parameters_total_b": 3.8, + "parameters_active_b": 3.8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "phi-3-5-moe", + "name": "Phi 3.5 MoE", + "category": "AI Models", + "is_open_source": true, + "website": "https://azure.microsoft.com/en-us/products/phi", + "description": "Mixture-of-Experts model combining 16x3.8B experts.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Phi", + "AI", + "LLM", + "Microsoft" + ], + "hardware_req": "29GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 29, + "context_window_tokens": 128000, + "parameters_total_b": 42, + "parameters_active_b": 42, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "phi-3-5-vision", + "name": "Phi 3.5 Vision", + "category": "AI Models", + "is_open_source": true, + "website": "https://azure.microsoft.com/en-us/products/phi", + "description": "Multimodal version of Phi 3.5 capable of image analysis.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Phi", + "AI", + "LLM", + "Microsoft" + ], + "hardware_req": "3GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 128000, + "parameters_total_b": 4.2, + "parameters_active_b": 4.2, + "is_multimodal": true + }, + "referral_url": 
"https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "phi-3-mini", + "name": "Phi 3 Mini", + "category": "AI Models", + "is_open_source": true, + "website": "https://azure.microsoft.com/en-us/products/phi", + "description": "Highly capable 3.8B model trained on textbook data.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Phi", + "AI", + "LLM", + "Microsoft" + ], + "hardware_req": "3GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 128000, + "parameters_total_b": 3.8, + "parameters_active_b": 3.8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "phi-3-medium", + "name": "Phi 3 Medium", + "category": "AI Models", + "is_open_source": true, + "website": "https://azure.microsoft.com/en-us/products/phi", + "description": "14B parameter version of the Phi-3 family.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Phi", + "AI", + "LLM", + "Microsoft" + ], + "hardware_req": "10GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 128000, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "orca-2-13b", + "name": "Orca 2 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://azure.microsoft.com/en-us/products/phi", + "description": "Microsoft's research model exploring reasoning capabilities.", + "pros": [ + "Open Source", + "High 
Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Phi", + "AI", + "LLM", + "Microsoft" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "yi-1-5-6b", + "name": "Yi 1.5 6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://01.ai", + "description": "Strong 6B model from 01.AI.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "01.AI", + "Yi" + ], + "hardware_req": "4GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "yi-1-5-9b", + "name": "Yi 1.5 9B", + "category": "AI Models", + "is_open_source": true, + "website": "https://01.ai", + "description": "9B parameter model optimized for coding and math.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "01.AI", + "Yi" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 9, + "parameters_active_b": 9, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "yi-1-5-34b", + 
"name": "Yi 1.5 34B", + "category": "AI Models", + "is_open_source": true, + "website": "https://01.ai", + "description": "Highly rated 34B model, popular in the community.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "01.AI", + "Yi" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 4096, + "parameters_total_b": 34, + "parameters_active_b": 34, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "yi-large", + "name": "Yi Large", + "category": "AI Models", + "is_open_source": true, + "website": "https://01.ai", + "description": "Proprietary-class open weights model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "01.AI", + "Yi" + ], + "hardware_req": "70GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 70, + "context_window_tokens": 32000, + "parameters_total_b": 100, + "parameters_active_b": 100, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "command-r", + "name": "Command R", + "category": "AI Models", + "is_open_source": true, + "website": "https://cohere.com", + "description": "Optimized for RAG (Retrieval Augmented Generation) and tool use.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Cohere For AI", + "Cohere", + "LLM", + "RAG", + "AI" + ], + "hardware_req": "24GB+ VRAM", + 
"hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 128000, + "parameters_total_b": 35, + "parameters_active_b": 35, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "command-r-plus", + "name": "Command R+", + "category": "AI Models", + "is_open_source": true, + "website": "https://cohere.com", + "description": "Massive RAG-optimized model with advanced reasoning.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Cohere For AI", + "Cohere", + "LLM", + "RAG", + "AI" + ], + "hardware_req": "73GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 73, + "context_window_tokens": 128000, + "parameters_total_b": 104, + "parameters_active_b": 104, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "dolphin-2-9-llama-3-8b", + "name": "Dolphin 2.9 Llama 3 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://erichartford.com", + "description": "Uncensored fine-tune of Llama 3 8B.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Roleplay", + "Uncensored", + "LLM", + "Cognitive Computations", + "AI" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 8192, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "dolphin-2-9-2-qwen-2-72b", + "name": "Dolphin 2.9.2 Qwen 2 72B", + "category": "AI Models", + "is_open_source": 
true, + "website": "https://erichartford.com", + "description": "Powerful uncensored chat model based on Qwen 2.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Roleplay", + "Uncensored", + "LLM", + "Cognitive Computations", + "AI" + ], + "hardware_req": "50GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 50, + "context_window_tokens": 32000, + "parameters_total_b": 72, + "parameters_active_b": 72, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "dolphin-mixtral-8x7b", + "name": "Dolphin Mixtral 8x7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://erichartford.com", + "description": "One of the most popular uncensored MoE models.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Roleplay", + "Uncensored", + "LLM", + "Cognitive Computations", + "AI" + ], + "hardware_req": "33GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 33, + "context_window_tokens": 32000, + "parameters_total_b": 47, + "parameters_active_b": 47, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "hermes-3-llama-3-1-8b", + "name": "Hermes 3 Llama 3.1 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://nousresearch.com", + "description": "Unlock the full potential of Llama 3.1 with advanced agentic capabilities.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 
0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Fine-tune", + "AI", + "LLM", + "Nous Research" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 128000, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "hermes-3-llama-3-1-70b", + "name": "Hermes 3 Llama 3.1 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://nousresearch.com", + "description": "70B version of the Hermes 3 agentic fine-tune.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Fine-tune", + "AI", + "LLM", + "Nous Research" + ], + "hardware_req": "49GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 128000, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "nous-hermes-2-mixtral-8x7b", + "name": "Nous Hermes 2 Mixtral 8x7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://nousresearch.com", + "description": "High-quality instruction tuned Mixtral.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Fine-tune", + "AI", + "LLM", + "Nous Research" + ], + "hardware_req": "33GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 33, + "context_window_tokens": 32000, + "parameters_total_b": 47, + "parameters_active_b": 47, + "is_multimodal": false + 
}, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "solar-10-7b", + "name": "Solar 10.7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://upstage.ai", + "description": "Innovative 10.7B model created using depth up-scaling of Llama 2.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Upstage", + "Solar", + "LLM", + "Depth Upscaling", + "AI" + ], + "hardware_req": "7GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 7, + "context_window_tokens": 4096, + "parameters_total_b": 10.7, + "parameters_active_b": 10.7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "solar-pro", + "name": "Solar Pro", + "category": "AI Models", + "is_open_source": true, + "website": "https://upstage.ai", + "description": "Advanced scale-up of the Solar architecture.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Upstage", + "Solar", + "LLM", + "Depth Upscaling", + "AI" + ], + "hardware_req": "15GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 15, + "context_window_tokens": 4096, + "parameters_total_b": 22, + "parameters_active_b": 22, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "deepseek-coder-v2-16b", + "name": "DeepSeek Coder V2 16B", + "category": "AI Models", + "is_open_source": true, + "website": "https://deepseek.com", + "description": "Powerful coding-specific MoE model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + 
"Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Coding", + "AI", + "LLM", + "DeepSeek" + ], + "hardware_req": "11GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 11, + "context_window_tokens": 128000, + "parameters_total_b": 16, + "parameters_active_b": 16, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "deepseek-coder-v2-236b", + "name": "DeepSeek Coder V2 236B", + "category": "AI Models", + "is_open_source": true, + "website": "https://deepseek.com", + "description": "Massive coding model rivaling GPT-4 across benchmarks.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Coding", + "AI", + "LLM", + "DeepSeek" + ], + "hardware_req": "165GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 165, + "context_window_tokens": 128000, + "parameters_total_b": 236, + "parameters_active_b": 236, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "deepseek-llm-7b", + "name": "DeepSeek LLM 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://deepseek.com", + "description": "General purpose 7B chat model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Coding", + "AI", + "LLM", + "DeepSeek" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 32000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": 
"deepseek-llm-67b", + "name": "DeepSeek LLM 67B", + "category": "AI Models", + "is_open_source": true, + "website": "https://deepseek.com", + "description": "Large general purpose model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Coding", + "AI", + "LLM", + "DeepSeek" + ], + "hardware_req": "47GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 47, + "context_window_tokens": 32000, + "parameters_total_b": 67, + "parameters_active_b": 67, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "stable-lm-2-1-6b", + "name": "Stable LM 2 1.6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://stability.ai", + "description": "Very small, efficient model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Stability AI", + "AI", + "LLM" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1.6, + "parameters_active_b": 1.6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "stable-lm-2-12b", + "name": "Stable LM 2 12B", + "category": "AI Models", + "is_open_source": true, + "website": "https://stability.ai", + "description": "Balanced 12B model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Stability AI", + "AI", + "LLM" + ], + "hardware_req": "8GB+ VRAM", + "hosting_type": "both", + 
"ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 4096, + "parameters_total_b": 12, + "parameters_active_b": 12, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "stable-code-3b", + "name": "Stable Code 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://stability.ai", + "description": "Specialized 3B coding model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Stability AI", + "AI", + "LLM" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 16384, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "starling-lm-7b-alpha", + "name": "Starling LM 7B Alpha", + "category": "AI Models", + "is_open_source": true, + "website": "https://nexusflow.ai", + "description": "RLHF fine-tune known for high quality responses.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Nexusflow", + "RLHF" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 8192, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "starling-lm-7b-beta", + "name": "Starling LM 7B Beta", + "category": "AI Models", + "is_open_source": true, + "website": "https://nexusflow.ai", + "description": "Improved beta version of the Starling RLHF model.", + "pros": [ + "Open Source", + "High 
Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Nexusflow", + "RLHF" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 8192, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "openchat-3-5", + "name": "OpenChat 3.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://openchat.team", + "description": "Fine-tuned Mistral 7B using C-RLFT strategy.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "C-RLFT", + "AI", + "LLM", + "OpenChat" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 8192, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "openchat-3-6", + "name": "OpenChat 3.6", + "category": "AI Models", + "is_open_source": true, + "website": "https://openchat.team", + "description": "Updated version based on Llama 3 8B.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "C-RLFT", + "AI", + "LLM", + "OpenChat" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 8192, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": 
"https://m.do.co/c/2ed27757a361" + }, + { + "slug": "tinyllama-1-1b", + "name": "TinyLlama 1.1B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/jzhang38/TinyLlama", + "description": "The most popular ~1B model, trained on 3T tokens.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Small", + "AI", + "LLM", + "TinyLlama" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 2048, + "parameters_total_b": 1.1, + "parameters_active_b": 1.1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "falcon-2-11b", + "name": "Falcon 2 11B", + "category": "AI Models", + "is_open_source": true, + "website": "https://falconllm.tii.ae", + "description": "TII's efficient 11B model with strong reasoning capabilities.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Falcon", + "LLM", + "AI", + "TII" + ], + "hardware_req": "8GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 2048, + "parameters_total_b": 11, + "parameters_active_b": 11, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/falcon.svg" + }, + { + "slug": "falcon-180b", + "name": "Falcon 180B", + "category": "AI Models", + "is_open_source": true, + "website": "https://falconllm.tii.ae", + "description": "Massive open model, one of the largest available.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + 
"Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Falcon", + "LLM", + "AI", + "TII" + ], + "hardware_req": "126GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 126, + "context_window_tokens": 2048, + "parameters_total_b": 180, + "parameters_active_b": 180, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/falcon.svg" + }, + { + "slug": "falcon-40b", + "name": "Falcon 40B", + "category": "AI Models", + "is_open_source": true, + "website": "https://falconllm.tii.ae", + "description": "The original high-performance open model from TII.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Falcon", + "LLM", + "AI", + "TII" + ], + "hardware_req": "28GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 28, + "context_window_tokens": 2048, + "parameters_total_b": 40, + "parameters_active_b": 40, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/falcon.svg" + }, + { + "slug": "falcon-7b", + "name": "Falcon 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://falconllm.tii.ae", + "description": "Smaller variant of the Falcon family.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Falcon", + "LLM", + "AI", + "TII" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 2048, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url":
"https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/falcon.svg" + }, + { + "slug": "glm-4-9b", + "name": "GLM 4 9B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/THUDM/GLM-4", + "description": "Powerful multilingual model from Zhipu AI.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "GLM", + "Zhipu AI", + "LLM", + "AI" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 128000, + "parameters_total_b": 9, + "parameters_active_b": 9, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "chatglm3-6b", + "name": "ChatGLM3 6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/THUDM/GLM-4", + "description": "Optimized Chinese-English conversational model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "GLM", + "Zhipu AI", + "LLM", + "AI" + ], + "hardware_req": "4GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 32000, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "granite-3-0-8b-instruct", + "name": "Granite 3.0 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.ibm.com/granite", + "description": "IBM's enterprise-grade open model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": 
"Python", + "license": "Open Weights", + "tags": [ + "IBM", + "LLM", + "AI", + "Enterprise" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "granite-3-0-2b-instruct", + "name": "Granite 3.0 2B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.ibm.com/granite", + "description": "Efficient enterprise model for lower resource environments.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "IBM", + "LLM", + "AI", + "Enterprise" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "granite-code-3b", + "name": "Granite Code 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.ibm.com/granite", + "description": "IBM specialized code model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "IBM", + "LLM", + "AI", + "Enterprise" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "granite-code-8b", + "name": "Granite Code 8B", + "category": 
"AI Models", + "is_open_source": true, + "website": "https://www.ibm.com/granite", + "description": "Larger coding model from IBM.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "IBM", + "LLM", + "AI", + "Enterprise" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "flux-1-schnell", + "name": "Flux.1 Schnell", + "category": "AI Models", + "is_open_source": true, + "website": "https://blackforestlabs.ai", + "description": "Fastest state-of-the-art open image generation model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Black Forest Labs", + "Image Generation", + "AI", + "Diffusion" + ], + "hardware_req": "8GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 77, + "parameters_total_b": 12, + "parameters_active_b": 12, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/flux.svg" + }, + { + "slug": "flux-1-dev", + "name": "Flux.1 Dev", + "category": "AI Models", + "is_open_source": true, + "website": "https://blackforestlabs.ai", + "description": "Developer version of the powerful Flux image model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Black Forest Labs", + "Image Generation", + 
"AI", + "Diffusion" + ], + "hardware_req": "8GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 77, + "parameters_total_b": 12, + "parameters_active_b": 12, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/flux.svg" + }, + { + "slug": "sdxl-1-0", + "name": "SDXL 1.0", + "category": "AI Models", + "is_open_source": true, + "website": "https://stability.ai", + "description": "The benchmark for open source image generation.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Image Generation", + "AI", + "Diffusion", + "Stability AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 77, + "parameters_total_b": 6.6, + "parameters_active_b": 6.6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "sd-3-medium", + "name": "SD 3 Medium", + "category": "AI Models", + "is_open_source": true, + "website": "https://stability.ai", + "description": "Stability AI's latest medium-sized image model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Image Generation", + "AI", + "Diffusion", + "Stability AI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 77, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "stable-cascade", + "name": "Stable Cascade", + "category": "AI Models", + "is_open_source": true, + 
"website": "https://stability.ai", + "description": "Efficient cascade architecture for high detail images.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Image Generation", + "AI", + "Diffusion", + "Stability AI" + ], + "hardware_req": "3GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 77, + "parameters_total_b": 3.6, + "parameters_active_b": 3.6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "internlm-2-5-7b", + "name": "InternLM 2.5 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://internlm.intern-ai.org.cn", + "description": "High performance 7B model with strong reasoning.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Shanghai AI Lab", + "LLM", + "AI", + "InternLM" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 32000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "internlm-2-5-20b", + "name": "InternLM 2.5 20B", + "category": "AI Models", + "is_open_source": true, + "website": "https://internlm.intern-ai.org.cn", + "description": "Balanced 20B model filling the gap between 7B and 70B.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Shanghai AI Lab", + "LLM", + "AI", + "InternLM" + ], + 
"hardware_req": "14GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 14, + "context_window_tokens": 32000, + "parameters_total_b": 20, + "parameters_active_b": 20, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "baichuan-2-7b", + "name": "Baichuan 2 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.baichuan-ai.com", + "description": "Top tier Chinese-English bilingual model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Baichuan", + "LLM", + "AI", + "Baichuan Inc." + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "baichuan-2-13b", + "name": "Baichuan 2 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.baichuan-ai.com", + "description": "Larger variant of the popular Baichuan series.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Baichuan", + "LLM", + "AI", + "Baichuan Inc." 
+ ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "minicpm-2-4b", + "name": "MiniCPM 2.4B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/OpenBMB/MiniCPM", + "description": "High efficiency edge model optimization.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "OpenBMB", + "Mobile", + "LLM", + "Edge", + "AI" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 2.4, + "parameters_active_b": 2.4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "minicpm-v-2-6", + "name": "MiniCPM V 2.6", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/OpenBMB/MiniCPM", + "description": "Powerful multimodal model for mobile devices.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "OpenBMB", + "Mobile", + "LLM", + "Edge", + "AI" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "exaone-3-0-7-8b", + "name": "Exaone 3.0 7.8B", + "category": "AI Models", + "is_open_source": true, + "website": 
"https://www.lgresearch.ai", + "description": "LG's competitive open model entry.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LG", + "LG AI Research", + "LLM", + "AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7.8, + "parameters_active_b": 7.8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "jamba-v0-1", + "name": "Jamba v0.1", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.ai21.com/jamba", + "description": "First production-grade Mamba-Transformer hybrid model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Hybrid", + "LLM", + "AI", + "Mamba", + "AI21 Labs" + ], + "hardware_req": "36GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 36, + "context_window_tokens": 256000, + "parameters_total_b": 52, + "parameters_active_b": 52, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "grok-1", + "name": "Grok 1", + "category": "AI Models", + "is_open_source": true, + "website": "https://x.ai", + "description": "Massive 314B parameter open weights model from xAI.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Grok", + "LLM", + "AI", + "xAI" + ], + "hardware_req": "220GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 220, + 
"context_window_tokens": 8192, + "parameters_total_b": 314, + "parameters_active_b": 314, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/grok.svg" + }, + { + "slug": "deepseek-vl-7b", + "name": "DeepSeek VL 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://deepseek.com", + "description": "Vision language model from DeepSeek.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "DeepSeek", + "Vision", + "AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": true + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "deepseek-vl-1-3b", + "name": "DeepSeek VL 1.3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://deepseek.com", + "description": "Small vision language model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "DeepSeek", + "Vision", + "AI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1.3, + "parameters_active_b": 1.3, + "is_multimodal": true + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "whisper-large-v3", + "name": "Whisper Large v3", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/openai/whisper", + "description": "State-of-the-art automatic speech recognition model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" 
+ ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "ASR", + "Audio", + "OpenAI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 0, + "parameters_total_b": 1.5, + "parameters_active_b": 1.5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "whisper-medium", + "name": "Whisper Medium", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/openai/whisper", + "description": "Balanced speech recognition model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "ASR", + "Audio", + "OpenAI" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 0, + "parameters_total_b": 0.7, + "parameters_active_b": 0.7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "seamless-m4t-large", + "name": "Seamless M4T Large", + "category": "AI Models", + "is_open_source": true, + "website": "https://ai.meta.com/research/seamless-communication/", + "description": "Massive multilingual translation and transcription model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "Meta", + "Audio", + "Translation" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 0, + "parameters_total_b": 2.3, + "parameters_active_b": 2.3, + "is_multimodal": false + }, + 
"referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "starcoder-2-15b", + "name": "StarCoder 2 15B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/bigcode", + "description": "The successor to the original StarCoder, trained on The Stack v2.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "ServiceNow", + "BigCode", + "AI", + "Coding" + ], + "hardware_req": "10GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 16384, + "parameters_total_b": 15, + "parameters_active_b": 15, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "starcoder-2-7b", + "name": "StarCoder 2 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/bigcode", + "description": "Mid-sized coding model from the BigCode project.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "ServiceNow", + "BigCode", + "AI", + "Coding" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 16384, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "starcoder-2-3b", + "name": "StarCoder 2 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/bigcode", + "description": "Efficient coding assistant.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management 
complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "ServiceNow", + "BigCode", + "AI", + "Coding" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 16384, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "llava-1-6-34b", + "name": "LLaVA 1.6 34B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llava-vl.github.io", + "description": "High performance large multimodal model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Multimodal", + "Vision", + "LLaVA Team", + "AI" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 4096, + "parameters_total_b": 34, + "parameters_active_b": 34, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "llava-1-6-13b", + "name": "LLaVA 1.6 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llava-vl.github.io", + "description": "Improved visual reasoning capabilities.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Multimodal", + "Vision", + "LLaVA Team", + "AI" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { 
+ "slug": "llava-1-6-7b", + "name": "LLaVA 1.6 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://llava-vl.github.io", + "description": "Efficient multimodal model based on Vicuna.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Multimodal", + "Vision", + "LLaVA Team", + "AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": true + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "bakllava", + "name": "BakLLaVA", + "category": "AI Models", + "is_open_source": true, + "website": "https://llava-vl.github.io", + "description": "Mistral-based LLaVA variant.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Multimodal", + "Vision", + "LLaVA Team", + "AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": true + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "bloom-176b", + "name": "BLOOM 176B", + "category": "AI Models", + "is_open_source": true, + "website": "https://bigscience.huggingface.co", + "description": "The world's largest open multilingual language model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + 
"Multilingual", + "Open Science", + "BigScience", + "AI" + ], + "hardware_req": "123GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 123, + "context_window_tokens": 2048, + "parameters_total_b": 176, + "parameters_active_b": 176, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "bloomz-176b", + "name": "BLOOMZ 176B", + "category": "AI Models", + "is_open_source": true, + "website": "https://bigscience.huggingface.co", + "description": "Instruction tuned version of BLOOM.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Multilingual", + "Open Science", + "BigScience", + "AI" + ], + "hardware_req": "123GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 123, + "context_window_tokens": 2048, + "parameters_total_b": 176, + "parameters_active_b": 176, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "bloom-7b", + "name": "BLOOM 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://bigscience.huggingface.co", + "description": "Smaller variant of the BLOOM family.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Multilingual", + "Open Science", + "BigScience", + "AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 2048, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "pythia-12b", + "name": "Pythia 12B", + "category": "AI Models", + 
"is_open_source": true, + "website": "https://github.com/EleutherAI/pythia", + "description": "Designed to interpret and analyze LLM training dynamics.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Research", + "EleutherAI", + "AI" + ], + "hardware_req": "8GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 2048, + "parameters_total_b": 12, + "parameters_active_b": 12, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "pythia-6-9b", + "name": "Pythia 6.9B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/EleutherAI/pythia", + "description": "Standard research model size.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Research", + "EleutherAI", + "AI" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 2048, + "parameters_total_b": 6.9, + "parameters_active_b": 6.9, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "opt-175b", + "name": "OPT 175B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/facebookresearch/metaseq", + "description": "Meta's Open Pre-trained Transformer, matching GPT-3 performance.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Legacy", + "Meta" + ], + "hardware_req": 
"122GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 122, + "context_window_tokens": 2048, + "parameters_total_b": 175, + "parameters_active_b": 175, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "opt-66b", + "name": "OPT 66B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/facebookresearch/metaseq", + "description": "Large scale OPT model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Legacy", + "Meta" + ], + "hardware_req": "46GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 46, + "context_window_tokens": 2048, + "parameters_total_b": 66, + "parameters_active_b": 66, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "opt-30b", + "name": "OPT 30B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/facebookresearch/metaseq", + "description": "Mid-range OPT model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Legacy", + "Meta" + ], + "hardware_req": "21GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 2048, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "h2o-danube-2-1-8b", + "name": "H2O Danube 2 1.8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://h2o.ai", + "description": "Highly efficient mobile-class model.", + "pros": [ + "Open Source", + 
"High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "H2O", + "AI", + "H2O.ai" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 8192, + "parameters_total_b": 1.8, + "parameters_active_b": 1.8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "fuyu-8b", + "name": "Fuyu 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://adept.ai", + "description": "Simple architecture multimodal model for digital agents.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Multimodal", + "Adept", + "Agent", + "AI" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 8192, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "nexusraven-v2-13b", + "name": "NexusRaven V2 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://nexusflow.ai", + "description": "Specialized in function calling and tool use.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Nexusflow", + "Raven" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + 
"referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "xverse-65b", + "name": "Xverse 65B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/xverse-ai", + "description": "Large multilingual model trained from scratch.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Shenzhen Yuanxiang", + "Multilingual" + ], + "hardware_req": "46GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 46, + "context_window_tokens": 4096, + "parameters_total_b": 65, + "parameters_active_b": 65, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "xverse-13b", + "name": "Xverse 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/xverse-ai", + "description": "Efficient multilingual model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Shenzhen Yuanxiang", + "Multilingual" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "aquila2-34b", + "name": "Aquila2 34B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/FlagAI-Open/FlagAI", + "description": "Strong performance on reasoning benchmarks.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": 
"Python", + "license": "Open Weights", + "tags": [ + "LLM", + "BAAI", + "AI" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 4096, + "parameters_total_b": 34, + "parameters_active_b": 34, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "telechat-12b", + "name": "TeleChat 12B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/Tele-AI/Telechat", + "description": "Telecommunications oriented LLM.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Telecom", + "China Telecom", + "AI" + ], + "hardware_req": "8GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 8, + "context_window_tokens": 4096, + "parameters_total_b": 12, + "parameters_active_b": 12, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "orion-14b", + "name": "Orion 14B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/OrionStarAI/Orion", + "description": "Chat and conversational model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Orion", + "AI", + "OrionStar" + ], + "hardware_req": "10GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "seallm-7b-v2-5", + "name": "SeaLLM 7B v2.5", + "category": "AI Models", + "is_open_source": true, + 
"website": "https://huggingface.co/SeaLLMs", + "description": "State-of-the-art multilingual LLM for Southeast Asian languages.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Southeast Asia", + "AI", + "Alibaba (sea-lion)" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "openbiollm-8b", + "name": "OpenBioLLM 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/saama", + "description": "Advanced medical LLM outperforming GPT-4 on biomedical benchmarks.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Saama AI", + "AI", + "LLM", + "Medical", + "Biology" + ], + "hardware_req": "6GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "openbiollm-70b", + "name": "OpenBioLLM 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/saama", + "description": "Massive scale biomedical research model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Saama AI", + "AI", + "LLM", + "Medical", + "Biology" + ], + 
"hardware_req": "49GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "meditron-70b", + "name": "Meditron 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/epfl-llm", + "description": "Open-access LLM adapted to the medical domain.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "EPFL", + "AI", + "Medical" + ], + "hardware_req": "49GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "meditron-7b", + "name": "Meditron 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/epfl-llm", + "description": "Efficient medical assistant model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "EPFL", + "AI", + "Medical" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "gorilla-openfunctions-v2", + "name": "Gorilla OpenFunctions v2", + "category": "AI Models", + "is_open_source": true, + "website": "https://gorilla.cs.berkeley.edu", + "description": 
"The best open source model for API function calling.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "AI", + "LLM", + "Agents", + "Berkeley", + "API" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "wizardlm-2-8x22b", + "name": "WizardLM 2 8x22B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/nlpxucan/WizardLM", + "description": "Top-tier reasoning model from Microsoft using Evol-Instruct.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Microsoft", + "AI", + "Evol-Instruct" + ], + "hardware_req": "99GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 99, + "context_window_tokens": 65536, + "parameters_total_b": 141, + "parameters_active_b": 141, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "wizardlm-2-7b", + "name": "WizardLM 2 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/nlpxucan/WizardLM", + "description": "Fastest and most capable 7B model for complex instructions.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Microsoft", + "AI", + "Evol-Instruct" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", 
+ "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 32000, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "vicuna-13b-v1-5", + "name": "Vicuna 13B v1.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://lmsys.org", + "description": "The classic open chat model based on Llama 2.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "LMSYS", + "Chatbot" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 16384, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "vicuna-7b-v1-5", + "name": "Vicuna 7B v1.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://lmsys.org", + "description": "Highly efficient chat model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "LMSYS", + "Chatbot" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 16384, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "zephyr-7b-beta", + "name": "Zephyr 7B Beta", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/HuggingFaceH4", + "description": "Pioneered DPO (Direct Preference Optimization) for better alignment without RLHF.", + "pros": 
[ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Hugging Face H4", + "DPO" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 8192, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "zephyr-141b-a39b", + "name": "Zephyr 141B A39B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/HuggingFaceH4", + "description": "Experimental DPO fine-tune of Mixtral 8x22B.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "Hugging Face H4", + "DPO" + ], + "hardware_req": "99GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 99, + "context_window_tokens": 4096, + "parameters_total_b": 141, + "parameters_active_b": 141, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "openelm-3b", + "name": "OpenELM 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/apple/corenet", + "description": "Apple's efficiently layered open model for devices.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "On-Device", + "Apple" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 2048, + "parameters_total_b": 3, + 
"parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "openelm-1-1b", + "name": "OpenELM 1.1B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/apple/corenet", + "description": "Tiny Apple model for extreme edge cases.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "On-Device", + "Apple" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 2048, + "parameters_total_b": 1.1, + "parameters_active_b": 1.1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mamba-2-8b", + "name": "Mamba 2.8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/state-spaces/mamba", + "description": "Linear-time sequence modeling with state space architecture.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Cartesia", + "AI", + "SSM", + "LLM", + "Non-Transformer" + ], + "hardware_req": "2GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 0, + "parameters_total_b": 2.8, + "parameters_active_b": 2.8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "mamba-1-4b", + "name": "Mamba 1.4B", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/state-spaces/mamba", + "description": "Efficient non-transformer architecture.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires 
GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Cartesia", + "AI", + "SSM", + "LLM", + "Non-Transformer" + ], + "hardware_req": "1GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 0, + "parameters_total_b": 1.4, + "parameters_active_b": 1.4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "rwkv-6-14b", + "name": "RWKV 6 14B", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.rwkv.com", + "description": "RNN with Transformer-level performance and infinite context potential.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "RNN", + "BlinkDL" + ], + "hardware_req": "10GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 0, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "rwkv-6-7b", + "name": "RWKV 6 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.rwkv.com", + "description": "Efficient RNN language model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "AI", + "RNN", + "BlinkDL" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 0, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": 
"cerebras-gpt-13b", + "name": "Cerebras GPT 13B", + "category": "AI Models", + "is_open_source": true, + "website": "https://www.cerebras.net", + "description": "Trained on the massive CS-2 wafer-scale engine.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "LLM", + "Cerebras", + "AI", + "Wafer-Scale" + ], + "hardware_req": "9GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 2048, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361" + }, + { + "slug": "qwen-audio-chat", + "name": "Qwen-Audio-Chat", + "category": "AI Models", + "is_open_source": true, + "website": "https://github.com/QwenLM/Qwen-Audio", + "description": "Universal audio understanding model.", + "pros": [ + "Open Source", + "High Performance", + "Run Locally" + ], + "cons": [ + "Requires GPU inference", + "Management complexity" + ], + "stars": 0, + "language": "Python", + "license": "Open Weights", + "tags": [ + "Audio", + "AI", + "Multimodal", + "Alibaba Cloud" + ], + "hardware_req": "5GB+ VRAM", + "hosting_type": "both", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 0, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-7b-instruct", + "name": "Qwen2.5 7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct", + "description": "Open source model Qwen/Qwen2.5-7B-Instruct. 
1073 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1073, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-7B", + "base_model:finetune:Qwen/Qwen2.5-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-7b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-0.6b", + "name": "Qwen3 0.6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-0.6B", + "description": "Open source model Qwen/Qwen3-0.6B. 
1083 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1083, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-0.6B-Base", + "base_model:finetune:Qwen/Qwen3-0.6B-Base", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-0.6b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "gpt2", + "name": "Gpt2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2", + "description": "Open source model openai-community/gpt2. 
3114 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3114, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "tflite", + "rust", + "onnx", + "safetensors", + "gpt2", + "exbert", + "en", + "doi:10.57967/hf/0039", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt2" + }, + { + "slug": "qwen2.5-1.5b-instruct", + "name": "Qwen2.5 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct", + "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct. 
617 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 617, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:finetune:Qwen/Qwen2.5-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-1.5b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-3b-instruct", + "name": "Qwen2.5 3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct", + "description": "Open source model Qwen/Qwen2.5-3B-Instruct. 
404 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 404, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-3B", + "base_model:finetune:Qwen/Qwen2.5-3B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-3b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "llama-3.1-8b-instruct", + "name": "Llama 3.1 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct", + "description": "Open source model meta-llama/Llama-3.1-8B-Instruct. 
5467 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 5467, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-8B", + "base_model:finetune:meta-llama/Llama-3.1-8B", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.1-8b-instruct", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "gpt-oss-20b", + "name": "Gpt Oss 20B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai/gpt-oss-20b", + "description": "Open source model openai/gpt-oss-20b. 
4378 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4378, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gpt_oss", + "vllm", + "conversational", + "arxiv:2508.10925", + "endpoints_compatible", + "8-bit", + "mxfp4", + "deploy:azure", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 14, + "context_window_tokens": 4096, + "parameters_total_b": 20, + "parameters_active_b": 20, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt-oss-20b" + }, + { + "slug": "qwen2.5-0.5b-instruct", + "name": "Qwen2.5 0.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct", + "description": "Open source model Qwen/Qwen2.5-0.5B-Instruct. 463 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 463, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-0.5B", + "base_model:finetune:Qwen/Qwen2.5-0.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-0.5b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-4b", + "name": "Qwen3 4B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B", + "description": "Open source model 
Qwen/Qwen3-4B. 552 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 552, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-4B-Base", + "base_model:finetune:Qwen/Qwen3-4B-Base", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-4b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-8b", + "name": "Qwen3 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-8B", + "description": "Open source model Qwen/Qwen3-8B. 
940 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 940, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-8B-Base", + "base_model:finetune:Qwen/Qwen3-8B-Base", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-8b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-32b-instruct", + "name": "Qwen2.5 32B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct", + "description": "Open source model Qwen/Qwen2.5-32B-Instruct. 
328 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 328, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-32B", + "base_model:finetune:Qwen/Qwen2.5-32B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-32b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "opt-125m", + "name": "Opt 125M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/facebook/opt-125m", + "description": "Open source model facebook/opt-125m. 233 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 233, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "opt", + "en", + "arxiv:2205.01068", + "arxiv:2005.14165", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "opt-125m" + }, + { + "slug": "qwen3-1.7b", + "name": "Qwen3 1.7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-1.7B", + "description": "Open source model Qwen/Qwen3-1.7B. 
422 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 422, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-1.7B-Base", + "base_model:finetune:Qwen/Qwen3-1.7B-Base", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-1.7b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "tiny-qwen2forcausallm-2.5", + "name": "Tiny Qwen2Forcausallm 2.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen2ForCausalLM-2.5", + "description": "Open source model trl-internal-testing/tiny-Qwen2ForCausalLM-2.5. 
3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "trl", + "conversational", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "tiny-qwen2forcausallm-2.5", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "dolphin-2.9.1-yi-1.5-34b", + "name": "Dolphin 2.9.1 Yi 1.5 34B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/dphn/dolphin-2.9.1-yi-1.5-34b", + "description": "Open source model dphn/dolphin-2.9.1-yi-1.5-34b. 54 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 54, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "generated_from_trainer", + "axolotl", + "conversational", + "dataset:cognitivecomputations/Dolphin-2.9", + "dataset:teknium/OpenHermes-2.5", + "dataset:m-a-p/CodeFeedback-Filtered-Instruction", + "dataset:cognitivecomputations/dolphin-coder", + "dataset:cognitivecomputations/samantha-data", + "dataset:microsoft/orca-math-word-problems-200k", + "dataset:Locutusque/function-calling-chatml", + "dataset:internlm/Agent-FLAN", + "base_model:01-ai/Yi-1.5-34B", + "base_model:finetune:01-ai/Yi-1.5-34B", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 4096, + "parameters_total_b": 34, + 
"parameters_active_b": 34, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "dolphin-2.9.1-yi-1.5-34b" + }, + { + "slug": "qwen3-embedding-0.6b", + "name": "Qwen3 Embedding 0.6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Embedding-0.6B", + "description": "Open source model Qwen/Qwen3-Embedding-0.6B. 879 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 879, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen3", + "transformers", + "sentence-similarity", + "feature-extraction", + "text-embeddings-inference", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-0.6B-Base", + "base_model:finetune:Qwen/Qwen3-0.6B-Base", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-embedding-0.6b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "gpt-oss-120b", + "name": "Gpt Oss 120B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai/gpt-oss-120b", + "description": "Open source model openai/gpt-oss-120b. 
4503 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4503, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gpt_oss", + "vllm", + "conversational", + "arxiv:2508.10925", + "endpoints_compatible", + "8-bit", + "mxfp4", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 84, + "context_window_tokens": 4096, + "parameters_total_b": 120, + "parameters_active_b": 120, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt-oss-120b" + }, + { + "slug": "qwen3-4b-instruct-2507", + "name": "Qwen3 4B Instruct 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507", + "description": "Open source model Qwen/Qwen3-4B-Instruct-2507. 730 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 730, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-4b-instruct-2507", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "moondream2", + "name": "Moondream2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/vikhyatk/moondream2", + "description": "Open source model vikhyatk/moondream2. 
1373 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1373, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "moondream1", + "image-text-to-text", + "custom_code", + "doi:10.57967/hf/6762", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "moondream2" + }, + { + "slug": "llama-3.2-1b-instruct", + "name": "Llama 3.2 1B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct", + "description": "Open source model meta-llama/Llama-3.2-1B-Instruct. 1292 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1292, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.2-1b-instruct", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen2-1.5b-instruct", + "name": "Qwen2 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct", 
+ "description": "Open source model Qwen/Qwen2-1.5B-Instruct. 158 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 158, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2-1.5b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-coder-0.5b-instruct", + "name": "Qwen2.5 Coder 0.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-0.5B-Instruct. 
64 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 64, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-0.5B", + "base_model:finetune:Qwen/Qwen2.5-Coder-0.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-0.5b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "kimi-k2.5", + "name": "Kimi K2.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mlx-community/Kimi-K2.5", + "description": "Open source model mlx-community/Kimi-K2.5. 
28 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 28, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "kimi_k25", + "conversational", + "custom_code", + "base_model:moonshotai/Kimi-K2.5", + "base_model:quantized:moonshotai/Kimi-K2.5", + "4-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "kimi-k2.5" + }, + { + "slug": "mistral-7b-instruct-v0.2", + "name": "Mistral 7B Instruct V0.2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2", + "description": "Open source model mistralai/Mistral-7B-Instruct-v0.2. 3075 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3075, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "mistral", + "finetuned", + "mistral-common", + "conversational", + "arxiv:2310.06825", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "mistral-7b-instruct-v0.2", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "qwen3-30b-a3b-instruct-2507", + "name": "Qwen3 30B A3B Instruct 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507", + "description": "Open source 
model Qwen/Qwen3-30B-A3B-Instruct-2507. 766 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 766, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2402.17463", + "arxiv:2407.02490", + "arxiv:2501.15383", + "arxiv:2404.06654", + "arxiv:2505.09388", + "eval-results", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-30b-a3b-instruct-2507", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "llm-jp-3-3.7b-instruct", + "name": "Llm Jp 3 3.7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct", + "description": "Open source model llm-jp/llm-jp-3-3.7b-instruct. 
13 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "en", + "ja", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llm-jp-3-3.7b-instruct" + }, + { + "slug": "llama-3.2-3b-instruct", + "name": "Llama 3.2 3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct", + "description": "Open source model meta-llama/Llama-3.2-3B-Instruct. 1986 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1986, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.2-3b-instruct", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "distilgpt2", + "name": "Distilgpt2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/distilbert/distilgpt2", + "description": "Open 
source model distilbert/distilgpt2. 609 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 609, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "tflite", + "rust", + "coreml", + "safetensors", + "gpt2", + "exbert", + "en", + "dataset:openwebtext", + "arxiv:1910.01108", + "arxiv:2201.08542", + "arxiv:2203.12574", + "arxiv:1910.09700", + "arxiv:1503.02531", + "model-index", + "co2_eq_emissions", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "distilgpt2" + }, + { + "slug": "qwen3-embedding-8b", + "name": "Qwen3 Embedding 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Embedding-8B", + "description": "Open source model Qwen/Qwen3-Embedding-8B. 
584 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 584, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen3", + "transformers", + "sentence-similarity", + "feature-extraction", + "text-embeddings-inference", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-8B-Base", + "base_model:finetune:Qwen/Qwen3-8B-Base", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-embedding-8b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "meta-llama-3-8b", + "name": "Meta Llama 3 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", + "description": "Open source model meta-llama/Meta-Llama-3-8B. 
6458 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 6458, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "meta-llama-3-8b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "tinyllama-1.1b-chat-v1.0", + "name": "Tinyllama 1.1B Chat V1.0", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0", + "description": "Open source model TinyLlama/TinyLlama-1.1B-Chat-v1.0. 
1526 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1526, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "en", + "dataset:cerebras/SlimPajama-627B", + "dataset:bigcode/starcoderdata", + "dataset:HuggingFaceH4/ultrachat_200k", + "dataset:HuggingFaceH4/ultrafeedback_binarized", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "tinyllama-1.1b-chat-v1.0" + }, + { + "slug": "glm-4.7-flash", + "name": "Glm 4.7 Flash", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/zai-org/GLM-4.7-Flash", + "description": "Open source model zai-org/GLM-4.7-Flash. 
1538 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1538, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "conversational", + "en", + "zh", + "arxiv:2508.06471", + "eval-results", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-4.7-flash" + }, + { + "slug": "llama-3.2-1b", + "name": "Llama 3.2 1B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-1B", + "description": "Open source model meta-llama/Llama-3.2-1B. 2295 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2295, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.2-1b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen3-32b", + "name": "Qwen3 32B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-32B", + "description": "Open source model Qwen/Qwen3-32B. 
656 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 656, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-32b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "llama-3.2-1b-instruct-fp8-dynamic", + "name": "Llama 3.2 1B Instruct Fp8 Dynamic", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic", + "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic. 
3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "safetensors", + "llama", + "fp8", + "vllm", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.2-1B-Instruct", + "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.2-1b-instruct-fp8-dynamic", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen2.5-coder-1.5b-instruct", + "name": "Qwen2.5 Coder 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-1.5B-Instruct. 
106 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 106, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-1.5B", + "base_model:finetune:Qwen/Qwen2.5-Coder-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-1.5b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "meta-llama-3-8b-instruct", + "name": "Meta Llama 3 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct", + "description": "Open source model meta-llama/Meta-Llama-3-8B-Instruct. 
4380 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4380, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "meta-llama-3-8b-instruct", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "gemma-3-1b-it", + "name": "Gemma 3 1B It", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/google/gemma-3-1b-it", + "description": "Open source model google/gemma-3-1b-it. 
842 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 842, + "language": "Python", + "license": "gemma", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gemma3_text", + "conversational", + "arxiv:1905.07830", + "arxiv:1905.10044", + "arxiv:1911.11641", + "arxiv:1904.09728", + "arxiv:1705.03551", + "arxiv:1911.01547", + "arxiv:1907.10641", + "arxiv:1903.00161", + "arxiv:2009.03300", + "arxiv:2304.06364", + "arxiv:2103.03874", + "arxiv:2110.14168", + "arxiv:2311.12022", + "arxiv:2108.07732", + "arxiv:2107.03374", + "arxiv:2210.03057", + "arxiv:2106.03193", + "arxiv:1910.11856", + "arxiv:2502.12404", + "arxiv:2502.21228", + "arxiv:2404.16816", + "arxiv:2104.12756", + "arxiv:2311.16502", + "arxiv:2203.10244", + "arxiv:2404.12390", + "arxiv:1810.12440", + "arxiv:1908.02660", + "arxiv:2312.11805", + "base_model:google/gemma-3-1b-pt", + "base_model:finetune:google/gemma-3-1b-pt", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gemma-3-1b-it", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "phi-2", + "name": "Phi 2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/phi-2", + "description": "Open source model microsoft/phi-2. 
3425 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3425, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi", + "nlp", + "code", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "phi-2", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "qwen2.5-coder-7b-instruct", + "name": "Qwen2.5 Coder 7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct. 646 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 646, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-7B", + "base_model:finetune:Qwen/Qwen2.5-Coder-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-7b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-7b", + "name": "Qwen2.5 7B", + "category": "AI Models", 
+ "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-7B", + "description": "Open source model Qwen/Qwen2.5-7B. 264 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 264, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-7b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-r1-distill-qwen-1.5b", + "name": "Deepseek R1 Distill Qwen 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B. 
1446 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1446, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-distill-qwen-1.5b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-v3", + "name": "Deepseek V3", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3", + "description": "Open source model deepseek-ai/DeepSeek-V3. 4024 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4024, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2412.19437", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-v3" + }, + { + "slug": "gpt2-large", + "name": "Gpt2 Large", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2-large", + "description": "Open source model openai-community/gpt2-large. 
344 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 344, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "onnx", + "safetensors", + "gpt2", + "en", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt2-large" + }, + { + "slug": "glm-4.7-flash-mlx-8bit", + "name": "Glm 4.7 Flash Mlx 8Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-8bit", + "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-8bit. 
9 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 9, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "mlx", + "conversational", + "en", + "zh", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "8-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-4.7-flash-mlx-8bit" + }, + { + "slug": "glm-4.7-flash-mlx-6bit", + "name": "Glm 4.7 Flash Mlx 6Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-6bit", + "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-6bit. 
7 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 7, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "mlx", + "conversational", + "en", + "zh", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "6-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-4.7-flash-mlx-6bit" + }, + { + "slug": "qwen3-0.6b-fp8", + "name": "Qwen3 0.6B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-0.6B-FP8", + "description": "Open source model Qwen/Qwen3-0.6B-FP8. 56 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 56, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-0.6B", + "base_model:quantized:Qwen/Qwen3-0.6B", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-0.6b-fp8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "llama-3.1-8b", + "name": "Llama 3.1 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-8B", + "description": "Open source model 
meta-llama/Llama-3.1-8B. 2065 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2065, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.1-8b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "pythia-160m", + "name": "Pythia 160M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/EleutherAI/pythia-160m", + "description": "Open source model EleutherAI/pythia-160m. 
38 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 38, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "gpt_neox", + "causal-lm", + "pythia", + "en", + "dataset:EleutherAI/pile", + "arxiv:2304.01373", + "arxiv:2101.00027", + "arxiv:2201.07311", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "pythia-160m" + }, + { + "slug": "deepseek-r1-distill-qwen-32b", + "name": "Deepseek R1 Distill Qwen 32B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B. 
1517 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1517, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-distill-qwen-32b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "hunyuanocr", + "name": "Hunyuanocr", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/HunyuanOCR", + "description": "Open source model tencent/HunyuanOCR. 553 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 553, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "hunyuan_vl", + "ocr", + "hunyuan", + "vision-language", + "image-to-text", + "1B", + "end-to-end", + "image-text-to-text", + "conversational", + "multilingual", + "arxiv:2511.19575", + "base_model:tencent/HunyuanOCR", + "base_model:finetune:tencent/HunyuanOCR", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "hunyuanocr" + }, + { + "slug": "qwen3-30b-a3b", + "name": "Qwen3 30B A3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B", + "description": "Open 
source model Qwen/Qwen3-30B-A3B. 855 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 855, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-30B-A3B-Base", + "base_model:finetune:Qwen/Qwen3-30B-A3B-Base", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-30b-a3b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-0.5b", + "name": "Qwen2.5 0.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B", + "description": "Open source model Qwen/Qwen2.5-0.5B. 
372 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 372, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-0.5b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-32b-instruct-awq", + "name": "Qwen2.5 32B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-32B-Instruct-AWQ. 
94 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 94, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-32B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-32B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-32b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "nvidia-nemotron-3-nano-30b-a3b-fp8", + "name": "Nvidia Nemotron 3 Nano 30B A3B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8", + "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8. 
284 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 284, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "nemotron_h", + "feature-extraction", + "nvidia", + "pytorch", + "conversational", + "custom_code", + "en", + "es", + "fr", + "de", + "ja", + "it", + "dataset:nvidia/Nemotron-Pretraining-Code-v1", + "dataset:nvidia/Nemotron-CC-v2", + "dataset:nvidia/Nemotron-Pretraining-SFT-v1", + "dataset:nvidia/Nemotron-CC-Math-v1", + "dataset:nvidia/Nemotron-Pretraining-Code-v2", + "dataset:nvidia/Nemotron-Pretraining-Specialized-v1", + "dataset:nvidia/Nemotron-CC-v2.1", + "dataset:nvidia/Nemotron-CC-Code-v1", + "dataset:nvidia/Nemotron-Pretraining-Dataset-sample", + "dataset:nvidia/Nemotron-Competitive-Programming-v1", + "dataset:nvidia/Nemotron-Math-v2", + "dataset:nvidia/Nemotron-Agentic-v1", + "dataset:nvidia/Nemotron-Math-Proofs-v1", + "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1", + "dataset:nvidia/Nemotron-Science-v1", + "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend", + "arxiv:2512.20848", + "arxiv:2512.20856", + "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "eval-results", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "nvidia-nemotron-3-nano-30b-a3b-fp8" + }, + { + "slug": "qwen2.5-14b-instruct", + "name": "Qwen2.5 14B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct", + "description": "Open source model Qwen/Qwen2.5-14B-Instruct. 
312 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 312, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-14B", + "base_model:finetune:Qwen/Qwen2.5-14B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-14b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "nvidia-nemotron-3-nano-30b-a3b-bf16", + "name": "Nvidia Nemotron 3 Nano 30B A3B Bf16", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16. 
634 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 634, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "nemotron_h", + "feature-extraction", + "nvidia", + "pytorch", + "conversational", + "custom_code", + "en", + "es", + "fr", + "de", + "ja", + "it", + "dataset:nvidia/Nemotron-Pretraining-Code-v1", + "dataset:nvidia/Nemotron-CC-v2", + "dataset:nvidia/Nemotron-Pretraining-SFT-v1", + "dataset:nvidia/Nemotron-CC-Math-v1", + "dataset:nvidia/Nemotron-Pretraining-Code-v2", + "dataset:nvidia/Nemotron-Pretraining-Specialized-v1", + "dataset:nvidia/Nemotron-CC-v2.1", + "dataset:nvidia/Nemotron-CC-Code-v1", + "dataset:nvidia/Nemotron-Pretraining-Dataset-sample", + "dataset:nvidia/Nemotron-Competitive-Programming-v1", + "dataset:nvidia/Nemotron-Math-v2", + "dataset:nvidia/Nemotron-Agentic-v1", + "dataset:nvidia/Nemotron-Math-Proofs-v1", + "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1", + "dataset:nvidia/Nemotron-Science-v1", + "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend", + "arxiv:2512.20848", + "arxiv:2512.20856", + "eval-results", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "nvidia-nemotron-3-nano-30b-a3b-bf16" + }, + { + "slug": "openelm-1_1b-instruct", + "name": "Openelm 1_1B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/apple/OpenELM-1_1B-Instruct", + "description": "Open source model apple/OpenELM-1_1B-Instruct. 
72 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 72, + "language": "Python", + "license": "apple-amlr", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "openelm", + "custom_code", + "arxiv:2404.14619", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "openelm-1_1b-instruct" + }, + { + "slug": "tiny-random-llamaforcausallm", + "name": "Tiny Random Llamaforcausallm", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/hmellor/tiny-random-LlamaForCausalLM", + "description": "Open source model hmellor/tiny-random-LlamaForCausalLM. 0 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 0, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "tiny-random-llamaforcausallm", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen3-next-80b-a3b-instruct", + "name": "Qwen3 Next 80B A3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct", + "description": "Open source model Qwen/Qwen3-Next-80B-A3B-Instruct. 
937 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 937, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_next", + "conversational", + "arxiv:2309.00071", + "arxiv:2404.06654", + "arxiv:2505.09388", + "arxiv:2501.15383", + "eval-results", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 56, + "context_window_tokens": 4096, + "parameters_total_b": 80, + "parameters_active_b": 80, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-next-80b-a3b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "h2ovl-mississippi-800m", + "name": "H2Ovl Mississippi 800M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/h2oai/h2ovl-mississippi-800m", + "description": "Open source model h2oai/h2ovl-mississippi-800m. 
39 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 39, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "h2ovl_chat", + "feature-extraction", + "gpt", + "llm", + "multimodal large language model", + "ocr", + "conversational", + "custom_code", + "en", + "arxiv:2410.13611", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "h2ovl-mississippi-800m" + }, + { + "slug": "bloomz-560m", + "name": "Bloomz 560M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/bigscience/bloomz-560m", + "description": "Open source model bigscience/bloomz-560m. 137 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 137, + "language": "Python", + "license": "bigscience-bloom-rail-1.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tensorboard", + "safetensors", + "bloom", + "ak", + "ar", + "as", + "bm", + "bn", + "ca", + "code", + "en", + "es", + "eu", + "fon", + "fr", + "gu", + "hi", + "id", + "ig", + "ki", + "kn", + "lg", + "ln", + "ml", + "mr", + "ne", + "nso", + "ny", + "or", + "pa", + "pt", + "rn", + "rw", + "sn", + "st", + "sw", + "ta", + "te", + "tn", + "ts", + "tum", + "tw", + "ur", + "vi", + "wo", + "xh", + "yo", + "zh", + "zu", + "dataset:bigscience/xP3", + "arxiv:2211.01786", + "model-index", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + 
"parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "bloomz-560m" + }, + { + "slug": "qwen2.5-1.5b-quantized.w8a8", + "name": "Qwen2.5 1.5B Quantized.W8A8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Qwen2.5-1.5B-quantized.w8a8", + "description": "Open source model RedHatAI/Qwen2.5-1.5B-quantized.w8a8. 2 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "safetensors", + "qwen2", + "chat", + "neuralmagic", + "llmcompressor", + "conversational", + "en", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:quantized:Qwen/Qwen2.5-1.5B", + "8-bit", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-1.5b-quantized.w8a8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "h2ovl-mississippi-2b", + "name": "H2Ovl Mississippi 2B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/h2oai/h2ovl-mississippi-2b", + "description": "Open source model h2oai/h2ovl-mississippi-2b. 
40 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 40, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "h2ovl_chat", + "feature-extraction", + "gpt", + "llm", + "multimodal large language model", + "ocr", + "conversational", + "custom_code", + "en", + "arxiv:2410.13611", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "h2ovl-mississippi-2b" + }, + { + "slug": "llava-v1.5-7b", + "name": "Llava V1.5 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/liuhaotian/llava-v1.5-7b", + "description": "Open source model liuhaotian/llava-v1.5-7b. 537 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 537, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "llava", + "image-text-to-text", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llava-v1.5-7b" + }, + { + "slug": "t5-3b", + "name": "T5 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/google-t5/t5-3b", + "description": "Open source model google-t5/t5-3b. 
51 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 51, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "safetensors", + "t5", + "summarization", + "translation", + "en", + "fr", + "ro", + "de", + "multilingual", + "dataset:c4", + "arxiv:1805.12471", + "arxiv:1708.00055", + "arxiv:1704.05426", + "arxiv:1606.05250", + "arxiv:1808.09121", + "arxiv:1810.12885", + "arxiv:1905.10044", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "t5-3b" + }, + { + "slug": "qwen2.5-14b-instruct-awq", + "name": "Qwen2.5 14B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-14B-Instruct-AWQ. 
27 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 27, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-14B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-14B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-14b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "llama-3.2-3b", + "name": "Llama 3.2 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-3B", + "description": "Open source model meta-llama/Llama-3.2-3B. 
697 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 697, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.2-3b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "phi-3-mini-4k-instruct-gptq-4bit", + "name": "Phi 3 Mini 4K Instruct Gptq 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/kaitchup/Phi-3-mini-4k-instruct-gptq-4bit", + "description": "Open source model kaitchup/Phi-3-mini-4k-instruct-gptq-4bit. 
2 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "conversational", + "custom_code", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "gptq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "phi-3-mini-4k-instruct-gptq-4bit", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "qwen2.5-72b-instruct-awq", + "name": "Qwen2.5 72B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-72B-Instruct-AWQ. 
74 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 74, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-72B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-72B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 50, + "context_window_tokens": 4096, + "parameters_total_b": 72, + "parameters_active_b": 72, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-72b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "smollm2-135m", + "name": "Smollm2 135M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M", + "description": "Open source model HuggingFaceTB/SmolLM2-135M. 
166 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 166, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "en", + "arxiv:2502.02737", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "smollm2-135m" + }, + { + "slug": "llama-3.3-70b-instruct", + "name": "Llama 3.3 70B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct", + "description": "Open source model meta-llama/Llama-3.3-70B-Instruct. 2658 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2658, + "language": "Python", + "license": "llama3.3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "de", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-70B", + "base_model:finetune:meta-llama/Llama-3.1-70B", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.3-70b-instruct", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen3-30b-a3b-instruct-2507-fp8", + "name": "Qwen3 30B A3B Instruct 2507 Fp8", + 
"category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507-FP8", + "description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507-FP8. 112 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 112, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-30B-A3B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-30B-A3B-Instruct-2507", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-30b-a3b-instruct-2507-fp8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-coder-32b-instruct", + "name": "Qwen2.5 Coder 32B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct. 
1995 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1995, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-32B", + "base_model:finetune:Qwen/Qwen2.5-Coder-32B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-32b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-235b-a22b-instruct-2507-fp8", + "name": "Qwen3 235B A22B Instruct 2507 Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B-Instruct-2507-FP8", + "description": "Open source model Qwen/Qwen3-235B-A22B-Instruct-2507-FP8. 
145 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 145, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-235B-A22B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-235B-A22B-Instruct-2507", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 164, + "context_window_tokens": 4096, + "parameters_total_b": 235, + "parameters_active_b": 235, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-235b-a22b-instruct-2507-fp8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-r1-distill-qwen-7b", + "name": "Deepseek R1 Distill Qwen 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-7B. 
787 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 787, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-distill-qwen-7b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "phi-3-mini-4k-instruct", + "name": "Phi 3 Mini 4K Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", + "description": "Open source model microsoft/Phi-3-mini-4k-instruct. 1386 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1386, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "nlp", + "code", + "conversational", + "custom_code", + "en", + "fr", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "phi-3-mini-4k-instruct", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "qwen3-14b", + "name": "Qwen3 14B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-14B", + "description": "Open source model Qwen/Qwen3-14B. 
366 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 366, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-14B-Base", + "base_model:finetune:Qwen/Qwen3-14B-Base", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-14b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-coder-1.5b", + "name": "Qwen2.5 Coder 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B", + "description": "Open source model Qwen/Qwen2.5-Coder-1.5B. 
81 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 81, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "qwen", + "qwen-coder", + "codeqwen", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:finetune:Qwen/Qwen2.5-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-1.5b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "llama-3.1-70b-instruct", + "name": "Llama 3.1 70B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct", + "description": "Open source model meta-llama/Llama-3.1-70B-Instruct. 
890 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 890, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-70B", + "base_model:finetune:meta-llama/Llama-3.1-70B", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.1-70b-instruct", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "hunyuanimage-3.0", + "name": "Hunyuanimage 3.0", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/HunyuanImage-3.0", + "description": "Open source model tencent/HunyuanImage-3.0. 
640 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 640, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "hunyuan_image_3_moe", + "text-to-image", + "custom_code", + "arxiv:2509.23951", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "hunyuanimage-3.0" + }, + { + "slug": "qwen2.5-coder-7b-instruct-awq", + "name": "Qwen2.5 Coder 7B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-AWQ. 19 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 19, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-7B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-7b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-coder-30b-a3b-instruct", + "name": "Qwen3 Coder 
30B A3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct", + "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct. 945 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 945, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-coder-30b-a3b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-r1-0528", + "name": "Deepseek R1 0528", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-0528", + "description": "Open source model deepseek-ai/DeepSeek-R1-0528. 
2400 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2400, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2501.12948", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-0528" + }, + { + "slug": "tiny-random-llama-3", + "name": "Tiny Random Llama 3", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/llamafactory/tiny-random-Llama-3", + "description": "Open source model llamafactory/tiny-random-Llama-3. 3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "tiny-random-llama-3", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen2.5-coder-32b-instruct-awq", + "name": "Qwen2.5 Coder 32B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct-AWQ. 
33 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 33, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-32B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-Coder-32B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-32b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "mistral-7b-instruct-v0.1", + "name": "Mistral 7B Instruct V0.1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1", + "description": "Open source model mistralai/Mistral-7B-Instruct-v0.1. 
1826 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1826, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "mistral", + "finetuned", + "mistral-common", + "conversational", + "arxiv:2310.06825", + "base_model:mistralai/Mistral-7B-v0.1", + "base_model:finetune:mistralai/Mistral-7B-v0.1", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "mistral-7b-instruct-v0.1", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "gpt-oss-20b-mxfp4-q8", + "name": "Gpt Oss 20B Mxfp4 Q8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mlx-community/gpt-oss-20b-MXFP4-Q8", + "description": "Open source model mlx-community/gpt-oss-20b-MXFP4-Q8. 
31 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 31, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "gpt_oss", + "vllm", + "conversational", + "base_model:openai/gpt-oss-20b", + "base_model:quantized:openai/gpt-oss-20b", + "4-bit", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 14, + "context_window_tokens": 4096, + "parameters_total_b": 20, + "parameters_active_b": 20, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt-oss-20b-mxfp4-q8" + }, + { + "slug": "qwen3-embedding-4b", + "name": "Qwen3 Embedding 4B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Embedding-4B", + "description": "Open source model Qwen/Qwen3-Embedding-4B. 224 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 224, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen3", + "transformers", + "sentence-similarity", + "feature-extraction", + "text-embeddings-inference", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-4B-Base", + "base_model:finetune:Qwen/Qwen3-4B-Base", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-embedding-4b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-1.5b-instruct-awq", + "name": "Qwen2.5 1.5B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": 
"https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct-AWQ. 6 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 6, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-1.5B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-1.5B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-1.5b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "meta-llama-3.1-8b-instruct-fp8", + "name": "Meta Llama 3.1 8B Instruct Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8", + "description": "Open source model RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8. 
44 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 44, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "fp8", + "vllm", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "meta-llama-3.1-8b-instruct-fp8", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "phi-4", + "name": "Phi 4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/phi-4", + "description": "Open source model microsoft/phi-4. 
2220 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2220, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "phi", + "nlp", + "math", + "code", + "chat", + "conversational", + "en", + "arxiv:2412.08905", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "phi-4", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "deepseek-r1", + "name": "Deepseek R1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1", + "description": "Open source model deepseek-ai/DeepSeek-R1. 13011 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13011, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2501.12948", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1" + }, + { + "slug": "llama-3.2-1b-instruct-fp8", + "name": "Llama 3.2 1B Instruct Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8", + "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8. 
3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "safetensors", + "llama", + "llama-3", + "neuralmagic", + "llmcompressor", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.2-1B-Instruct", + "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.2-1b-instruct-fp8", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-3.1-405b", + "name": "Llama 3.1 405B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-405B", + "description": "Open source model meta-llama/Llama-3.1-405B. 
961 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 961, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 284, + "context_window_tokens": 4096, + "parameters_total_b": 405, + "parameters_active_b": 405, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.1-405b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen3-4b-thinking-2507", + "name": "Qwen3 4B Thinking 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507", + "description": "Open source model Qwen/Qwen3-4B-Thinking-2507. 
548 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 548, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-4b-thinking-2507", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "gpt2-medium", + "name": "Gpt2 Medium", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2-medium", + "description": "Open source model openai-community/gpt2-medium. 193 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 193, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "onnx", + "safetensors", + "gpt2", + "en", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt2-medium" + }, + { + "slug": "tiny-gpt2", + "name": "Tiny Gpt2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/sshleifer/tiny-gpt2", + "description": "Open source model sshleifer/tiny-gpt2. 
34 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 34, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "gpt2", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "tiny-gpt2" + }, + { + "slug": "hermes-3-llama-3.1-8b", + "name": "Hermes 3 Llama 3.1 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B", + "description": "Open source model NousResearch/Hermes-3-Llama-3.1-8B. 385 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 385, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "Llama-3", + "instruct", + "finetune", + "chatml", + "gpt4", + "synthetic data", + "distillation", + "function calling", + "json mode", + "axolotl", + "roleplaying", + "chat", + "conversational", + "en", + "arxiv:2408.11857", + "base_model:meta-llama/Llama-3.1-8B", + "base_model:finetune:meta-llama/Llama-3.1-8B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "hermes-3-llama-3.1-8b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "phi-3.5-vision-instruct", + "name": "Phi 3.5 Vision 
Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-3.5-vision-instruct", + "description": "Open source model microsoft/Phi-3.5-vision-instruct. 726 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 726, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3_v", + "nlp", + "code", + "vision", + "image-text-to-text", + "conversational", + "custom_code", + "multilingual", + "arxiv:2404.14219", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": true + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "phi-3.5-vision-instruct", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "minimax-m2", + "name": "Minimax M2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/MiniMaxAI/MiniMax-M2", + "description": "Open source model MiniMaxAI/MiniMax-M2. 
1485 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1485, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "minimax_m2", + "conversational", + "custom_code", + "arxiv:2504.07164", + "arxiv:2509.06501", + "arxiv:2509.13160", + "eval-results", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "minimax-m2" + }, + { + "slug": "deepseek-r1-distill-llama-8b", + "name": "Deepseek R1 Distill Llama 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-8B. 
843 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 843, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-distill-llama-8b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen3-14b-awq", + "name": "Qwen3 14B Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-14B-AWQ", + "description": "Open source model Qwen/Qwen3-14B-AWQ. 57 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 57, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-14B", + "base_model:quantized:Qwen/Qwen3-14B", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-14b-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-235b-a22b", + "name": "Qwen3 235B A22B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B", + "description": "Open source model 
Qwen/Qwen3-235B-A22B. 1075 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1075, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 164, + "context_window_tokens": 4096, + "parameters_total_b": 235, + "parameters_active_b": 235, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-235b-a22b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "meta-llama-3.1-8b-instruct-awq-int4", + "name": "Meta Llama 3.1 8B Instruct Awq Int4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4", + "description": "Open source model hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4. 
87 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 87, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "llama-3.1", + "meta", + "autoawq", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "meta-llama-3.1-8b-instruct-awq-int4", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "lfm2.5-1.2b-instruct-mlx-8bit", + "name": "Lfm2.5 1.2B Instruct Mlx 8Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit", + "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit. 
1 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "lfm2.5", + "edge", + "mlx", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "base_model:LiquidAI/LFM2.5-1.2B-Instruct", + "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct", + "endpoints_compatible", + "8-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "lfm2.5-1.2b-instruct-mlx-8bit" + }, + { + "slug": "glm-4.7-flash-gguf", + "name": "Glm 4.7 Flash Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF", + "description": "Open source model unsloth/GLM-4.7-Flash-GGUF. 
482 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 482, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "gguf", + "unsloth", + "en", + "zh", + "arxiv:2508.06471", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "deploy:azure", + "region:us", + "imatrix", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-4.7-flash-gguf" + }, + { + "slug": "deepseek-r1-distill-qwen-14b", + "name": "Deepseek R1 Distill Qwen 14B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-14B. 
603 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 603, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-distill-qwen-14b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "lfm2.5-1.2b-instruct-mlx-6bit", + "name": "Lfm2.5 1.2B Instruct Mlx 6Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit", + "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit. 
4 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "lfm2.5", + "edge", + "mlx", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "base_model:LiquidAI/LFM2.5-1.2B-Instruct", + "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct", + "endpoints_compatible", + "6-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "lfm2.5-1.2b-instruct-mlx-6bit" + }, + { + "slug": "lfm2.5-1.2b-instruct-mlx-4bit", + "name": "Lfm2.5 1.2B Instruct Mlx 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit", + "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit. 
1 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "lfm2.5", + "edge", + "mlx", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "base_model:LiquidAI/LFM2.5-1.2B-Instruct", + "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct", + "endpoints_compatible", + "4-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "lfm2.5-1.2b-instruct-mlx-4bit" + }, + { + "slug": "vicuna-7b-v1.5", + "name": "Vicuna 7B V1.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmsys/vicuna-7b-v1.5", + "description": "Open source model lmsys/vicuna-7b-v1.5. 
387 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 387, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "llama", + "arxiv:2307.09288", + "arxiv:2306.05685", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "vicuna-7b-v1.5" + }, + { + "slug": "llama-3.2-1b-instruct-q8_0-gguf", + "name": "Llama 3.2 1B Instruct Q8_0 Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF", + "description": "Open source model hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF. 43 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 43, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "gguf", + "facebook", + "meta", + "pytorch", + "llama", + "llama-3", + "llama-cpp", + "gguf-my-repo", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.2-1B-Instruct", + "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct", + "endpoints_compatible", + "region:us", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.2-1b-instruct-q8_0-gguf", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "llama-3.3-70b-instruct-awq", + "name": "Llama 3.3 70B Instruct Awq", + "category": "AI Models", + 
"is_open_source": true, + "website": "https://huggingface.co/kosbu/Llama-3.3-70B-Instruct-AWQ", + "description": "Open source model kosbu/Llama-3.3-70B-Instruct-AWQ. 10 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 10, + "language": "Python", + "license": "llama3.3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "llama-3", + "awq", + "conversational", + "en", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "de", + "base_model:meta-llama/Llama-3.3-70B-Instruct", + "base_model:quantized:meta-llama/Llama-3.3-70B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-3.3-70b-instruct-awq", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen3-32b-fp8", + "name": "Qwen3 32B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-32B-FP8", + "description": "Open source model Qwen/Qwen3-32B-FP8. 
80 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 80, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-32B", + "base_model:quantized:Qwen/Qwen3-32B", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-32b-fp8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "gpt2-xl", + "name": "Gpt2 Xl", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2-xl", + "description": "Open source model openai-community/gpt2-xl. 
373 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 373, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "safetensors", + "gpt2", + "en", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt2-xl" + }, + { + "slug": "qwen3-4b-instruct-2507-fp8", + "name": "Qwen3 4B Instruct 2507 Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507-FP8", + "description": "Open source model Qwen/Qwen3-4B-Instruct-2507-FP8. 65 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 65, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-4B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-4b-instruct-2507-fp8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "xlnet-base-cased", + "name": "Xlnet Base Cased", + "category": "AI Models", + "is_open_source": true, + "website": 
"https://huggingface.co/xlnet/xlnet-base-cased", + "description": "Open source model xlnet/xlnet-base-cased. 80 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 80, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "rust", + "xlnet", + "en", + "dataset:bookcorpus", + "dataset:wikipedia", + "arxiv:1906.08237", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "xlnet-base-cased" + }, + { + "slug": "llama-2-7b-hf", + "name": "Llama 2 7B Hf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-2-7b-hf", + "description": "Open source model meta-llama/Llama-2-7b-hf. 
2268 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2268, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "llama", + "facebook", + "meta", + "llama-2", + "en", + "arxiv:2307.09288", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-2-7b-hf", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen2.5-math-7b-instruct", + "name": "Qwen2.5 Math 7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Math-7B-Instruct. 
89 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 89, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2409.12122", + "base_model:Qwen/Qwen2.5-Math-7B", + "base_model:finetune:Qwen/Qwen2.5-Math-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-math-7b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-reranker-0.6b", + "name": "Qwen3 Reranker 0.6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Reranker-0.6B", + "description": "Open source model Qwen/Qwen3-Reranker-0.6B. 
305 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 305, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "text-ranking", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-0.6B-Base", + "base_model:finetune:Qwen/Qwen3-0.6B-Base", + "text-embeddings-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-reranker-0.6b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-1.5b", + "name": "Qwen2.5 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B", + "description": "Open source model Qwen/Qwen2.5-1.5B. 
165 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 165, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-1.5b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-30b-a3b-thinking-2507", + "name": "Qwen3 30B A3B Thinking 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Thinking-2507", + "description": "Open source model Qwen/Qwen3-30B-A3B-Thinking-2507. 
359 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 359, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2402.17463", + "arxiv:2407.02490", + "arxiv:2501.15383", + "arxiv:2404.06654", + "arxiv:2505.09388", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-30b-a3b-thinking-2507", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "smollm2-135m-instruct", + "name": "Smollm2 135M Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct", + "description": "Open source model HuggingFaceTB/SmolLM2-135M-Instruct. 
292 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 292, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "tensorboard", + "onnx", + "safetensors", + "llama", + "transformers.js", + "conversational", + "en", + "arxiv:2502.02737", + "base_model:HuggingFaceTB/SmolLM2-135M", + "base_model:quantized:HuggingFaceTB/SmolLM2-135M", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "smollm2-135m-instruct" + }, + { + "slug": "qwen2.5-math-1.5b", + "name": "Qwen2.5 Math 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Math-1.5B", + "description": "Open source model Qwen/Qwen2.5-Math-1.5B. 
100 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 100, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2409.12122", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:finetune:Qwen/Qwen2.5-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-math-1.5b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "glm-4.5-air-awq-4bit", + "name": "Glm 4.5 Air Awq 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/cyankiwi/GLM-4.5-Air-AWQ-4bit", + "description": "Open source model cyankiwi/GLM-4.5-Air-AWQ-4bit. 
27 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 27, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe", + "conversational", + "en", + "zh", + "arxiv:2508.06471", + "base_model:zai-org/GLM-4.5-Air", + "base_model:quantized:zai-org/GLM-4.5-Air", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-4.5-air-awq-4bit" + }, + { + "slug": "llama-2-7b-chat-hf", + "name": "Llama 2 7B Chat Hf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf", + "description": "Open source model meta-llama/Llama-2-7b-chat-hf. 
4705 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4705, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "llama", + "facebook", + "meta", + "llama-2", + "conversational", + "en", + "arxiv:2307.09288", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-2-7b-chat-hf", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen2.5-coder-7b-instruct-gptq-int4", + "name": "Qwen2.5 Coder 7B Instruct Gptq Int4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4", + "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4. 
12 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 12, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-7B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "gptq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-7b-instruct-gptq-int4", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-vl-30b-a3b-instruct-awq", + "name": "Qwen3 Vl 30B A3B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ", + "description": "Open source model QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ. 
38 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 38, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_vl_moe", + "image-text-to-text", + "AWQ", + "vLLM", + "conversational", + "arxiv:2505.09388", + "arxiv:2502.13923", + "arxiv:2409.12191", + "arxiv:2308.12966", + "base_model:Qwen/Qwen3-VL-30B-A3B-Instruct", + "base_model:quantized:Qwen/Qwen3-VL-30B-A3B-Instruct", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-vl-30b-a3b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-8b-base", + "name": "Qwen3 8B Base", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-8B-Base", + "description": "Open source model Qwen/Qwen3-8B-Base. 
82 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 82, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-8b-base", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-coder-14b-instruct", + "name": "Qwen2.5 Coder 14B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-14B-Instruct. 140 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 140, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-14B", + "base_model:finetune:Qwen/Qwen2.5-Coder-14B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-14b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "stories15m_moe", + "name": 
"Stories15M_Moe", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/ggml-org/stories15M_MOE", + "description": "Open source model ggml-org/stories15M_MOE. 5 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 5, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gguf", + "mixtral", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "stories15m_moe" + }, + { + "slug": "opt-1.3b", + "name": "Opt 1.3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/facebook/opt-1.3b", + "description": "Open source model facebook/opt-1.3b. 182 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 182, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "opt", + "en", + "arxiv:2205.01068", + "arxiv:2005.14165", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "opt-1.3b" + }, + { + "slug": "minimax-m2-awq", + "name": "Minimax M2 Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/QuantTrio/MiniMax-M2-AWQ", + "description": "Open source model QuantTrio/MiniMax-M2-AWQ. 
8 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 8, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mixtral", + "vLLM", + "AWQ", + "conversational", + "arxiv:2504.07164", + "arxiv:2509.06501", + "arxiv:2509.13160", + "base_model:MiniMaxAI/MiniMax-M2", + "base_model:quantized:MiniMaxAI/MiniMax-M2", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "minimax-m2-awq" + }, + { + "slug": "glm-4.7-flash-nvfp4", + "name": "Glm 4.7 Flash Nvfp4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/GadflyII/GLM-4.7-Flash-NVFP4", + "description": "Open source model GadflyII/GLM-4.7-Flash-NVFP4. 
62 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 62, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "moe", + "nvfp4", + "quantized", + "vllm", + "glm", + "30b", + "conversational", + "en", + "zh", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-4.7-flash-nvfp4" + }, + { + "slug": "hy-mt1.5-7b", + "name": "Hy Mt1.5 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/HY-MT1.5-7B", + "description": "Open source model tencent/HY-MT1.5-7B. 
133 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 133, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "hunyuan_v1_dense", + "translation", + "zh", + "en", + "fr", + "pt", + "es", + "ja", + "tr", + "ru", + "ar", + "ko", + "th", + "it", + "de", + "vi", + "ms", + "id", + "tl", + "hi", + "pl", + "cs", + "nl", + "km", + "my", + "fa", + "gu", + "ur", + "te", + "mr", + "he", + "bn", + "ta", + "uk", + "bo", + "kk", + "mn", + "ug", + "arxiv:2512.24092", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "hy-mt1.5-7b" + }, + { + "slug": "gemma-2-27b-it", + "name": "Gemma 2 27B It", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/google/gemma-2-27b-it", + "description": "Open source model google/gemma-2-27b-it. 
559 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 559, + "language": "Python", + "license": "gemma", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gemma2", + "conversational", + "arxiv:2009.03300", + "arxiv:1905.07830", + "arxiv:1911.11641", + "arxiv:1904.09728", + "arxiv:1905.10044", + "arxiv:1907.10641", + "arxiv:1811.00937", + "arxiv:1809.02789", + "arxiv:1911.01547", + "arxiv:1705.03551", + "arxiv:2107.03374", + "arxiv:2108.07732", + "arxiv:2110.14168", + "arxiv:2009.11462", + "arxiv:2101.11718", + "arxiv:2110.08193", + "arxiv:1804.09301", + "arxiv:2109.07958", + "arxiv:1804.06876", + "arxiv:2103.03874", + "arxiv:2304.06364", + "arxiv:2206.04615", + "arxiv:2203.09509", + "base_model:google/gemma-2-27b", + "base_model:finetune:google/gemma-2-27b", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 19, + "context_window_tokens": 4096, + "parameters_total_b": 27, + "parameters_active_b": 27, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gemma-2-27b-it", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "qwen3-coder-next-gguf", + "name": "Qwen3 Coder Next Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Qwen3-Coder-Next-GGUF", + "description": "Open source model unsloth/Qwen3-Coder-Next-GGUF. 
347 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 347, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "gguf", + "qwen3_next", + "unsloth", + "qwen", + "qwen3", + "base_model:Qwen/Qwen3-Coder-Next", + "base_model:quantized:Qwen/Qwen3-Coder-Next", + "endpoints_compatible", + "region:us", + "imatrix", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-coder-next-gguf", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "gte-qwen2-1.5b-instruct", + "name": "Gte Qwen2 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Alibaba-NLP/gte-Qwen2-1.5B-instruct", + "description": "Open source model Alibaba-NLP/gte-Qwen2-1.5B-instruct. 
229 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 229, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen2", + "mteb", + "transformers", + "Qwen2", + "sentence-similarity", + "custom_code", + "arxiv:2308.03281", + "model-index", + "text-embeddings-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gte-qwen2-1.5b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "lfm2-1.2b", + "name": "Lfm2 1.2B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/LiquidAI/LFM2-1.2B", + "description": "Open source model LiquidAI/LFM2-1.2B. 
349 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 349, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "edge", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "arxiv:2511.23404", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "lfm2-1.2b" + }, + { + "slug": "saiga_llama3_8b", + "name": "Saiga_Llama3_8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/IlyaGusev/saiga_llama3_8b", + "description": "Open source model IlyaGusev/saiga_llama3_8b. 137 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 137, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "ru", + "dataset:IlyaGusev/saiga_scored", + "doi:10.57967/hf/2368", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "saiga_llama3_8b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen3-1.7b-base", + "name": "Qwen3 1.7B Base", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-1.7B-Base", + "description": "Open source model Qwen/Qwen3-1.7B-Base. 
62 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 62, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-1.7b-base", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "mistral-7b-v0.3-bnb-4bit", + "name": "Mistral 7B V0.3 Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/mistral-7b-v0.3-bnb-4bit", + "description": "Open source model unsloth/mistral-7b-v0.3-bnb-4bit. 22 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 22, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mistral", + "unsloth", + "mistral-7b", + "en", + "base_model:mistralai/Mistral-7B-v0.3", + "base_model:quantized:mistralai/Mistral-7B-v0.3", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "mistral-7b-v0.3-bnb-4bit", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "gemma-2-2b-it", + "name": "Gemma 2 2B It", + "category": "AI Models", + "is_open_source": true, + "website": 
"https://huggingface.co/google/gemma-2-2b-it", + "description": "Open source model google/gemma-2-2b-it. 1285 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1285, + "language": "Python", + "license": "gemma", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gemma2", + "conversational", + "arxiv:2009.03300", + "arxiv:1905.07830", + "arxiv:1911.11641", + "arxiv:1904.09728", + "arxiv:1905.10044", + "arxiv:1907.10641", + "arxiv:1811.00937", + "arxiv:1809.02789", + "arxiv:1911.01547", + "arxiv:1705.03551", + "arxiv:2107.03374", + "arxiv:2108.07732", + "arxiv:2110.14168", + "arxiv:2009.11462", + "arxiv:2101.11718", + "arxiv:2110.08193", + "arxiv:1804.09301", + "arxiv:2109.07958", + "arxiv:1804.06876", + "arxiv:2103.03874", + "arxiv:2304.06364", + "arxiv:1903.00161", + "arxiv:2206.04615", + "arxiv:2203.09509", + "arxiv:2403.13793", + "base_model:google/gemma-2-2b", + "base_model:finetune:google/gemma-2-2b", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gemma-2-2b-it", + "logo_url": "/logos/gemma.svg" + }, + { + "slug": "phi-4-multimodal-instruct", + "name": "Phi 4 Multimodal Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-4-multimodal-instruct", + "description": "Open source model microsoft/Phi-4-multimodal-instruct. 
1573 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1573, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi4mm", + "nlp", + "code", + "audio", + "automatic-speech-recognition", + "speech-summarization", + "speech-translation", + "visual-question-answering", + "phi-4-multimodal", + "phi", + "phi-4-mini", + "custom_code", + "multilingual", + "ar", + "zh", + "cs", + "da", + "nl", + "en", + "fi", + "fr", + "de", + "he", + "hu", + "it", + "ja", + "ko", + "no", + "pl", + "pt", + "ru", + "es", + "sv", + "th", + "tr", + "uk", + "arxiv:2503.01743", + "arxiv:2407.13833", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "phi-4-multimodal-instruct", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "pythia-70m-deduped", + "name": "Pythia 70M Deduped", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/EleutherAI/pythia-70m-deduped", + "description": "Open source model EleutherAI/pythia-70m-deduped. 
27 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 27, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "gpt_neox", + "causal-lm", + "pythia", + "en", + "dataset:EleutherAI/the_pile_deduplicated", + "arxiv:2304.01373", + "arxiv:2101.00027", + "arxiv:2201.07311", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "pythia-70m-deduped" + }, + { + "slug": "dialogpt-medium", + "name": "Dialogpt Medium", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/DialoGPT-medium", + "description": "Open source model microsoft/DialoGPT-medium. 
433 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 433, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "gpt2", + "conversational", + "arxiv:1911.00536", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "dialogpt-medium" + }, + { + "slug": "gpt-oss-20b-bf16", + "name": "Gpt Oss 20B Bf16", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/gpt-oss-20b-BF16", + "description": "Open source model unsloth/gpt-oss-20b-BF16. 29 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 29, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gpt_oss", + "vllm", + "unsloth", + "conversational", + "base_model:openai/gpt-oss-20b", + "base_model:finetune:openai/gpt-oss-20b", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 14, + "context_window_tokens": 4096, + "parameters_total_b": 20, + "parameters_active_b": 20, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "gpt-oss-20b-bf16" + }, + { + "slug": "qwen2.5-72b-instruct", + "name": "Qwen2.5 72B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", + "description": "Open source model Qwen/Qwen2.5-72B-Instruct. 
910 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 910, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-72B", + "base_model:finetune:Qwen/Qwen2.5-72B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 50, + "context_window_tokens": 4096, + "parameters_total_b": 72, + "parameters_active_b": 72, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-72b-instruct", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-32b-awq", + "name": "Qwen3 32B Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-32B-AWQ", + "description": "Open source model Qwen/Qwen3-32B-AWQ. 
125 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 125, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-32B", + "base_model:quantized:Qwen/Qwen3-32B", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-32b-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "mimo-v2-flash", + "name": "Mimo V2 Flash", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/XiaomiMiMo/MiMo-V2-Flash", + "description": "Open source model XiaomiMiMo/MiMo-V2-Flash. 
628 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 628, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mimo_v2_flash", + "conversational", + "custom_code", + "eval-results", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "mimo-v2-flash" + }, + { + "slug": "qwen3-coder-30b-a3b-instruct-fp8", + "name": "Qwen3 Coder 30B A3B Instruct Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8", + "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8. 158 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 158, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-coder-30b-a3b-instruct-fp8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen3-8b-fp8", + "name": "Qwen3 8B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-8B-FP8", + "description": "Open source model Qwen/Qwen3-8B-FP8. 
56 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 56, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-8B", + "base_model:quantized:Qwen/Qwen3-8B", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-8b-fp8", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-v3.2", + "name": "Deepseek V3.2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3.2", + "description": "Open source model deepseek-ai/DeepSeek-V3.2. 
1251 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1251, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v32", + "conversational", + "base_model:deepseek-ai/DeepSeek-V3.2-Exp-Base", + "base_model:finetune:deepseek-ai/DeepSeek-V3.2-Exp-Base", + "eval-results", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-v3.2" + }, + { + "slug": "qwen3-coder-next", + "name": "Qwen3 Coder Next", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Coder-Next", + "description": "Open source model Qwen/Qwen3-Coder-Next. 912 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 912, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_next", + "conversational", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-coder-next", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2-0.5b", + "name": "Qwen2 0.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2-0.5B", + "description": "Open source model Qwen/Qwen2-0.5B. 
164 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 164, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "pretrained", + "conversational", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2-0.5b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "mistral-7b-v0.1", + "name": "Mistral 7B V0.1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mistralai/Mistral-7B-v0.1", + "description": "Open source model mistralai/Mistral-7B-v0.1. 4042 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4042, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "mistral", + "pretrained", + "mistral-common", + "en", + "arxiv:2310.06825", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "mistral-7b-v0.1", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "kimi-k2-thinking", + "name": "Kimi K2 Thinking", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/moonshotai/Kimi-K2-Thinking", + "description": "Open source model moonshotai/Kimi-K2-Thinking. 
1670 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1670, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "kimi_k2", + "conversational", + "custom_code", + "eval-results", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "kimi-k2-thinking" + }, + { + "slug": "deepseek-r1-0528-qwen3-8b-mlx-4bit", + "name": "Deepseek R1 0528 Qwen3 8B Mlx 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit", + "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit. 
7 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 7, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "qwen3", + "conversational", + "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "4-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-0528-qwen3-8b-mlx-4bit", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-7b-instruct-awq", + "name": "Qwen2.5 7B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-7B-Instruct-AWQ. 
36 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 36, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-7B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-7B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-7b-instruct-awq", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "points-reader", + "name": "Points Reader", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/POINTS-Reader", + "description": "Open source model tencent/POINTS-Reader. 
100 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 100, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "image-text-to-text", + "conversational", + "custom_code", + "arxiv:2509.01215", + "arxiv:2412.08443", + "arxiv:2409.04828", + "arxiv:2405.11850", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "points-reader" + }, + { + "slug": "qwen3-4b-base", + "name": "Qwen3 4B Base", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Base", + "description": "Open source model Qwen/Qwen3-4B-Base. 80 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 80, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-4b-base", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "step-3.5-flash", + "name": "Step 3.5 Flash", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/stepfun-ai/Step-3.5-Flash", + "description": "Open source model stepfun-ai/Step-3.5-Flash. 
621 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 621, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "step3p5", + "conversational", + "custom_code", + "arxiv:2602.10604", + "arxiv:2601.05593", + "arxiv:2507.19427", + "eval-results", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "step-3.5-flash" + }, + { + "slug": "kogpt2-base-v2", + "name": "Kogpt2 Base V2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/skt/kogpt2-base-v2", + "description": "Open source model skt/kogpt2-base-v2. 60 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 60, + "language": "Python", + "license": "cc-by-nc-sa-4.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "jax", + "gpt2", + "ko", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "kogpt2-base-v2" + }, + { + "slug": "parler-tts-mini-multilingual-v1.1", + "name": "Parler Tts Mini Multilingual V1.1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/parler-tts/parler-tts-mini-multilingual-v1.1", + "description": "Open source model parler-tts/parler-tts-mini-multilingual-v1.1. 
54 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 54, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "parler_tts", + "text-to-speech", + "annotation", + "en", + "fr", + "es", + "pt", + "pl", + "de", + "nl", + "it", + "dataset:facebook/multilingual_librispeech", + "dataset:parler-tts/libritts_r_filtered", + "dataset:parler-tts/libritts-r-filtered-speaker-descriptions", + "dataset:parler-tts/mls_eng", + "dataset:parler-tts/mls-eng-speaker-descriptions", + "dataset:ylacombe/mls-annotated", + "dataset:ylacombe/cml-tts-filtered-annotated", + "dataset:PHBJT/cml-tts-filtered", + "arxiv:2402.01912", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "parler-tts-mini-multilingual-v1.1" + }, + { + "slug": "qwen3-reranker-8b", + "name": "Qwen3 Reranker 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Reranker-8B", + "description": "Open source model Qwen/Qwen3-Reranker-8B. 
213 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 213, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "text-ranking", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-8B-Base", + "base_model:finetune:Qwen/Qwen3-8B-Base", + "text-embeddings-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-reranker-8b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-r1-0528-qwen3-8b-mlx-8bit", + "name": "Deepseek R1 0528 Qwen3 8B Mlx 8Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit", + "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit. 
13 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "qwen3", + "conversational", + "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "8-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-0528-qwen3-8b-mlx-8bit", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "powermoe-3b", + "name": "Powermoe 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/ibm-research/PowerMoE-3b", + "description": "Open source model ibm-research/PowerMoE-3b. 14 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 14, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "granitemoe", + "arxiv:2408.13359", + "model-index", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "powermoe-3b" + }, + { + "slug": "llada-8b-instruct", + "name": "Llada 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/GSAI-ML/LLaDA-8B-Instruct", + "description": "Open source model GSAI-ML/LLaDA-8B-Instruct. 
342 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 342, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llada", + "conversational", + "custom_code", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llada-8b-instruct" + }, + { + "slug": "apertus-8b-instruct-2509", + "name": "Apertus 8B Instruct 2509", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/swiss-ai/Apertus-8B-Instruct-2509", + "description": "Open source model swiss-ai/Apertus-8B-Instruct-2509. 435 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 435, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "apertus", + "multilingual", + "compliant", + "swiss-ai", + "conversational", + "arxiv:2509.14233", + "base_model:swiss-ai/Apertus-8B-2509", + "base_model:finetune:swiss-ai/Apertus-8B-2509", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "apertus-8b-instruct-2509" + }, + { + "slug": "qwen3-30b-a3b-gptq-int4", + "name": "Qwen3 30B A3B Gptq Int4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-GPTQ-Int4", + "description": "Open source model Qwen/Qwen3-30B-A3B-GPTQ-Int4. 
45 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 45, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-30B-A3B", + "base_model:quantized:Qwen/Qwen3-30B-A3B", + "endpoints_compatible", + "4-bit", + "gptq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-30b-a3b-gptq-int4", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "tinyllama-1.1b-chat-v0.3-gptq", + "name": "Tinyllama 1.1B Chat V0.3 Gptq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ", + "description": "Open source model TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ. 
9 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 9, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "en", + "dataset:cerebras/SlimPajama-627B", + "dataset:bigcode/starcoderdata", + "dataset:OpenAssistant/oasst_top1_2023-08-25", + "base_model:TinyLlama/TinyLlama-1.1B-Chat-v0.3", + "base_model:quantized:TinyLlama/TinyLlama-1.1B-Chat-v0.3", + "text-generation-inference", + "4-bit", + "gptq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "tinyllama-1.1b-chat-v0.3-gptq" + }, + { + "slug": "prot_t5_xl_bfd", + "name": "Prot_T5_Xl_Bfd", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Rostlab/prot_t5_xl_bfd", + "description": "Open source model Rostlab/prot_t5_xl_bfd. 
10 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 10, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "t5", + "protein language model", + "dataset:BFD", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "prot_t5_xl_bfd" + }, + { + "slug": "qwen3-4b-instruct-2507-unsloth-bnb-4bit", + "name": "Qwen3 4B Instruct 2507 Unsloth Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit", + "description": "Open source model unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit. 
13 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "unsloth", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-4B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-4b-instruct-2507-unsloth-bnb-4bit", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "phi-3.5-mini-instruct", + "name": "Phi 3.5 Mini Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-3.5-mini-instruct", + "description": "Open source model microsoft/Phi-3.5-mini-instruct. 
963 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 963, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "nlp", + "code", + "conversational", + "custom_code", + "multilingual", + "arxiv:2404.14219", + "arxiv:2407.13833", + "arxiv:2403.06412", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "phi-3.5-mini-instruct", + "logo_url": "/logos/phi.svg" + }, + { + "slug": "meta-llama-3.1-8b-instruct-bnb-4bit", + "name": "Meta Llama 3.1 8B Instruct Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit", + "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit. 
95 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 95, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "llama-3", + "meta", + "facebook", + "unsloth", + "conversational", + "en", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "meta-llama-3.1-8b-instruct-bnb-4bit", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "glm-4.7-flash-awq-4bit", + "name": "Glm 4.7 Flash Awq 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/cyankiwi/GLM-4.7-Flash-AWQ-4bit", + "description": "Open source model cyankiwi/GLM-4.7-Flash-AWQ-4bit. 
43 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 43, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "conversational", + "en", + "zh", + "arxiv:2508.06471", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-4.7-flash-awq-4bit" + }, + { + "slug": "dots.ocr", + "name": "Dots.Ocr", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/rednote-hilab/dots.ocr", + "description": "Open source model rednote-hilab/dots.ocr. 
1243 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1243, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "dots_ocr", + "safetensors", + "image-to-text", + "ocr", + "document-parse", + "layout", + "table", + "formula", + "transformers", + "custom_code", + "image-text-to-text", + "conversational", + "en", + "zh", + "multilingual", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "dots.ocr" + }, + { + "slug": "mistral-7b-bnb-4bit", + "name": "Mistral 7B Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/mistral-7b-bnb-4bit", + "description": "Open source model unsloth/mistral-7b-bnb-4bit. 
30 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 30, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mistral", + "unsloth", + "mistral-7b", + "bnb", + "en", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "mistral-7b-bnb-4bit", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "glm-5-fp8", + "name": "Glm 5 Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/zai-org/GLM-5-FP8", + "description": "Open source model zai-org/GLM-5-FP8. 108 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 108, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm_moe_dsa", + "conversational", + "en", + "zh", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "glm-5-fp8" + }, + { + "slug": "qwen-7b", + "name": "Qwen 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen-7B", + "description": "Open source model Qwen/Qwen-7B. 
395 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 395, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen", + "custom_code", + "zh", + "en", + "arxiv:2309.16609", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen-7b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwq-32b-awq", + "name": "Qwq 32B Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/QwQ-32B-AWQ", + "description": "Open source model Qwen/QwQ-32B-AWQ. 133 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 133, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2412.15115", + "base_model:Qwen/QwQ-32B", + "base_model:quantized:Qwen/QwQ-32B", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwq-32b-awq" + }, + { + "slug": "deepseek-r1-distill-llama-70b", + "name": "Deepseek R1 Distill Llama 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-70B. 
741 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 741, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-r1-distill-llama-70b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "qwen2.5-coder-7b", + "name": "Qwen2.5 Coder 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B", + "description": "Open source model Qwen/Qwen2.5-Coder-7B. 134 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 134, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "qwen", + "qwen-coder", + "codeqwen", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-7B", + "base_model:finetune:Qwen/Qwen2.5-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-coder-7b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "qwen2.5-3b", + "name": "Qwen2.5 3B", + "category": "AI Models", + "is_open_source": true, + "website": 
"https://huggingface.co/Qwen/Qwen2.5-3B", + "description": "Open source model Qwen/Qwen2.5-3B. 169 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 169, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen2.5-3b", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-v2-lite-chat", + "name": "Deepseek V2 Lite Chat", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat", + "description": "Open source model deepseek-ai/DeepSeek-V2-Lite-Chat. 
133 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 133, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v2", + "conversational", + "custom_code", + "arxiv:2405.04434", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-v2-lite-chat" + }, + { + "slug": "tiny-qwen3forcausallm", + "name": "Tiny Qwen3Forcausallm", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen3ForCausalLM", + "description": "Open source model trl-internal-testing/tiny-Qwen3ForCausalLM. 1 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "trl", + "conversational", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "tiny-qwen3forcausallm", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "deepseek-coder-v2-lite-instruct", + "name": "Deepseek Coder V2 Lite Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", + "description": "Open source model 
deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct. 539 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 539, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v2", + "conversational", + "custom_code", + "arxiv:2401.06066", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-coder-v2-lite-instruct" + }, + { + "slug": "qwen3-0.6b-base", + "name": "Qwen3 0.6B Base", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-0.6B-Base", + "description": "Open source model Qwen/Qwen3-0.6B-Base. 146 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 146, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen3-0.6b-base", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "diffractgpt_mistral_chemical_formula", + "name": "Diffractgpt_Mistral_Chemical_Formula", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/knc6/diffractgpt_mistral_chemical_formula", + "description": "Open source model 
knc6/diffractgpt_mistral_chemical_formula. 1 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "peft", + "safetensors", + "chemistry", + "text-generation-inference", + "atomgpt", + "diffraction", + "en", + "base_model:unsloth/mistral-7b-bnb-4bit", + "base_model:adapter:unsloth/mistral-7b-bnb-4bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "diffractgpt_mistral_chemical_formula", + "logo_url": "/logos/mistral.svg" + }, + { + "slug": "qwen-7b-chat", + "name": "Qwen 7B Chat", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen-7B-Chat", + "description": "Open source model Qwen/Qwen-7B-Chat. 
787 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 787, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen", + "custom_code", + "zh", + "en", + "arxiv:2309.16609", + "arxiv:2305.08322", + "arxiv:2009.03300", + "arxiv:2305.05280", + "arxiv:2210.03629", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "qwen-7b-chat", + "logo_url": "/logos/qwen.svg" + }, + { + "slug": "nvidia-nemotron-3-nano-30b-a3b-nvfp4", + "name": "Nvidia Nemotron 3 Nano 30B A3B Nvfp4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4", + "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4. 
100 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 100, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "nemotron_h", + "feature-extraction", + "nvidia", + "pytorch", + "conversational", + "custom_code", + "en", + "es", + "fr", + "de", + "ja", + "it", + "dataset:nvidia/Nemotron-Pretraining-Code-v1", + "dataset:nvidia/Nemotron-CC-v2", + "dataset:nvidia/Nemotron-Pretraining-SFT-v1", + "dataset:nvidia/Nemotron-CC-Math-v1", + "dataset:nvidia/Nemotron-Pretraining-Code-v2", + "dataset:nvidia/Nemotron-Pretraining-Specialized-v1", + "dataset:nvidia/Nemotron-CC-v2.1", + "dataset:nvidia/Nemotron-CC-Code-v1", + "dataset:nvidia/Nemotron-Pretraining-Dataset-sample", + "dataset:nvidia/Nemotron-Competitive-Programming-v1", + "dataset:nvidia/Nemotron-Math-v2", + "dataset:nvidia/Nemotron-Agentic-v1", + "dataset:nvidia/Nemotron-Math-Proofs-v1", + "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1", + "dataset:nvidia/Nemotron-Science-v1", + "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend", + "arxiv:2512.20848", + "arxiv:2512.20856", + "arxiv:2601.20088", + "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "nvidia-nemotron-3-nano-30b-a3b-nvfp4" + }, + { + "slug": "falcon-h1-tiny-90m-instruct", + "name": "Falcon H1 Tiny 90M Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tiiuae/Falcon-H1-Tiny-90M-Instruct", + "description": "Open source model tiiuae/Falcon-H1-Tiny-90M-Instruct. 
31 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 31, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "falcon_h1", + "falcon-h1", + "edge", + "conversational", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "falcon-h1-tiny-90m-instruct", + "logo_url": "/logos/falcon.svg" + }, + { + "slug": "hermes-3-llama-3.2-3b", + "name": "Hermes 3 Llama 3.2 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.2-3B", + "description": "Open source model NousResearch/Hermes-3-Llama-3.2-3B. 174 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 174, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "Llama-3", + "instruct", + "finetune", + "chatml", + "gpt4", + "synthetic data", + "distillation", + "function calling", + "json mode", + "axolotl", + "roleplaying", + "chat", + "conversational", + "en", + "arxiv:2408.11857", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "hermes-3-llama-3.2-3b", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "meta-llama-3.1-8b-instruct", + "name": "Meta Llama 3.1 8B Instruct", + "category": 
"AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct", + "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct. 94 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 94, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "llama-3", + "meta", + "facebook", + "unsloth", + "conversational", + "en", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:finetune:meta-llama/Llama-3.1-8B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "meta-llama-3.1-8b-instruct", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "meta-llama-3.1-8b-instruct-gguf", + "name": "Meta Llama 3.1 8B Instruct Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + "description": "Open source model bartowski/Meta-Llama-3.1-8B-Instruct-GGUF. 
321 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 321, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "gguf", + "facebook", + "meta", + "pytorch", + "llama", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", + "endpoints_compatible", + "region:us", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "meta-llama-3.1-8b-instruct-gguf", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "deepseek-v3-0324", + "name": "Deepseek V3 0324", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3-0324", + "description": "Open source model deepseek-ai/DeepSeek-V3-0324. 
3087 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3087, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2412.19437", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "deepseek-v3-0324" + }, + { + "slug": "elm", + "name": "Elm", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Joaoffg/ELM", + "description": "Open source model Joaoffg/ELM. 2 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "academic", + "university", + "en", + "nl", + "arxiv:2408.06931", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "elm" + }, + { + "slug": "llama-2-13b-chat-hf", + "name": "Llama 2 13B Chat Hf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf", + "description": "Open source model meta-llama/Llama-2-13b-chat-hf. 
1109 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1109, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "llama", + "facebook", + "meta", + "llama-2", + "conversational", + "en", + "arxiv:2307.09288", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "llama-2-13b-chat-hf", + "logo_url": "/logos/meta.svg" + }, + { + "slug": "svara-tts-v1", + "name": "Svara Tts V1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/kenpath/svara-tts-v1", + "description": "Open source model kenpath/svara-tts-v1. 
18 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 18, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "text-to-speech", + "speech-synthesis", + "multilingual", + "indic", + "orpheus", + "lora", + "low-latency", + "gguf", + "zero-shot", + "emotions", + "discrete-audio-tokens", + "hi", + "bn", + "mr", + "te", + "kn", + "bho", + "mag", + "hne", + "mai", + "as", + "brx", + "doi", + "gu", + "ml", + "pa", + "ta", + "ne", + "sa", + "en", + "dataset:SYSPIN", + "dataset:RASA", + "dataset:IndicTTS", + "dataset:SPICOR", + "base_model:canopylabs/3b-hi-ft-research_release", + "base_model:adapter:canopylabs/3b-hi-ft-research_release", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "https://m.do.co/c/2ed27757a361", + "id": "svara-tts-v1" + } +] diff --git a/data/tools_expanded.json b/data/tools_expanded.json new file mode 100644 index 0000000..fee37b4 --- /dev/null +++ b/data/tools_expanded.json @@ -0,0 +1,9193 @@ +[ + { + "slug": "qwen2.5-7b-instruct", + "name": "Qwen2.5 7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct", + "description": "Open source model Qwen/Qwen2.5-7B-Instruct. 
1073 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1073, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-7B", + "base_model:finetune:Qwen/Qwen2.5-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-0.6b", + "name": "Qwen3 0.6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-0.6B", + "description": "Open source model Qwen/Qwen3-0.6B. 1083 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1083, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-0.6B-Base", + "base_model:finetune:Qwen/Qwen3-0.6B-Base", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt2", + "name": "Gpt2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2", + "description": "Open source model openai-community/gpt2. 
3114 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3114, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "tflite", + "rust", + "onnx", + "safetensors", + "gpt2", + "exbert", + "en", + "doi:10.57967/hf/0039", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-1.5b-instruct", + "name": "Qwen2.5 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct", + "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct. 617 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 617, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:finetune:Qwen/Qwen2.5-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-3b-instruct", + "name": "Qwen2.5 3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct", + "description": "Open source model Qwen/Qwen2.5-3B-Instruct. 
404 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 404, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-3B", + "base_model:finetune:Qwen/Qwen2.5-3B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.1-8b-instruct", + "name": "Llama 3.1 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct", + "description": "Open source model meta-llama/Llama-3.1-8B-Instruct. 
5467 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 5467, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-8B", + "base_model:finetune:meta-llama/Llama-3.1-8B", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt-oss-20b", + "name": "Gpt Oss 20B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai/gpt-oss-20b", + "description": "Open source model openai/gpt-oss-20b. 4378 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4378, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gpt_oss", + "vllm", + "conversational", + "arxiv:2508.10925", + "endpoints_compatible", + "8-bit", + "mxfp4", + "deploy:azure", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 14, + "context_window_tokens": 4096, + "parameters_total_b": 20, + "parameters_active_b": 20, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-0.5b-instruct", + "name": "Qwen2.5 0.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct", + "description": "Open source model Qwen/Qwen2.5-0.5B-Instruct. 
463 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 463, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-0.5B", + "base_model:finetune:Qwen/Qwen2.5-0.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-4b", + "name": "Qwen3 4B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B", + "description": "Open source model Qwen/Qwen3-4B. 552 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 552, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-4B-Base", + "base_model:finetune:Qwen/Qwen3-4B-Base", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-8b", + "name": "Qwen3 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-8B", + "description": "Open source model Qwen/Qwen3-8B. 
940 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 940, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-8B-Base", + "base_model:finetune:Qwen/Qwen3-8B-Base", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-32b-instruct", + "name": "Qwen2.5 32B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct", + "description": "Open source model Qwen/Qwen2.5-32B-Instruct. 328 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 328, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-32B", + "base_model:finetune:Qwen/Qwen2.5-32B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "opt-125m", + "name": "Opt 125M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/facebook/opt-125m", + "description": "Open source model facebook/opt-125m. 
233 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 233, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "opt", + "en", + "arxiv:2205.01068", + "arxiv:2005.14165", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-1.7b", + "name": "Qwen3 1.7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-1.7B", + "description": "Open source model Qwen/Qwen3-1.7B. 422 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 422, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-1.7B-Base", + "base_model:finetune:Qwen/Qwen3-1.7B-Base", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "tiny-qwen2forcausallm-2.5", + "name": "Tiny Qwen2Forcausallm 2.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen2ForCausalLM-2.5", + "description": "Open source model trl-internal-testing/tiny-Qwen2ForCausalLM-2.5. 
3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "trl", + "conversational", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "dolphin-2.9.1-yi-1.5-34b", + "name": "Dolphin 2.9.1 Yi 1.5 34B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/dphn/dolphin-2.9.1-yi-1.5-34b", + "description": "Open source model dphn/dolphin-2.9.1-yi-1.5-34b. 54 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 54, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "generated_from_trainer", + "axolotl", + "conversational", + "dataset:cognitivecomputations/Dolphin-2.9", + "dataset:teknium/OpenHermes-2.5", + "dataset:m-a-p/CodeFeedback-Filtered-Instruction", + "dataset:cognitivecomputations/dolphin-coder", + "dataset:cognitivecomputations/samantha-data", + "dataset:microsoft/orca-math-word-problems-200k", + "dataset:Locutusque/function-calling-chatml", + "dataset:internlm/Agent-FLAN", + "base_model:01-ai/Yi-1.5-34B", + "base_model:finetune:01-ai/Yi-1.5-34B", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 24, + "context_window_tokens": 4096, + "parameters_total_b": 34, + "parameters_active_b": 34, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": 
"qwen3-embedding-0.6b", + "name": "Qwen3 Embedding 0.6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Embedding-0.6B", + "description": "Open source model Qwen/Qwen3-Embedding-0.6B. 879 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 879, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen3", + "transformers", + "sentence-similarity", + "feature-extraction", + "text-embeddings-inference", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-0.6B-Base", + "base_model:finetune:Qwen/Qwen3-0.6B-Base", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt-oss-120b", + "name": "Gpt Oss 120B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai/gpt-oss-120b", + "description": "Open source model openai/gpt-oss-120b. 
4503 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4503, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gpt_oss", + "vllm", + "conversational", + "arxiv:2508.10925", + "endpoints_compatible", + "8-bit", + "mxfp4", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 84, + "context_window_tokens": 4096, + "parameters_total_b": 120, + "parameters_active_b": 120, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-4b-instruct-2507", + "name": "Qwen3 4B Instruct 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507", + "description": "Open source model Qwen/Qwen3-4B-Instruct-2507. 730 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 730, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "moondream2", + "name": "Moondream2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/vikhyatk/moondream2", + "description": "Open source model vikhyatk/moondream2. 
1373 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1373, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "moondream1", + "image-text-to-text", + "custom_code", + "doi:10.57967/hf/6762", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.2-1b-instruct", + "name": "Llama 3.2 1B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct", + "description": "Open source model meta-llama/Llama-3.2-1B-Instruct. 1292 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1292, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2-1.5b-instruct", + "name": "Qwen2 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct", + "description": "Open source model Qwen/Qwen2-1.5B-Instruct. 
158 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 158, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-0.5b-instruct", + "name": "Qwen2.5 Coder 0.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-0.5B-Instruct. 64 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 64, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-0.5B", + "base_model:finetune:Qwen/Qwen2.5-Coder-0.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "kimi-k2.5", + "name": "Kimi K2.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mlx-community/Kimi-K2.5", + "description": "Open source model mlx-community/Kimi-K2.5. 
28 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 28, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "kimi_k25", + "conversational", + "custom_code", + "base_model:moonshotai/Kimi-K2.5", + "base_model:quantized:moonshotai/Kimi-K2.5", + "4-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "mistral-7b-instruct-v0.2", + "name": "Mistral 7B Instruct V0.2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2", + "description": "Open source model mistralai/Mistral-7B-Instruct-v0.2. 3075 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3075, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "mistral", + "finetuned", + "mistral-common", + "conversational", + "arxiv:2310.06825", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-30b-a3b-instruct-2507", + "name": "Qwen3 30B A3B Instruct 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507", + "description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507. 
766 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 766, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2402.17463", + "arxiv:2407.02490", + "arxiv:2501.15383", + "arxiv:2404.06654", + "arxiv:2505.09388", + "eval-results", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llm-jp-3-3.7b-instruct", + "name": "Llm Jp 3 3.7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct", + "description": "Open source model llm-jp/llm-jp-3-3.7b-instruct. 13 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "en", + "ja", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.2-3b-instruct", + "name": "Llama 3.2 3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct", + "description": "Open source model meta-llama/Llama-3.2-3B-Instruct. 
1986 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1986, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "distilgpt2", + "name": "Distilgpt2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/distilbert/distilgpt2", + "description": "Open source model distilbert/distilgpt2. 609 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 609, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "tflite", + "rust", + "coreml", + "safetensors", + "gpt2", + "exbert", + "en", + "dataset:openwebtext", + "arxiv:1910.01108", + "arxiv:2201.08542", + "arxiv:2203.12574", + "arxiv:1910.09700", + "arxiv:1503.02531", + "model-index", + "co2_eq_emissions", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-embedding-8b", + "name": "Qwen3 Embedding 8B", + "category": "AI Models", + "is_open_source": true, + "website": 
"https://huggingface.co/Qwen/Qwen3-Embedding-8B", + "description": "Open source model Qwen/Qwen3-Embedding-8B. 584 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 584, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen3", + "transformers", + "sentence-similarity", + "feature-extraction", + "text-embeddings-inference", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-8B-Base", + "base_model:finetune:Qwen/Qwen3-8B-Base", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "meta-llama-3-8b", + "name": "Meta Llama 3 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", + "description": "Open source model meta-llama/Meta-Llama-3-8B. 
6458 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 6458, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "tinyllama-1.1b-chat-v1.0", + "name": "Tinyllama 1.1B Chat V1.0", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0", + "description": "Open source model TinyLlama/TinyLlama-1.1B-Chat-v1.0. 1526 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1526, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "en", + "dataset:cerebras/SlimPajama-627B", + "dataset:bigcode/starcoderdata", + "dataset:HuggingFaceH4/ultrachat_200k", + "dataset:HuggingFaceH4/ultrafeedback_binarized", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-4.7-flash", + "name": "Glm 4.7 Flash", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/zai-org/GLM-4.7-Flash", + "description": "Open source model zai-org/GLM-4.7-Flash. 
1538 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1538, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "conversational", + "en", + "zh", + "arxiv:2508.06471", + "eval-results", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.2-1b", + "name": "Llama 3.2 1B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-1B", + "description": "Open source model meta-llama/Llama-3.2-1B. 2295 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2295, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-32b", + "name": "Qwen3 32B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-32B", + "description": "Open source model Qwen/Qwen3-32B. 
656 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 656, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.2-1b-instruct-fp8-dynamic", + "name": "Llama 3.2 1B Instruct Fp8 Dynamic", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic", + "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic. 3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "safetensors", + "llama", + "fp8", + "vllm", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.2-1B-Instruct", + "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-1.5b-instruct", + "name": "Qwen2.5 Coder 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct", + "description": "Open source model 
Qwen/Qwen2.5-Coder-1.5B-Instruct. 106 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 106, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-1.5B", + "base_model:finetune:Qwen/Qwen2.5-Coder-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "meta-llama-3-8b-instruct", + "name": "Meta Llama 3 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct", + "description": "Open source model meta-llama/Meta-Llama-3-8B-Instruct. 
4380 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4380, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gemma-3-1b-it", + "name": "Gemma 3 1B It", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/google/gemma-3-1b-it", + "description": "Open source model google/gemma-3-1b-it. 842 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 842, + "language": "Python", + "license": "gemma", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gemma3_text", + "conversational", + "arxiv:1905.07830", + "arxiv:1905.10044", + "arxiv:1911.11641", + "arxiv:1904.09728", + "arxiv:1705.03551", + "arxiv:1911.01547", + "arxiv:1907.10641", + "arxiv:1903.00161", + "arxiv:2009.03300", + "arxiv:2304.06364", + "arxiv:2103.03874", + "arxiv:2110.14168", + "arxiv:2311.12022", + "arxiv:2108.07732", + "arxiv:2107.03374", + "arxiv:2210.03057", + "arxiv:2106.03193", + "arxiv:1910.11856", + "arxiv:2502.12404", + "arxiv:2502.21228", + "arxiv:2404.16816", + "arxiv:2104.12756", + "arxiv:2311.16502", + "arxiv:2203.10244", + "arxiv:2404.12390", + "arxiv:1810.12440", + "arxiv:1908.02660", + "arxiv:2312.11805", + "base_model:google/gemma-3-1b-pt", + "base_model:finetune:google/gemma-3-1b-pt", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + 
"hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "phi-2", + "name": "Phi 2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/phi-2", + "description": "Open source model microsoft/phi-2. 3425 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3425, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi", + "nlp", + "code", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-7b-instruct", + "name": "Qwen2.5 Coder 7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct. 
646 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 646, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-7B", + "base_model:finetune:Qwen/Qwen2.5-Coder-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-7b", + "name": "Qwen2.5 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-7B", + "description": "Open source model Qwen/Qwen2.5-7B. 
264 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 264, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-distill-qwen-1.5b", + "name": "Deepseek R1 Distill Qwen 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B. 1446 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1446, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-v3", + "name": "Deepseek V3", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3", + "description": "Open source model deepseek-ai/DeepSeek-V3. 
4024 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4024, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2412.19437", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt2-large", + "name": "Gpt2 Large", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2-large", + "description": "Open source model openai-community/gpt2-large. 344 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 344, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "onnx", + "safetensors", + "gpt2", + "en", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-4.7-flash-mlx-8bit", + "name": "Glm 4.7 Flash Mlx 8Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-8bit", + "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-8bit. 
9 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 9, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "mlx", + "conversational", + "en", + "zh", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "8-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-4.7-flash-mlx-6bit", + "name": "Glm 4.7 Flash Mlx 6Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-6bit", + "description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-6bit. 7 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 7, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "mlx", + "conversational", + "en", + "zh", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "6-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-0.6b-fp8", + "name": "Qwen3 0.6B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-0.6B-FP8", + "description": "Open source model Qwen/Qwen3-0.6B-FP8. 
56 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 56, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-0.6B", + "base_model:quantized:Qwen/Qwen3-0.6B", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.1-8b", + "name": "Llama 3.1 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-8B", + "description": "Open source model meta-llama/Llama-3.1-8B. 2065 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2065, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "pythia-160m", + "name": "Pythia 160M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/EleutherAI/pythia-160m", + "description": "Open source model EleutherAI/pythia-160m. 
38 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 38, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "gpt_neox", + "causal-lm", + "pythia", + "en", + "dataset:EleutherAI/pile", + "arxiv:2304.01373", + "arxiv:2101.00027", + "arxiv:2201.07311", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-distill-qwen-32b", + "name": "Deepseek R1 Distill Qwen 32B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B. 1517 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1517, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "hunyuanocr", + "name": "Hunyuanocr", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/HunyuanOCR", + "description": "Open source model tencent/HunyuanOCR. 
553 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 553, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "hunyuan_vl", + "ocr", + "hunyuan", + "vision-language", + "image-to-text", + "1B", + "end-to-end", + "image-text-to-text", + "conversational", + "multilingual", + "arxiv:2511.19575", + "base_model:tencent/HunyuanOCR", + "base_model:finetune:tencent/HunyuanOCR", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-30b-a3b", + "name": "Qwen3 30B A3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B", + "description": "Open source model Qwen/Qwen3-30B-A3B. 855 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 855, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-30B-A3B-Base", + "base_model:finetune:Qwen/Qwen3-30B-A3B-Base", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-0.5b", + "name": "Qwen2.5 0.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-0.5B", + "description": "Open source model Qwen/Qwen2.5-0.5B. 
372 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 372, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-32b-instruct-awq", + "name": "Qwen2.5 32B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-32B-Instruct-AWQ. 94 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 94, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-32B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-32B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "nvidia-nemotron-3-nano-30b-a3b-fp8", + "name": "Nvidia Nemotron 3 Nano 30B A3B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8", + "description": "Open source model 
nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8. 284 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 284, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "nemotron_h", + "feature-extraction", + "nvidia", + "pytorch", + "conversational", + "custom_code", + "en", + "es", + "fr", + "de", + "ja", + "it", + "dataset:nvidia/Nemotron-Pretraining-Code-v1", + "dataset:nvidia/Nemotron-CC-v2", + "dataset:nvidia/Nemotron-Pretraining-SFT-v1", + "dataset:nvidia/Nemotron-CC-Math-v1", + "dataset:nvidia/Nemotron-Pretraining-Code-v2", + "dataset:nvidia/Nemotron-Pretraining-Specialized-v1", + "dataset:nvidia/Nemotron-CC-v2.1", + "dataset:nvidia/Nemotron-CC-Code-v1", + "dataset:nvidia/Nemotron-Pretraining-Dataset-sample", + "dataset:nvidia/Nemotron-Competitive-Programming-v1", + "dataset:nvidia/Nemotron-Math-v2", + "dataset:nvidia/Nemotron-Agentic-v1", + "dataset:nvidia/Nemotron-Math-Proofs-v1", + "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1", + "dataset:nvidia/Nemotron-Science-v1", + "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend", + "arxiv:2512.20848", + "arxiv:2512.20856", + "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "eval-results", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-14b-instruct", + "name": "Qwen2.5 14B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct", + "description": "Open source model Qwen/Qwen2.5-14B-Instruct. 
312 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 312, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-14B", + "base_model:finetune:Qwen/Qwen2.5-14B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "nvidia-nemotron-3-nano-30b-a3b-bf16", + "name": "Nvidia Nemotron 3 Nano 30B A3B Bf16", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16. 
634 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 634, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "nemotron_h", + "feature-extraction", + "nvidia", + "pytorch", + "conversational", + "custom_code", + "en", + "es", + "fr", + "de", + "ja", + "it", + "dataset:nvidia/Nemotron-Pretraining-Code-v1", + "dataset:nvidia/Nemotron-CC-v2", + "dataset:nvidia/Nemotron-Pretraining-SFT-v1", + "dataset:nvidia/Nemotron-CC-Math-v1", + "dataset:nvidia/Nemotron-Pretraining-Code-v2", + "dataset:nvidia/Nemotron-Pretraining-Specialized-v1", + "dataset:nvidia/Nemotron-CC-v2.1", + "dataset:nvidia/Nemotron-CC-Code-v1", + "dataset:nvidia/Nemotron-Pretraining-Dataset-sample", + "dataset:nvidia/Nemotron-Competitive-Programming-v1", + "dataset:nvidia/Nemotron-Math-v2", + "dataset:nvidia/Nemotron-Agentic-v1", + "dataset:nvidia/Nemotron-Math-Proofs-v1", + "dataset:nvidia/Nemotron-Instruction-Following-Chat-v1", + "dataset:nvidia/Nemotron-Science-v1", + "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend", + "arxiv:2512.20848", + "arxiv:2512.20856", + "eval-results", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "openelm-1_1b-instruct", + "name": "Openelm 1_1B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/apple/OpenELM-1_1B-Instruct", + "description": "Open source model apple/OpenELM-1_1B-Instruct. 
72 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 72, + "language": "Python", + "license": "apple-amlr", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "openelm", + "custom_code", + "arxiv:2404.14619", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "tiny-random-llamaforcausallm", + "name": "Tiny Random Llamaforcausallm", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/hmellor/tiny-random-LlamaForCausalLM", + "description": "Open source model hmellor/tiny-random-LlamaForCausalLM. 0 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 0, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-next-80b-a3b-instruct", + "name": "Qwen3 Next 80B A3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct", + "description": "Open source model Qwen/Qwen3-Next-80B-A3B-Instruct. 
937 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 937, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_next", + "conversational", + "arxiv:2309.00071", + "arxiv:2404.06654", + "arxiv:2505.09388", + "arxiv:2501.15383", + "eval-results", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 56, + "context_window_tokens": 4096, + "parameters_total_b": 80, + "parameters_active_b": 80, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "h2ovl-mississippi-800m", + "name": "H2Ovl Mississippi 800M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/h2oai/h2ovl-mississippi-800m", + "description": "Open source model h2oai/h2ovl-mississippi-800m. 39 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 39, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "h2ovl_chat", + "feature-extraction", + "gpt", + "llm", + "multimodal large language model", + "ocr", + "conversational", + "custom_code", + "en", + "arxiv:2410.13611", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "bloomz-560m", + "name": "Bloomz 560M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/bigscience/bloomz-560m", + "description": "Open source model bigscience/bloomz-560m. 
137 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 137, + "language": "Python", + "license": "bigscience-bloom-rail-1.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tensorboard", + "safetensors", + "bloom", + "ak", + "ar", + "as", + "bm", + "bn", + "ca", + "code", + "en", + "es", + "eu", + "fon", + "fr", + "gu", + "hi", + "id", + "ig", + "ki", + "kn", + "lg", + "ln", + "ml", + "mr", + "ne", + "nso", + "ny", + "or", + "pa", + "pt", + "rn", + "rw", + "sn", + "st", + "sw", + "ta", + "te", + "tn", + "ts", + "tum", + "tw", + "ur", + "vi", + "wo", + "xh", + "yo", + "zh", + "zu", + "dataset:bigscience/xP3", + "arxiv:2211.01786", + "model-index", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-1.5b-quantized.w8a8", + "name": "Qwen2.5 1.5B Quantized.W8A8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Qwen2.5-1.5B-quantized.w8a8", + "description": "Open source model RedHatAI/Qwen2.5-1.5B-quantized.w8a8. 
2 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "safetensors", + "qwen2", + "chat", + "neuralmagic", + "llmcompressor", + "conversational", + "en", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:quantized:Qwen/Qwen2.5-1.5B", + "8-bit", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "h2ovl-mississippi-2b", + "name": "H2Ovl Mississippi 2B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/h2oai/h2ovl-mississippi-2b", + "description": "Open source model h2oai/h2ovl-mississippi-2b. 40 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 40, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "h2ovl_chat", + "feature-extraction", + "gpt", + "llm", + "multimodal large language model", + "ocr", + "conversational", + "custom_code", + "en", + "arxiv:2410.13611", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llava-v1.5-7b", + "name": "Llava V1.5 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/liuhaotian/llava-v1.5-7b", + "description": "Open source model liuhaotian/llava-v1.5-7b. 
537 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 537, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "llava", + "image-text-to-text", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "t5-3b", + "name": "T5 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/google-t5/t5-3b", + "description": "Open source model google-t5/t5-3b. 51 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 51, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "safetensors", + "t5", + "summarization", + "translation", + "en", + "fr", + "ro", + "de", + "multilingual", + "dataset:c4", + "arxiv:1805.12471", + "arxiv:1708.00055", + "arxiv:1704.05426", + "arxiv:1606.05250", + "arxiv:1808.09121", + "arxiv:1810.12885", + "arxiv:1905.10044", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-14b-instruct-awq", + "name": "Qwen2.5 14B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-14B-Instruct-AWQ. 
27 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 27, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-14B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-14B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.2-3b", + "name": "Llama 3.2 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.2-3B", + "description": "Open source model meta-llama/Llama-3.2-3B. 
697 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 697, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "arxiv:2405.16406", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "phi-3-mini-4k-instruct-gptq-4bit", + "name": "Phi 3 Mini 4K Instruct Gptq 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/kaitchup/Phi-3-mini-4k-instruct-gptq-4bit", + "description": "Open source model kaitchup/Phi-3-mini-4k-instruct-gptq-4bit. 2 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "conversational", + "custom_code", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "gptq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-72b-instruct-awq", + "name": "Qwen2.5 72B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-72B-Instruct-AWQ. 
74 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 74, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-72B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-72B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 50, + "context_window_tokens": 4096, + "parameters_total_b": 72, + "parameters_active_b": 72, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "smollm2-135m", + "name": "Smollm2 135M", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M", + "description": "Open source model HuggingFaceTB/SmolLM2-135M. 166 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 166, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "en", + "arxiv:2502.02737", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.3-70b-instruct", + "name": "Llama 3.3 70B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct", + "description": "Open source model meta-llama/Llama-3.3-70B-Instruct. 
2658 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2658, + "language": "Python", + "license": "llama3.3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "de", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-70B", + "base_model:finetune:meta-llama/Llama-3.1-70B", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-30b-a3b-instruct-2507-fp8", + "name": "Qwen3 30B A3B Instruct 2507 Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507-FP8", + "description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507-FP8. 
112 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 112, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-30B-A3B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-30B-A3B-Instruct-2507", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-32b-instruct", + "name": "Qwen2.5 Coder 32B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct. 
1995 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1995, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-32B", + "base_model:finetune:Qwen/Qwen2.5-Coder-32B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-235b-a22b-instruct-2507-fp8", + "name": "Qwen3 235B A22B Instruct 2507 Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B-Instruct-2507-FP8", + "description": "Open source model Qwen/Qwen3-235B-A22B-Instruct-2507-FP8. 
145 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 145, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-235B-A22B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-235B-A22B-Instruct-2507", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 164, + "context_window_tokens": 4096, + "parameters_total_b": 235, + "parameters_active_b": 235, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-distill-qwen-7b", + "name": "Deepseek R1 Distill Qwen 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-7B. 787 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 787, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "phi-3-mini-4k-instruct", + "name": "Phi 3 Mini 4K Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", + "description": "Open source model microsoft/Phi-3-mini-4k-instruct. 
1386 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1386, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "nlp", + "code", + "conversational", + "custom_code", + "en", + "fr", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-14b", + "name": "Qwen3 14B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-14B", + "description": "Open source model Qwen/Qwen3-14B. 366 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 366, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-14B-Base", + "base_model:finetune:Qwen/Qwen3-14B-Base", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-1.5b", + "name": "Qwen2.5 Coder 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B", + "description": "Open source model Qwen/Qwen2.5-Coder-1.5B. 
81 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 81, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "qwen", + "qwen-coder", + "codeqwen", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:finetune:Qwen/Qwen2.5-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.1-70b-instruct", + "name": "Llama 3.1 70B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct", + "description": "Open source model meta-llama/Llama-3.1-70B-Instruct. 
890 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 890, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-70B", + "base_model:finetune:meta-llama/Llama-3.1-70B", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "hunyuanimage-3.0", + "name": "Hunyuanimage 3.0", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/HunyuanImage-3.0", + "description": "Open source model tencent/HunyuanImage-3.0. 640 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 640, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "hunyuan_image_3_moe", + "text-to-image", + "custom_code", + "arxiv:2509.23951", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-7b-instruct-awq", + "name": "Qwen2.5 Coder 7B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-AWQ. 
19 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 19, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-7B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-coder-30b-a3b-instruct", + "name": "Qwen3 Coder 30B A3B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct", + "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct. 
945 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 945, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-0528", + "name": "Deepseek R1 0528", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-0528", + "description": "Open source model deepseek-ai/DeepSeek-R1-0528. 2400 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2400, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2501.12948", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "tiny-random-llama-3", + "name": "Tiny Random Llama 3", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/llamafactory/tiny-random-Llama-3", + "description": "Open source model llamafactory/tiny-random-Llama-3. 
3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-32b-instruct-awq", + "name": "Qwen2.5 Coder 32B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct-AWQ. 33 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 33, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-32B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-Coder-32B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "mistral-7b-instruct-v0.1", + "name": "Mistral 7B Instruct V0.1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1", + "description": "Open source 
model mistralai/Mistral-7B-Instruct-v0.1. 1826 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1826, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "mistral", + "finetuned", + "mistral-common", + "conversational", + "arxiv:2310.06825", + "base_model:mistralai/Mistral-7B-v0.1", + "base_model:finetune:mistralai/Mistral-7B-v0.1", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt-oss-20b-mxfp4-q8", + "name": "Gpt Oss 20B Mxfp4 Q8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mlx-community/gpt-oss-20b-MXFP4-Q8", + "description": "Open source model mlx-community/gpt-oss-20b-MXFP4-Q8. 31 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 31, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "gpt_oss", + "vllm", + "conversational", + "base_model:openai/gpt-oss-20b", + "base_model:quantized:openai/gpt-oss-20b", + "4-bit", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 14, + "context_window_tokens": 4096, + "parameters_total_b": 20, + "parameters_active_b": 20, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-embedding-4b", + "name": "Qwen3 Embedding 4B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Embedding-4B", + "description": "Open source model Qwen/Qwen3-Embedding-4B. 
224 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 224, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen3", + "transformers", + "sentence-similarity", + "feature-extraction", + "text-embeddings-inference", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-4B-Base", + "base_model:finetune:Qwen/Qwen3-4B-Base", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-1.5b-instruct-awq", + "name": "Qwen2.5 1.5B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-1.5B-Instruct-AWQ. 
6 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 6, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-1.5B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-1.5B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "meta-llama-3.1-8b-instruct-fp8", + "name": "Meta Llama 3.1 8B Instruct Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8", + "description": "Open source model RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8. 
44 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 44, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "fp8", + "vllm", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "phi-4", + "name": "Phi 4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/phi-4", + "description": "Open source model microsoft/phi-4. 2220 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2220, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "phi", + "nlp", + "math", + "code", + "chat", + "conversational", + "en", + "arxiv:2412.08905", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1", + "name": "Deepseek R1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1", + "description": "Open source model deepseek-ai/DeepSeek-R1. 
13011 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13011, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2501.12948", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.2-1b-instruct-fp8", + "name": "Llama 3.2 1B Instruct Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8", + "description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8. 3 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3, + "language": "Python", + "license": "llama3.2", + "tags": [ + "AI", + "LLM", + "safetensors", + "llama", + "llama-3", + "neuralmagic", + "llmcompressor", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.2-1B-Instruct", + "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.1-405b", + "name": "Llama 3.1 405B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-3.1-405B", + "description": "Open source model meta-llama/Llama-3.1-405B. 
961 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 961, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "pytorch", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "arxiv:2204.05149", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 284, + "context_window_tokens": 4096, + "parameters_total_b": 405, + "parameters_active_b": 405, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-4b-thinking-2507", + "name": "Qwen3 4B Thinking 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507", + "description": "Open source model Qwen/Qwen3-4B-Thinking-2507. 548 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 548, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt2-medium", + "name": "Gpt2 Medium", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2-medium", + "description": "Open source model openai-community/gpt2-medium. 
193 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 193, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "onnx", + "safetensors", + "gpt2", + "en", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "tiny-gpt2", + "name": "Tiny Gpt2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/sshleifer/tiny-gpt2", + "description": "Open source model sshleifer/tiny-gpt2. 34 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 34, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "gpt2", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "hermes-3-llama-3.1-8b", + "name": "Hermes 3 Llama 3.1 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B", + "description": "Open source model NousResearch/Hermes-3-Llama-3.1-8B. 
385 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 385, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "Llama-3", + "instruct", + "finetune", + "chatml", + "gpt4", + "synthetic data", + "distillation", + "function calling", + "json mode", + "axolotl", + "roleplaying", + "chat", + "conversational", + "en", + "arxiv:2408.11857", + "base_model:meta-llama/Llama-3.1-8B", + "base_model:finetune:meta-llama/Llama-3.1-8B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "phi-3.5-vision-instruct", + "name": "Phi 3.5 Vision Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-3.5-vision-instruct", + "description": "Open source model microsoft/Phi-3.5-vision-instruct. 
726 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 726, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3_v", + "nlp", + "code", + "vision", + "image-text-to-text", + "conversational", + "custom_code", + "multilingual", + "arxiv:2404.14219", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": true + }, + "referral_url": "" + }, + { + "slug": "minimax-m2", + "name": "Minimax M2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/MiniMaxAI/MiniMax-M2", + "description": "Open source model MiniMaxAI/MiniMax-M2. 1485 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1485, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "minimax_m2", + "conversational", + "custom_code", + "arxiv:2504.07164", + "arxiv:2509.06501", + "arxiv:2509.13160", + "eval-results", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-distill-llama-8b", + "name": "Deepseek R1 Distill Llama 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-8B. 
843 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 843, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-14b-awq", + "name": "Qwen3 14B Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-14B-AWQ", + "description": "Open source model Qwen/Qwen3-14B-AWQ. 57 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 57, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-14B", + "base_model:quantized:Qwen/Qwen3-14B", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-235b-a22b", + "name": "Qwen3 235B A22B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-235B-A22B", + "description": "Open source model Qwen/Qwen3-235B-A22B. 
1075 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1075, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 164, + "context_window_tokens": 4096, + "parameters_total_b": 235, + "parameters_active_b": 235, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "meta-llama-3.1-8b-instruct-awq-int4", + "name": "Meta Llama 3.1 8B Instruct Awq Int4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4", + "description": "Open source model hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4. 87 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 87, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "llama-3.1", + "meta", + "autoawq", + "conversational", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "lfm2.5-1.2b-instruct-mlx-8bit", + "name": "Lfm2.5 1.2B Instruct Mlx 8Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit", + "description": "Open source model 
lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit. 1 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "lfm2.5", + "edge", + "mlx", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "base_model:LiquidAI/LFM2.5-1.2B-Instruct", + "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct", + "endpoints_compatible", + "8-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-4.7-flash-gguf", + "name": "Glm 4.7 Flash Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF", + "description": "Open source model unsloth/GLM-4.7-Flash-GGUF. 
482 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 482, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "gguf", + "unsloth", + "en", + "zh", + "arxiv:2508.06471", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "deploy:azure", + "region:us", + "imatrix", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-distill-qwen-14b", + "name": "Deepseek R1 Distill Qwen 14B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-14B. 603 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 603, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "lfm2.5-1.2b-instruct-mlx-6bit", + "name": "Lfm2.5 1.2B Instruct Mlx 6Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit", + "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit. 
4 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "lfm2.5", + "edge", + "mlx", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "base_model:LiquidAI/LFM2.5-1.2B-Instruct", + "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct", + "endpoints_compatible", + "6-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "lfm2.5-1.2b-instruct-mlx-4bit", + "name": "Lfm2.5 1.2B Instruct Mlx 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit", + "description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit. 
1 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "lfm2.5", + "edge", + "mlx", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "base_model:LiquidAI/LFM2.5-1.2B-Instruct", + "base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct", + "endpoints_compatible", + "4-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "vicuna-7b-v1.5", + "name": "Vicuna 7B V1.5", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmsys/vicuna-7b-v1.5", + "description": "Open source model lmsys/vicuna-7b-v1.5. 387 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 387, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "llama", + "arxiv:2307.09288", + "arxiv:2306.05685", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.2-1b-instruct-q8_0-gguf", + "name": "Llama 3.2 1B Instruct Q8_0 Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF", + "description": "Open source model hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF. 
43 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 43, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "gguf", + "facebook", + "meta", + "pytorch", + "llama", + "llama-3", + "llama-cpp", + "gguf-my-repo", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.2-1B-Instruct", + "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct", + "endpoints_compatible", + "region:us", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-3.3-70b-instruct-awq", + "name": "Llama 3.3 70B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/kosbu/Llama-3.3-70B-Instruct-AWQ", + "description": "Open source model kosbu/Llama-3.3-70B-Instruct-AWQ. 
10 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 10, + "language": "Python", + "license": "llama3.3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "facebook", + "meta", + "llama-3", + "awq", + "conversational", + "en", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "de", + "base_model:meta-llama/Llama-3.3-70B-Instruct", + "base_model:quantized:meta-llama/Llama-3.3-70B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-32b-fp8", + "name": "Qwen3 32B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-32B-FP8", + "description": "Open source model Qwen/Qwen3-32B-FP8. 
80 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 80, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-32B", + "base_model:quantized:Qwen/Qwen3-32B", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt2-xl", + "name": "Gpt2 Xl", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/openai-community/gpt2-xl", + "description": "Open source model openai-community/gpt2-xl. 373 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 373, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "safetensors", + "gpt2", + "en", + "arxiv:1910.09700", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-4b-instruct-2507-fp8", + "name": "Qwen3 4B Instruct 2507 Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507-FP8", + "description": "Open source model Qwen/Qwen3-4B-Instruct-2507-FP8. 
65 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 65, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-4B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "xlnet-base-cased", + "name": "Xlnet Base Cased", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/xlnet/xlnet-base-cased", + "description": "Open source model xlnet/xlnet-base-cased. 80 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 80, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "rust", + "xlnet", + "en", + "dataset:bookcorpus", + "dataset:wikipedia", + "arxiv:1906.08237", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-2-7b-hf", + "name": "Llama 2 7B Hf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-2-7b-hf", + "description": "Open source model meta-llama/Llama-2-7b-hf. 
2268 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2268, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "llama", + "facebook", + "meta", + "llama-2", + "en", + "arxiv:2307.09288", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-math-7b-instruct", + "name": "Qwen2.5 Math 7B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Math-7B-Instruct. 89 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 89, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2409.12122", + "base_model:Qwen/Qwen2.5-Math-7B", + "base_model:finetune:Qwen/Qwen2.5-Math-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-reranker-0.6b", + "name": "Qwen3 Reranker 0.6B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Reranker-0.6B", + "description": "Open source model Qwen/Qwen3-Reranker-0.6B. 
305 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 305, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "text-ranking", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-0.6B-Base", + "base_model:finetune:Qwen/Qwen3-0.6B-Base", + "text-embeddings-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-1.5b", + "name": "Qwen2.5 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-1.5B", + "description": "Open source model Qwen/Qwen2.5-1.5B. 165 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 165, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-30b-a3b-thinking-2507", + "name": "Qwen3 30B A3B Thinking 2507", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Thinking-2507", + "description": "Open source model Qwen/Qwen3-30B-A3B-Thinking-2507. 
359 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 359, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2402.17463", + "arxiv:2407.02490", + "arxiv:2501.15383", + "arxiv:2404.06654", + "arxiv:2505.09388", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "smollm2-135m-instruct", + "name": "Smollm2 135M Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct", + "description": "Open source model HuggingFaceTB/SmolLM2-135M-Instruct. 292 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 292, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "tensorboard", + "onnx", + "safetensors", + "llama", + "transformers.js", + "conversational", + "en", + "arxiv:2502.02737", + "base_model:HuggingFaceTB/SmolLM2-135M", + "base_model:quantized:HuggingFaceTB/SmolLM2-135M", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-math-1.5b", + "name": "Qwen2.5 Math 1.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Math-1.5B", + "description": "Open source model 
Qwen/Qwen2.5-Math-1.5B. 100 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 100, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2409.12122", + "base_model:Qwen/Qwen2.5-1.5B", + "base_model:finetune:Qwen/Qwen2.5-1.5B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-4.5-air-awq-4bit", + "name": "Glm 4.5 Air Awq 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/cyankiwi/GLM-4.5-Air-AWQ-4bit", + "description": "Open source model cyankiwi/GLM-4.5-Air-AWQ-4bit. 27 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 27, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe", + "conversational", + "en", + "zh", + "arxiv:2508.06471", + "base_model:zai-org/GLM-4.5-Air", + "base_model:quantized:zai-org/GLM-4.5-Air", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-2-7b-chat-hf", + "name": "Llama 2 7B Chat Hf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf", + "description": "Open source model meta-llama/Llama-2-7b-chat-hf. 
4705 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4705, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "llama", + "facebook", + "meta", + "llama-2", + "conversational", + "en", + "arxiv:2307.09288", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-7b-instruct-gptq-int4", + "name": "Qwen2.5 Coder 7B Instruct Gptq Int4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4", + "description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4. 12 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 12, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-7B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "gptq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-vl-30b-a3b-instruct-awq", + "name": "Qwen3 Vl 30B A3B Instruct Awq", + "category": "AI Models", + 
"is_open_source": true, + "website": "https://huggingface.co/QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ", + "description": "Open source model QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ. 38 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 38, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_vl_moe", + "image-text-to-text", + "AWQ", + "vLLM", + "conversational", + "arxiv:2505.09388", + "arxiv:2502.13923", + "arxiv:2409.12191", + "arxiv:2308.12966", + "base_model:Qwen/Qwen3-VL-30B-A3B-Instruct", + "base_model:quantized:Qwen/Qwen3-VL-30B-A3B-Instruct", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-8b-base", + "name": "Qwen3 8B Base", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-8B-Base", + "description": "Open source model Qwen/Qwen3-8B-Base. 
82 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 82, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-14b-instruct", + "name": "Qwen2.5 Coder 14B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct", + "description": "Open source model Qwen/Qwen2.5-Coder-14B-Instruct. 140 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 140, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "codeqwen", + "chat", + "qwen", + "qwen-coder", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-Coder-14B", + "base_model:finetune:Qwen/Qwen2.5-Coder-14B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 10, + "context_window_tokens": 4096, + "parameters_total_b": 14, + "parameters_active_b": 14, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "stories15m_moe", + "name": "Stories15M_Moe", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/ggml-org/stories15M_MOE", + "description": "Open source model ggml-org/stories15M_MOE. 
5 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 5, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gguf", + "mixtral", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "opt-1.3b", + "name": "Opt 1.3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/facebook/opt-1.3b", + "description": "Open source model facebook/opt-1.3b. 182 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 182, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "opt", + "en", + "arxiv:2205.01068", + "arxiv:2005.14165", + "text-generation-inference", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "minimax-m2-awq", + "name": "Minimax M2 Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/QuantTrio/MiniMax-M2-AWQ", + "description": "Open source model QuantTrio/MiniMax-M2-AWQ. 
8 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 8, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mixtral", + "vLLM", + "AWQ", + "conversational", + "arxiv:2504.07164", + "arxiv:2509.06501", + "arxiv:2509.13160", + "base_model:MiniMaxAI/MiniMax-M2", + "base_model:quantized:MiniMaxAI/MiniMax-M2", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-4.7-flash-nvfp4", + "name": "Glm 4.7 Flash Nvfp4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/GadflyII/GLM-4.7-Flash-NVFP4", + "description": "Open source model GadflyII/GLM-4.7-Flash-NVFP4. 
62 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 62, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "moe", + "nvfp4", + "quantized", + "vllm", + "glm", + "30b", + "conversational", + "en", + "zh", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "hy-mt1.5-7b", + "name": "Hy Mt1.5 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/HY-MT1.5-7B", + "description": "Open source model tencent/HY-MT1.5-7B. 133 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 133, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "hunyuan_v1_dense", + "translation", + "zh", + "en", + "fr", + "pt", + "es", + "ja", + "tr", + "ru", + "ar", + "ko", + "th", + "it", + "de", + "vi", + "ms", + "id", + "tl", + "hi", + "pl", + "cs", + "nl", + "km", + "my", + "fa", + "gu", + "ur", + "te", + "mr", + "he", + "bn", + "ta", + "uk", + "bo", + "kk", + "mn", + "ug", + "arxiv:2512.24092", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gemma-2-27b-it", + "name": "Gemma 2 27B It", + "category": "AI Models", + "is_open_source": true, + 
"website": "https://huggingface.co/google/gemma-2-27b-it", + "description": "Open source model google/gemma-2-27b-it. 559 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 559, + "language": "Python", + "license": "gemma", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gemma2", + "conversational", + "arxiv:2009.03300", + "arxiv:1905.07830", + "arxiv:1911.11641", + "arxiv:1904.09728", + "arxiv:1905.10044", + "arxiv:1907.10641", + "arxiv:1811.00937", + "arxiv:1809.02789", + "arxiv:1911.01547", + "arxiv:1705.03551", + "arxiv:2107.03374", + "arxiv:2108.07732", + "arxiv:2110.14168", + "arxiv:2009.11462", + "arxiv:2101.11718", + "arxiv:2110.08193", + "arxiv:1804.09301", + "arxiv:2109.07958", + "arxiv:1804.06876", + "arxiv:2103.03874", + "arxiv:2304.06364", + "arxiv:2206.04615", + "arxiv:2203.09509", + "base_model:google/gemma-2-27b", + "base_model:finetune:google/gemma-2-27b", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 19, + "context_window_tokens": 4096, + "parameters_total_b": 27, + "parameters_active_b": 27, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-coder-next-gguf", + "name": "Qwen3 Coder Next Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Qwen3-Coder-Next-GGUF", + "description": "Open source model unsloth/Qwen3-Coder-Next-GGUF. 
347 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 347, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "gguf", + "qwen3_next", + "unsloth", + "qwen", + "qwen3", + "base_model:Qwen/Qwen3-Coder-Next", + "base_model:quantized:Qwen/Qwen3-Coder-Next", + "endpoints_compatible", + "region:us", + "imatrix", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gte-qwen2-1.5b-instruct", + "name": "Gte Qwen2 1.5B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Alibaba-NLP/gte-Qwen2-1.5B-instruct", + "description": "Open source model Alibaba-NLP/gte-Qwen2-1.5B-instruct. 229 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 229, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "sentence-transformers", + "safetensors", + "qwen2", + "mteb", + "transformers", + "Qwen2", + "sentence-similarity", + "custom_code", + "arxiv:2308.03281", + "model-index", + "text-embeddings-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "lfm2-1.2b", + "name": "Lfm2 1.2B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/LiquidAI/LFM2-1.2B", + "description": "Open source model LiquidAI/LFM2-1.2B. 
349 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 349, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "lfm2", + "liquid", + "edge", + "conversational", + "en", + "ar", + "zh", + "fr", + "de", + "ja", + "ko", + "es", + "arxiv:2511.23404", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "saiga_llama3_8b", + "name": "Saiga_Llama3_8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/IlyaGusev/saiga_llama3_8b", + "description": "Open source model IlyaGusev/saiga_llama3_8b. 137 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 137, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "ru", + "dataset:IlyaGusev/saiga_scored", + "doi:10.57967/hf/2368", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-1.7b-base", + "name": "Qwen3 1.7B Base", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-1.7B-Base", + "description": "Open source model Qwen/Qwen3-1.7B-Base. 
62 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 62, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "mistral-7b-v0.3-bnb-4bit", + "name": "Mistral 7B V0.3 Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/mistral-7b-v0.3-bnb-4bit", + "description": "Open source model unsloth/mistral-7b-v0.3-bnb-4bit. 22 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 22, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mistral", + "unsloth", + "mistral-7b", + "en", + "base_model:mistralai/Mistral-7B-v0.3", + "base_model:quantized:mistralai/Mistral-7B-v0.3", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gemma-2-2b-it", + "name": "Gemma 2 2B It", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/google/gemma-2-2b-it", + "description": "Open source model google/gemma-2-2b-it. 
1285 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1285, + "language": "Python", + "license": "gemma", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gemma2", + "conversational", + "arxiv:2009.03300", + "arxiv:1905.07830", + "arxiv:1911.11641", + "arxiv:1904.09728", + "arxiv:1905.10044", + "arxiv:1907.10641", + "arxiv:1811.00937", + "arxiv:1809.02789", + "arxiv:1911.01547", + "arxiv:1705.03551", + "arxiv:2107.03374", + "arxiv:2108.07732", + "arxiv:2110.14168", + "arxiv:2009.11462", + "arxiv:2101.11718", + "arxiv:2110.08193", + "arxiv:1804.09301", + "arxiv:2109.07958", + "arxiv:1804.06876", + "arxiv:2103.03874", + "arxiv:2304.06364", + "arxiv:1903.00161", + "arxiv:2206.04615", + "arxiv:2203.09509", + "arxiv:2403.13793", + "base_model:google/gemma-2-2b", + "base_model:finetune:google/gemma-2-2b", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 2, + "parameters_active_b": 2, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "phi-4-multimodal-instruct", + "name": "Phi 4 Multimodal Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-4-multimodal-instruct", + "description": "Open source model microsoft/Phi-4-multimodal-instruct. 
1573 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1573, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi4mm", + "nlp", + "code", + "audio", + "automatic-speech-recognition", + "speech-summarization", + "speech-translation", + "visual-question-answering", + "phi-4-multimodal", + "phi", + "phi-4-mini", + "custom_code", + "multilingual", + "ar", + "zh", + "cs", + "da", + "nl", + "en", + "fi", + "fr", + "de", + "he", + "hu", + "it", + "ja", + "ko", + "no", + "pl", + "pt", + "ru", + "es", + "sv", + "th", + "tr", + "uk", + "arxiv:2503.01743", + "arxiv:2407.13833", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "pythia-70m-deduped", + "name": "Pythia 70M Deduped", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/EleutherAI/pythia-70m-deduped", + "description": "Open source model EleutherAI/pythia-70m-deduped. 
27 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 27, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "gpt_neox", + "causal-lm", + "pythia", + "en", + "dataset:EleutherAI/the_pile_deduplicated", + "arxiv:2304.01373", + "arxiv:2101.00027", + "arxiv:2201.07311", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "dialogpt-medium", + "name": "Dialogpt Medium", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/DialoGPT-medium", + "description": "Open source model microsoft/DialoGPT-medium. 433 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 433, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "jax", + "rust", + "gpt2", + "conversational", + "arxiv:1911.00536", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "gpt-oss-20b-bf16", + "name": "Gpt Oss 20B Bf16", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/gpt-oss-20b-BF16", + "description": "Open source model unsloth/gpt-oss-20b-BF16. 
29 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 29, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "gpt_oss", + "vllm", + "unsloth", + "conversational", + "base_model:openai/gpt-oss-20b", + "base_model:finetune:openai/gpt-oss-20b", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 14, + "context_window_tokens": 4096, + "parameters_total_b": 20, + "parameters_active_b": 20, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-72b-instruct", + "name": "Qwen2.5 72B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", + "description": "Open source model Qwen/Qwen2.5-72B-Instruct. 910 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 910, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-72B", + "base_model:finetune:Qwen/Qwen2.5-72B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 50, + "context_window_tokens": 4096, + "parameters_total_b": 72, + "parameters_active_b": 72, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-32b-awq", + "name": "Qwen3 32B Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-32B-AWQ", + "description": "Open source model Qwen/Qwen3-32B-AWQ. 
125 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 125, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-32B", + "base_model:quantized:Qwen/Qwen3-32B", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "mimo-v2-flash", + "name": "Mimo V2 Flash", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/XiaomiMiMo/MiMo-V2-Flash", + "description": "Open source model XiaomiMiMo/MiMo-V2-Flash. 628 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 628, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mimo_v2_flash", + "conversational", + "custom_code", + "eval-results", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-coder-30b-a3b-instruct-fp8", + "name": "Qwen3 Coder 30B A3B Instruct Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8", + "description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8. 
158 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 158, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2505.09388", + "endpoints_compatible", + "fp8", + "deploy:azure", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-8b-fp8", + "name": "Qwen3 8B Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-8B-FP8", + "description": "Open source model Qwen/Qwen3-8B-FP8. 56 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 56, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-8B", + "base_model:quantized:Qwen/Qwen3-8B", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-v3.2", + "name": "Deepseek V3.2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3.2", + "description": "Open source model deepseek-ai/DeepSeek-V3.2. 
1251 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1251, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v32", + "conversational", + "base_model:deepseek-ai/DeepSeek-V3.2-Exp-Base", + "base_model:finetune:deepseek-ai/DeepSeek-V3.2-Exp-Base", + "eval-results", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-coder-next", + "name": "Qwen3 Coder Next", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Coder-Next", + "description": "Open source model Qwen/Qwen3-Coder-Next. 912 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 912, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_next", + "conversational", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2-0.5b", + "name": "Qwen2 0.5B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2-0.5B", + "description": "Open source model Qwen/Qwen2-0.5B. 
164 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 164, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "pretrained", + "conversational", + "en", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 5, + "parameters_active_b": 5, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "mistral-7b-v0.1", + "name": "Mistral 7B V0.1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/mistralai/Mistral-7B-v0.1", + "description": "Open source model mistralai/Mistral-7B-v0.1. 4042 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 4042, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "mistral", + "pretrained", + "mistral-common", + "en", + "arxiv:2310.06825", + "text-generation-inference", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "kimi-k2-thinking", + "name": "Kimi K2 Thinking", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/moonshotai/Kimi-K2-Thinking", + "description": "Open source model moonshotai/Kimi-K2-Thinking. 
1670 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1670, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "kimi_k2", + "conversational", + "custom_code", + "eval-results", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-0528-qwen3-8b-mlx-4bit", + "name": "Deepseek R1 0528 Qwen3 8B Mlx 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit", + "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit. 7 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 7, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "qwen3", + "conversational", + "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "4-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-7b-instruct-awq", + "name": "Qwen2.5 7B Instruct Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-AWQ", + "description": "Open source model Qwen/Qwen2.5-7B-Instruct-AWQ. 
36 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 36, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-7B-Instruct", + "base_model:quantized:Qwen/Qwen2.5-7B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "points-reader", + "name": "Points Reader", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tencent/POINTS-Reader", + "description": "Open source model tencent/POINTS-Reader. 100 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 100, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "image-text-to-text", + "conversational", + "custom_code", + "arxiv:2509.01215", + "arxiv:2412.08443", + "arxiv:2409.04828", + "arxiv:2405.11850", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-4b-base", + "name": "Qwen3 4B Base", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-4B-Base", + "description": "Open source model Qwen/Qwen3-4B-Base. 
80 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 80, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "step-3.5-flash", + "name": "Step 3.5 Flash", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/stepfun-ai/Step-3.5-Flash", + "description": "Open source model stepfun-ai/Step-3.5-Flash. 621 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 621, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "step3p5", + "conversational", + "custom_code", + "arxiv:2602.10604", + "arxiv:2601.05593", + "arxiv:2507.19427", + "eval-results", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "kogpt2-base-v2", + "name": "Kogpt2 Base V2", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/skt/kogpt2-base-v2", + "description": "Open source model skt/kogpt2-base-v2. 
60 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 60, + "language": "Python", + "license": "cc-by-nc-sa-4.0", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "jax", + "gpt2", + "ko", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "parler-tts-mini-multilingual-v1.1", + "name": "Parler Tts Mini Multilingual V1.1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/parler-tts/parler-tts-mini-multilingual-v1.1", + "description": "Open source model parler-tts/parler-tts-mini-multilingual-v1.1. 54 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 54, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "parler_tts", + "text-to-speech", + "annotation", + "en", + "fr", + "es", + "pt", + "pl", + "de", + "nl", + "it", + "dataset:facebook/multilingual_librispeech", + "dataset:parler-tts/libritts_r_filtered", + "dataset:parler-tts/libritts-r-filtered-speaker-descriptions", + "dataset:parler-tts/mls_eng", + "dataset:parler-tts/mls-eng-speaker-descriptions", + "dataset:ylacombe/mls-annotated", + "dataset:ylacombe/cml-tts-filtered-annotated", + "dataset:PHBJT/cml-tts-filtered", + "arxiv:2402.01912", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-reranker-8b", + "name": 
"Qwen3 Reranker 8B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-Reranker-8B", + "description": "Open source model Qwen/Qwen3-Reranker-8B. 213 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 213, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "text-ranking", + "arxiv:2506.05176", + "base_model:Qwen/Qwen3-8B-Base", + "base_model:finetune:Qwen/Qwen3-8B-Base", + "text-embeddings-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-0528-qwen3-8b-mlx-8bit", + "name": "Deepseek R1 0528 Qwen3 8B Mlx 8Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit", + "description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit. 
13 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "mlx", + "safetensors", + "qwen3", + "conversational", + "base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", + "8-bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "powermoe-3b", + "name": "Powermoe 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/ibm-research/PowerMoE-3b", + "description": "Open source model ibm-research/PowerMoE-3b. 14 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 14, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "granitemoe", + "arxiv:2408.13359", + "model-index", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llada-8b-instruct", + "name": "Llada 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/GSAI-ML/LLaDA-8B-Instruct", + "description": "Open source model GSAI-ML/LLaDA-8B-Instruct. 
342 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 342, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llada", + "conversational", + "custom_code", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "apertus-8b-instruct-2509", + "name": "Apertus 8B Instruct 2509", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/swiss-ai/Apertus-8B-Instruct-2509", + "description": "Open source model swiss-ai/Apertus-8B-Instruct-2509. 435 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 435, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "apertus", + "multilingual", + "compliant", + "swiss-ai", + "conversational", + "arxiv:2509.14233", + "base_model:swiss-ai/Apertus-8B-2509", + "base_model:finetune:swiss-ai/Apertus-8B-2509", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-30b-a3b-gptq-int4", + "name": "Qwen3 30B A3B Gptq Int4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-GPTQ-Int4", + "description": "Open source model Qwen/Qwen3-30B-A3B-GPTQ-Int4. 
45 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 45, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3_moe", + "conversational", + "arxiv:2309.00071", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-30B-A3B", + "base_model:quantized:Qwen/Qwen3-30B-A3B", + "endpoints_compatible", + "4-bit", + "gptq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "tinyllama-1.1b-chat-v0.3-gptq", + "name": "Tinyllama 1.1B Chat V0.3 Gptq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ", + "description": "Open source model TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ. 
9 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 9, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "en", + "dataset:cerebras/SlimPajama-627B", + "dataset:bigcode/starcoderdata", + "dataset:OpenAssistant/oasst_top1_2023-08-25", + "base_model:TinyLlama/TinyLlama-1.1B-Chat-v0.3", + "base_model:quantized:TinyLlama/TinyLlama-1.1B-Chat-v0.3", + "text-generation-inference", + "4-bit", + "gptq", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 1, + "parameters_active_b": 1, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "prot_t5_xl_bfd", + "name": "Prot_T5_Xl_Bfd", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Rostlab/prot_t5_xl_bfd", + "description": "Open source model Rostlab/prot_t5_xl_bfd. 
10 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 10, + "language": "Python", + "license": "unknown", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "tf", + "t5", + "protein language model", + "dataset:BFD", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen3-4b-instruct-2507-unsloth-bnb-4bit", + "name": "Qwen3 4B Instruct 2507 Unsloth Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit", + "description": "Open source model unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit. 13 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 13, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "unsloth", + "conversational", + "arxiv:2505.09388", + "base_model:Qwen/Qwen3-4B-Instruct-2507", + "base_model:quantized:Qwen/Qwen3-4B-Instruct-2507", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "phi-3.5-mini-instruct", + "name": "Phi 3.5 Mini Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/microsoft/Phi-3.5-mini-instruct", + "description": "Open source model microsoft/Phi-3.5-mini-instruct. 
963 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 963, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "phi3", + "nlp", + "code", + "conversational", + "custom_code", + "multilingual", + "arxiv:2404.14219", + "arxiv:2407.13833", + "arxiv:2403.06412", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "meta-llama-3.1-8b-instruct-bnb-4bit", + "name": "Meta Llama 3.1 8B Instruct Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit", + "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit. 
95 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 95, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "llama-3", + "meta", + "facebook", + "unsloth", + "conversational", + "en", + "arxiv:2204.05149", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-4.7-flash-awq-4bit", + "name": "Glm 4.7 Flash Awq 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/cyankiwi/GLM-4.7-Flash-AWQ-4bit", + "description": "Open source model cyankiwi/GLM-4.7-Flash-AWQ-4bit. 
43 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 43, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm4_moe_lite", + "conversational", + "en", + "zh", + "arxiv:2508.06471", + "base_model:zai-org/GLM-4.7-Flash", + "base_model:quantized:zai-org/GLM-4.7-Flash", + "endpoints_compatible", + "compressed-tensors", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 3, + "context_window_tokens": 4096, + "parameters_total_b": 4, + "parameters_active_b": 4, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "dots.ocr", + "name": "Dots.Ocr", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/rednote-hilab/dots.ocr", + "description": "Open source model rednote-hilab/dots.ocr. 1243 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1243, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "dots_ocr", + "safetensors", + "image-to-text", + "ocr", + "document-parse", + "layout", + "table", + "formula", + "transformers", + "custom_code", + "image-text-to-text", + "conversational", + "en", + "zh", + "multilingual", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "mistral-7b-bnb-4bit", + "name": "Mistral 7B Bnb 4Bit", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/mistral-7b-bnb-4bit", + "description": "Open source model unsloth/mistral-7b-bnb-4bit. 
30 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 30, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "mistral", + "unsloth", + "mistral-7b", + "bnb", + "en", + "text-generation-inference", + "endpoints_compatible", + "4-bit", + "bitsandbytes", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "glm-5-fp8", + "name": "Glm 5 Fp8", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/zai-org/GLM-5-FP8", + "description": "Open source model zai-org/GLM-5-FP8. 108 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 108, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "glm_moe_dsa", + "conversational", + "en", + "zh", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen-7b", + "name": "Qwen 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen-7B", + "description": "Open source model Qwen/Qwen-7B. 
395 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 395, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen", + "custom_code", + "zh", + "en", + "arxiv:2309.16609", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwq-32b-awq", + "name": "Qwq 32B Awq", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/QwQ-32B-AWQ", + "description": "Open source model Qwen/QwQ-32B-AWQ. 133 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 133, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "safetensors", + "qwen2", + "chat", + "conversational", + "en", + "arxiv:2309.00071", + "arxiv:2412.15115", + "base_model:Qwen/QwQ-32B", + "base_model:quantized:Qwen/QwQ-32B", + "4-bit", + "awq", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 22, + "context_window_tokens": 4096, + "parameters_total_b": 32, + "parameters_active_b": 32, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-r1-distill-llama-70b", + "name": "Deepseek R1 Distill Llama 70B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-70B. 
741 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 741, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "conversational", + "arxiv:2501.12948", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 49, + "context_window_tokens": 4096, + "parameters_total_b": 70, + "parameters_active_b": 70, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-coder-7b", + "name": "Qwen2.5 Coder 7B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B", + "description": "Open source model Qwen/Qwen2.5-Coder-7B. 134 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 134, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen2", + "code", + "qwen", + "qwen-coder", + "codeqwen", + "conversational", + "en", + "arxiv:2409.12186", + "arxiv:2309.00071", + "arxiv:2407.10671", + "base_model:Qwen/Qwen2.5-7B", + "base_model:finetune:Qwen/Qwen2.5-7B", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen2.5-3b", + "name": "Qwen2.5 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen2.5-3B", + "description": "Open source model Qwen/Qwen2.5-3B. 
169 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 169, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "safetensors", + "qwen2", + "conversational", + "en", + "arxiv:2407.10671", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-v2-lite-chat", + "name": "Deepseek V2 Lite Chat", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat", + "description": "Open source model deepseek-ai/DeepSeek-V2-Lite-Chat. 133 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 133, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v2", + "conversational", + "custom_code", + "arxiv:2405.04434", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "tiny-qwen3forcausallm", + "name": "Tiny Qwen3Forcausallm", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/trl-internal-testing/tiny-Qwen3ForCausalLM", + "description": "Open source model trl-internal-testing/tiny-Qwen3ForCausalLM. 
1 like on Hugging Face.",
146 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 146, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen3", + "conversational", + "arxiv:2505.09388", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 4, + "context_window_tokens": 4096, + "parameters_total_b": 6, + "parameters_active_b": 6, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "diffractgpt_mistral_chemical_formula", + "name": "Diffractgpt_Mistral_Chemical_Formula", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/knc6/diffractgpt_mistral_chemical_formula", + "description": "Open source model knc6/diffractgpt_mistral_chemical_formula. 1 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "peft", + "safetensors", + "chemistry", + "text-generation-inference", + "atomgpt", + "diffraction", + "en", + "base_model:unsloth/mistral-7b-bnb-4bit", + "base_model:adapter:unsloth/mistral-7b-bnb-4bit", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "qwen-7b-chat", + "name": "Qwen 7B Chat", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Qwen/Qwen-7B-Chat", + "description": "Open source model Qwen/Qwen-7B-Chat. 
787 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 787, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "qwen", + "custom_code", + "zh", + "en", + "arxiv:2309.16609", + "arxiv:2305.08322", + "arxiv:2009.03300", + "arxiv:2305.05280", + "arxiv:2210.03629", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 5, + "context_window_tokens": 4096, + "parameters_total_b": 7, + "parameters_active_b": 7, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "nvidia-nemotron-3-nano-30b-a3b-nvfp4", + "name": "Nvidia Nemotron 3 Nano 30B A3B Nvfp4", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4", + "description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4. 100 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 100, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "nemotron_h", + "feature-extraction", + "nvidia", + "pytorch", + "conversational", + "custom_code", + "en", + "es", + "fr", + "de", + "ja", + "it", + "dataset:nvidia/Nemotron-Pretraining-Code-v1", + "dataset:nvidia/Nemotron-CC-v2", + "dataset:nvidia/Nemotron-Pretraining-SFT-v1", + "dataset:nvidia/Nemotron-CC-Math-v1", + "dataset:nvidia/Nemotron-Pretraining-Code-v2", + "dataset:nvidia/Nemotron-Pretraining-Specialized-v1", + "dataset:nvidia/Nemotron-CC-v2.1", + "dataset:nvidia/Nemotron-CC-Code-v1", + "dataset:nvidia/Nemotron-Pretraining-Dataset-sample", + "dataset:nvidia/Nemotron-Competitive-Programming-v1", + "dataset:nvidia/Nemotron-Math-v2", + "dataset:nvidia/Nemotron-Agentic-v1", + "dataset:nvidia/Nemotron-Math-Proofs-v1", + 
"dataset:nvidia/Nemotron-Instruction-Following-Chat-v1", + "dataset:nvidia/Nemotron-Science-v1", + "dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend", + "arxiv:2512.20848", + "arxiv:2512.20856", + "arxiv:2601.20088", + "base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16", + "region:us" + ], + "hardware_req": "24GB+ VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 21, + "context_window_tokens": 4096, + "parameters_total_b": 30, + "parameters_active_b": 30, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "falcon-h1-tiny-90m-instruct", + "name": "Falcon H1 Tiny 90M Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/tiiuae/Falcon-H1-Tiny-90M-Instruct", + "description": "Open source model tiiuae/Falcon-H1-Tiny-90M-Instruct. 31 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 31, + "language": "Python", + "license": "other", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "falcon_h1", + "falcon-h1", + "edge", + "conversational", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "hermes-3-llama-3.2-3b", + "name": "Hermes 3 Llama 3.2 3B", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.2-3B", + "description": "Open source model NousResearch/Hermes-3-Llama-3.2-3B. 
174 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 174, + "language": "Python", + "license": "llama3", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "Llama-3", + "instruct", + "finetune", + "chatml", + "gpt4", + "synthetic data", + "distillation", + "function calling", + "json mode", + "axolotl", + "roleplaying", + "chat", + "conversational", + "en", + "arxiv:2408.11857", + "text-generation-inference", + "endpoints_compatible", + "deploy:azure", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 2, + "context_window_tokens": 4096, + "parameters_total_b": 3, + "parameters_active_b": 3, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "meta-llama-3.1-8b-instruct", + "name": "Meta Llama 3.1 8B Instruct", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct", + "description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct. 
94 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 94, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "llama-3", + "meta", + "facebook", + "unsloth", + "conversational", + "en", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:finetune:meta-llama/Llama-3.1-8B-Instruct", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "meta-llama-3.1-8b-instruct-gguf", + "name": "Meta Llama 3.1 8B Instruct Gguf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + "description": "Open source model bartowski/Meta-Llama-3.1-8B-Instruct-GGUF. 
321 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 321, + "language": "Python", + "license": "llama3.1", + "tags": [ + "AI", + "LLM", + "gguf", + "facebook", + "meta", + "pytorch", + "llama", + "llama-3", + "en", + "de", + "fr", + "it", + "pt", + "hi", + "es", + "th", + "base_model:meta-llama/Llama-3.1-8B-Instruct", + "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", + "endpoints_compatible", + "region:us", + "conversational" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 6, + "context_window_tokens": 4096, + "parameters_total_b": 8, + "parameters_active_b": 8, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "deepseek-v3-0324", + "name": "Deepseek V3 0324", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/deepseek-ai/DeepSeek-V3-0324", + "description": "Open source model deepseek-ai/DeepSeek-V3-0324. 3087 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 3087, + "language": "Python", + "license": "mit", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "deepseek_v3", + "conversational", + "custom_code", + "arxiv:2412.19437", + "eval-results", + "text-generation-inference", + "endpoints_compatible", + "fp8", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "elm", + "name": "Elm", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/Joaoffg/ELM", + "description": "Open source model Joaoffg/ELM. 
2 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 2, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "academic", + "university", + "en", + "nl", + "arxiv:2408.06931", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "llama-2-13b-chat-hf", + "name": "Llama 2 13B Chat Hf", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf", + "description": "Open source model meta-llama/Llama-2-13b-chat-hf. 1109 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 1109, + "language": "Python", + "license": "llama2", + "tags": [ + "AI", + "LLM", + "transformers", + "pytorch", + "safetensors", + "llama", + "facebook", + "meta", + "llama-2", + "conversational", + "en", + "arxiv:2307.09288", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "16GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 9, + "context_window_tokens": 4096, + "parameters_total_b": 13, + "parameters_active_b": 13, + "is_multimodal": false + }, + "referral_url": "" + }, + { + "slug": "svara-tts-v1", + "name": "Svara Tts V1", + "category": "AI Models", + "is_open_source": true, + "website": "https://huggingface.co/kenpath/svara-tts-v1", + "description": "Open source model kenpath/svara-tts-v1. 
18 likes on Hugging Face.", + "pros": [ + "Open Source", + "Running Locally" + ], + "cons": [ + "Requires GPU" + ], + "stars": 18, + "language": "Python", + "license": "apache-2.0", + "tags": [ + "AI", + "LLM", + "transformers", + "safetensors", + "llama", + "text-to-speech", + "speech-synthesis", + "multilingual", + "indic", + "orpheus", + "lora", + "low-latency", + "gguf", + "zero-shot", + "emotions", + "discrete-audio-tokens", + "hi", + "bn", + "mr", + "te", + "kn", + "bho", + "mag", + "hne", + "mai", + "as", + "brx", + "doi", + "gu", + "ml", + "pa", + "ta", + "ne", + "sa", + "en", + "dataset:SYSPIN", + "dataset:RASA", + "dataset:IndicTTS", + "dataset:SPICOR", + "base_model:canopylabs/3b-hi-ft-research_release", + "base_model:adapter:canopylabs/3b-hi-ft-research_release", + "text-generation-inference", + "endpoints_compatible", + "region:us" + ], + "hardware_req": "8GB VRAM", + "hosting_type": "self-hosted", + "ai_metadata": { + "vram_inference_gb": 1, + "context_window_tokens": 4096, + "parameters_total_b": 0, + "parameters_active_b": 0, + "is_multimodal": false + }, + "referral_url": "" + } +] \ No newline at end of file diff --git a/docs/app/_meta.ts b/docs/app/_meta.ts new file mode 100644 index 0000000..e58811a --- /dev/null +++ b/docs/app/_meta.ts @@ -0,0 +1,36 @@ +import type { MetaRecord } from 'nextra' + +const meta: MetaRecord = { + index: { + title: 'Home', + type: 'page', + display: 'hidden', + }, + why: { + title: '📜 Why These Docs Exist', + }, + 'quick-start': { + title: '🚀 Quick Start', + }, + deploy: { + title: '📦 Deploy Guides', + }, + stacks: { + title: '🔥 Stacks', + }, + concepts: { + title: '🧠 Concepts', + }, + // -- External links -- + directory: { + title: '← Back to Directory', + href: 'https://thealtstack.com', + type: 'page', + }, + contact: { + title: 'Contact Us', + display: 'hidden' + }, +} + +export default meta diff --git a/docs/app/concepts/_meta.ts b/docs/app/concepts/_meta.ts new file mode 100644 index 0000000..33cd761 --- 
/dev/null +++ b/docs/app/concepts/_meta.ts @@ -0,0 +1,33 @@ +import type { MetaRecord } from 'nextra' + +const meta: MetaRecord = { + 'docker-basics': { + title: 'Docker in 10 Minutes', + }, + networking: { + title: 'Networking for Self-Hosters', + }, + 'reverse-proxies': { + title: 'Reverse Proxies Explained', + }, + 'ssl-tls': { + title: 'SSL/TLS for Self-Hosters', + }, + 'env-secrets': { + title: 'Environment Variables & Secrets', + }, + monitoring: { + title: 'Monitoring & Observability', + }, + updates: { + title: 'Updating & Maintaining Containers', + }, + backups: { + title: 'Backups That Actually Work', + }, + hardware: { + title: 'Hardware & VPS Sizing', + }, +} + +export default meta diff --git a/docs/app/concepts/backups/page.mdx b/docs/app/concepts/backups/page.mdx new file mode 100644 index 0000000..3cd6d39 --- /dev/null +++ b/docs/app/concepts/backups/page.mdx @@ -0,0 +1,103 @@ +--- +title: Backups That Actually Work +description: "How to back up your self-hosted tools. Docker volumes, database dumps, and automated backup scripts that run while you sleep." +--- + +# Backups That Actually Work + +Self-hosting means *you're* responsible for your data. No "Contact Support to restore from backup." **You are the support.** + +The good news: backing up Docker-based tools is simple once you set up a system. + +## What to Back Up + +| Component | Where It Lives | How to Back Up | +|---|---|---| +| **Docker volumes** | `/var/lib/docker/volumes/` | Volume export or rsync | +| **Databases (Postgres)** | Inside a Docker container | `pg_dump` | +| **Config files** | Your `docker-compose.yml` and `.env` | Git or file copy | + +> ⚠️ **Heads Up:** `docker-compose.yml` files are easy to recreate. Database data is not. Prioritize database backups above everything else. + +## Method 1: Database Dumps (Essential) + +Most self-hosted tools use PostgreSQL. 
Here's how to dump it: + +```bash +# Dump a Postgres database running in a container +docker exec your-db-container \ + pg_dump -U postgres your_database > backup_$(date +%Y%m%d).sql +``` + +To restore: + +```bash +cat backup_20260218.sql | docker exec -i your-db-container \ + psql -U postgres your_database +``` + +## Method 2: Volume Backup + +For tools that store data in Docker volumes: + +```bash +# Find your volumes +docker volume ls + +# Backup a volume to a tar file +docker run --rm \ + -v my_volume:/data \ + -v $(pwd)/backups:/backup \ + alpine tar czf /backup/my_volume_backup.tar.gz /data +``` + +## Method 3: Automated Script + +Create a backup script that runs daily via cron: + +```bash +#!/bin/bash +# /opt/backup.sh + +BACKUP_DIR="/opt/backups" +DATE=$(date +%Y%m%d_%H%M) +mkdir -p $BACKUP_DIR + +# Dump Postgres databases +docker exec supabase-db pg_dump -U postgres postgres > $BACKUP_DIR/supabase_$DATE.sql +docker exec plausible_db pg_dump -U postgres plausible_db > $BACKUP_DIR/plausible_$DATE.sql + +# Clean backups older than 7 days +find $BACKUP_DIR -name "*.sql" -mtime +7 -delete + +echo "Backup complete: $DATE" +``` + +Add to cron: + +```bash +# Run at 3 AM every day +crontab -e +# Add this line: +0 3 * * * /opt/backup.sh >> /var/log/backup.log 2>&1 +``` + +## The 3-2-1 Rule + +For serious setups, follow the **3-2-1 backup rule**: + +- **3** copies of your data +- **2** different storage types (local + remote) +- **1** offsite copy (rsync to another server, or upload to B2/S3) + +```bash +# Sync backups to a remote server +rsync -avz /opt/backups/ user@backup-server:/backups/ +``` + +## Next Steps + +You now have the four foundational concepts: Docker, reverse proxies, SSL, and backups. 
Time to build: + +→ [Deploy Guides](/deploy) — 65+ tools ready to deploy +→ [The Bootstrapper Stack](/stacks/bootstrapper) — A complete SaaS toolkit diff --git a/docs/app/concepts/docker-basics/page.mdx b/docs/app/concepts/docker-basics/page.mdx new file mode 100644 index 0000000..c1343d2 --- /dev/null +++ b/docs/app/concepts/docker-basics/page.mdx @@ -0,0 +1,127 @@ +--- +title: Understanding Docker in 10 Minutes +description: "Docker explained for self-hosters. No CS degree required. Containers, images, volumes, and Docker Compose — the only concepts you actually need." +--- + +# Understanding Docker in 10 Minutes + +Docker is the reason self-hosting went from "sysadmin hobby" to "anyone can do it." It packages software into neat, isolated containers that run the same everywhere. + +You don't need to become a Docker expert. You need to understand **four concepts**. + +## Concept 1: Images + +An **image** is a snapshot of software — pre-built, pre-configured, ready to run. Think of it like an `.iso` file, but for apps. + +```bash +# Download the Plausible Analytics image +docker pull plausible/analytics:latest +``` + +Images live on [Docker Hub](https://hub.docker.com) — a public registry of 100,000+ images. When our deploy guides say `image: plausible/analytics:latest`, they're pulling from here. + +## Concept 2: Containers + +A **container** is a running instance of an image. Image = blueprint. Container = the actual building. + +```bash +# Start a container from an image +docker run -d --name my-plausible plausible/analytics:latest + +# See running containers +docker ps + +# Stop a container +docker stop my-plausible + +# Remove a container (data in volumes is safe) +docker rm my-plausible +``` + +> 💡 **Why?** Containers are isolated from each other and from your host system. Breaking one container doesn't break anything else. + +## Concept 3: Volumes + +**Volumes** store your data *outside* the container. 
This is critical because containers are disposable — when you update an image, you destroy the old container and create a new one. Volumes survive this process. + +```bash +# Mount a volume called "plausible-data" +docker run -v plausible-data:/var/lib/clickhouse plausible/analytics +``` + +Without volumes, your data dies when the container dies. **Always use volumes.** + +```bash +# List all volumes +docker volume ls + +# Backup a volume (copy to local tar) +docker run --rm -v plausible-data:/data -v $(pwd):/backup alpine \ + tar czf /backup/plausible-backup.tar.gz /data +``` + +## Concept 4: Docker Compose + +This is the big one. **Docker Compose** lets you define multi-container setups in a single YAML file. Most real-world tools need multiple containers (app + database + cache), and Docker Compose handles that. + +```yaml +# docker-compose.yml +version: '3.8' + +services: + app: + image: plausible/analytics:latest + ports: + - "8000:8000" + depends_on: + - db + + db: + image: postgres:14-alpine + volumes: + - db_data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: supersecret + +volumes: + db_data: +``` + +Then run it: + +```bash +# Start everything +docker compose up -d + +# See logs +docker compose logs -f + +# Stop everything +docker compose down + +# Update to latest images +docker compose pull && docker compose up -d +``` + +That's the pattern for **every single deploy guide** in these docs: +1. Copy the `docker-compose.yml` +2. Tweak the environment variables +3. Run `docker compose up -d` +4. Done. + +## The 5 Commands You'll Actually Use + +| Command | What it does | +|---|---| +| `docker compose up -d` | Start all services in the background | +| `docker compose down` | Stop all services | +| `docker compose logs -f` | Watch live logs (Ctrl+C to exit) | +| `docker compose pull` | Download latest images | +| `docker ps` | List running containers | + +That's it. That's Docker for self-hosters. 
+ +## Next Steps + +→ [Reverse Proxies Explained](/concepts/reverse-proxies) — How to access your tools via `app.yourdomain.com` +→ [Your First Deployment](/quick-start/first-deployment) — Put this knowledge to use diff --git a/docs/app/concepts/env-secrets/page.mdx b/docs/app/concepts/env-secrets/page.mdx new file mode 100644 index 0000000..9009be3 --- /dev/null +++ b/docs/app/concepts/env-secrets/page.mdx @@ -0,0 +1,153 @@ +--- +title: "Environment Variables & Secrets" +description: "How to manage .env files, Docker secrets, and sensitive configuration for self-hosted tools. Stop hardcoding passwords." +--- + +# Environment Variables & Secrets + +Every self-hosted tool needs configuration: database passwords, API keys, admin emails. The **wrong** way is hardcoding them in `docker-compose.yml`. The **right** way is environment variables. + +## The Basics: `.env` Files + +Docker Compose automatically reads a `.env` file in the same directory as your `docker-compose.yml`: + +```bash +# .env +POSTGRES_PASSWORD=super_secret_password_123 +ADMIN_EMAIL=you@yourdomain.com +SECRET_KEY=a1b2c3d4e5f6g7h8i9j0 +``` + +```yaml +# docker-compose.yml +services: + db: + image: postgres:16 + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} +``` + +Docker Compose substitutes `${POSTGRES_PASSWORD}` with the value from `.env`. Your secrets stay out of your Compose file. + +> ⚠️ **Critical:** Add `.env` to your `.gitignore` immediately. Never commit secrets to Git. + +```bash +echo ".env" >> .gitignore +``` + +## Generating Strong Passwords + +Don't use `password123`. Generate proper secrets: + +```bash +# Generate a 32-character random string +openssl rand -base64 32 + +# Generate a hex string (great for SECRET_KEY) +openssl rand -hex 32 + +# Generate a URL-safe string +python3 -c "import secrets; print(secrets.token_urlsafe(32))" +``` + +### Template for Common Tools + +Most self-hosted tools need similar variables. 
Here's a reusable `.env` template: + +```bash +# .env template — generate all values before first run + +# Database +POSTGRES_USER=app +POSTGRES_PASSWORD= # openssl rand -base64 32 +POSTGRES_DB=app_db + +# App +SECRET_KEY= # openssl rand -hex 32 +ADMIN_EMAIL=you@yourdomain.com +ADMIN_PASSWORD= # openssl rand -base64 24 +BASE_URL=https://app.yourdomain.com + +# SMTP (for email notifications) +SMTP_HOST=smtp.gmail.com +SMTP_PORT=587 +SMTP_USER=you@gmail.com +SMTP_PASSWORD= # Use app-specific password +``` + +## Default Values (Fallbacks) + +Use the `:-` syntax for non-sensitive defaults: + +```yaml +environment: + NODE_ENV: ${NODE_ENV:-production} # Defaults to "production" + LOG_LEVEL: ${LOG_LEVEL:-info} # Defaults to "info" + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} # No default — MUST be set +``` + +## Docker Secrets (Advanced) + +For production setups, Docker Secrets are more secure than environment variables — they're stored encrypted and mounted as files: + +```yaml +services: + db: + image: postgres:16 + environment: + POSTGRES_PASSWORD_FILE: /run/secrets/db_password + secrets: + - db_password + +secrets: + db_password: + file: ./secrets/db_password.txt +``` + +```bash +# Create the secret file +mkdir -p secrets +openssl rand -base64 32 > secrets/db_password.txt +chmod 600 secrets/db_password.txt +``` + +> 💡 Not all images support `_FILE` suffix variables. Check the image's documentation on Docker Hub. 
+ +## Multiple Environments + +Keep separate `.env` files for different environments: + +```bash +.env # Production (default) +.env.local # Local development +.env.staging # Staging server +``` + +Use them explicitly: + +```bash +# Use a specific env file +docker compose --env-file .env.staging up -d +``` + +## Security Checklist + +- [ ] `.env` is in `.gitignore` +- [ ] No secrets are hardcoded in `docker-compose.yml` +- [ ] All passwords are randomly generated (32+ characters) +- [ ] Database ports are NOT exposed to the internet +- [ ] Secret files have `chmod 600` permissions +- [ ] Default passwords from docs have been changed + +## Common Mistakes + +**"Variable is empty in the container"** → Check for typos. Variable names are case-sensitive. `POSTGRES_password` ≠ `POSTGRES_PASSWORD`. + +**"Changes to .env aren't applying"** → You need to recreate the container: `docker compose up -d --force-recreate`. + +**"I committed my .env to Git"** → Even after removing it, it's in Git history. Rotate ALL secrets immediately and use `git filter-branch` or BFG Repo Cleaner. + +## Next Steps + +→ [Monitoring & Observability](/concepts/monitoring) — Know when things break +→ [Docker in 10 Minutes](/concepts/docker-basics) — Review the fundamentals diff --git a/docs/app/concepts/hardware/page.mdx b/docs/app/concepts/hardware/page.mdx new file mode 100644 index 0000000..a4b4282 --- /dev/null +++ b/docs/app/concepts/hardware/page.mdx @@ -0,0 +1,145 @@ +--- +title: "Hardware & VPS Sizing" +description: "How much RAM, CPU, and disk you actually need for self-hosting. VPS provider comparison and scaling strategies." +--- + +# Hardware & VPS Sizing + +The #1 question new self-hosters ask: **"What server do I need?"** + +Short answer: less than you think to start, more than you think once you're hooked. + +## Quick Sizing Guide + +### How Much RAM Do I Need? 
+ +| Setup | RAM | What You Can Run | +|---|---|---| +| **Starter** | 2 GB | 1–2 lightweight tools (Uptime Kuma, Plausible) | +| **Hobbyist** | 4 GB | 3–5 tools + a database + reverse proxy | +| **Power User** | 8 GB | 8–12 tools + multiple databases | +| **Homelab** | 16 GB | Everything + AI models (small ones) | +| **AI Workloads** | 32+ GB | LLMs, image generation, video AI | + +> 💡 **Start with 4 GB.** You can always upgrade. Most VPS providers let you resize without downtime. + +### CPU Guidelines + +| Workload | vCPUs Needed | +|---|---| +| Static tools (Uptime Kuma, PocketBase) | 1 vCPU | +| Web apps (Plausible, Outline, n8n) | 2 vCPUs | +| Heavy apps (PostHog, Supabase, Metabase) | 4 vCPUs | +| AI inference (Ollama, Stable Diffusion) | 4+ vCPUs + GPU | + +### Disk Space + +| Component | Typical Usage | +|---|---| +| Base OS + Docker | 5–8 GB | +| Each Docker image | 100 MB – 2 GB | +| PostgreSQL database (small app) | 500 MB – 5 GB | +| Log files (unmanaged) | 1–10 GB | +| AI models (per model) | 4–70 GB | + +**Minimum recommended:** 50 GB SSD. +**Comfortable:** 80–160 GB SSD. +**AI workloads:** 200+ GB NVMe. 
+ +## VPS Provider Comparison + +| Provider | Starting At | Pros | Best For | +|---|---|---|---| +| [**DigitalOcean**](https://m.do.co/c/2ed27757a361) | $6/mo (1 GB) | Simple UI, great docs, predictable pricing | Beginners | +| **Hetzner** | €3.79/mo (2 GB) | Best price-to-performance in EU | Power users, EU hosting | +| **Contabo** | €5.99/mo (4 GB) | Cheapest for RAM-heavy setups | Budget homelab | +| **Linode (Akamai)** | $5/mo (1 GB) | Reliable, good network | Small projects | +| **Vultr** | $5/mo (1 GB) | Global locations, hourly billing | Testing and experimentation | +| **Oracle Cloud** | Free (4 vCPUs, 24 GB ARM) | Unbeatable free tier | Zero-budget hosting | +| **Home Server** | One-time cost | Full control, unlimited bandwidth | Privacy maximalists | + +> 🏆 **Our Pick:** [DigitalOcean](https://m.do.co/c/2ed27757a361) for beginners (simple, reliable, [$200 free credit](https://m.do.co/c/2ed27757a361)). **Hetzner** for best value. **Oracle Cloud free tier** if you want to pay nothing. + +## Real-World Stack Sizing + +Here's what actual AltStack setups typically need: + +### The Bootstrapper Stack (4 GB RAM) +- Coolify (deployment platform) +- Plausible (analytics) +- Uptime Kuma (monitoring) +- Listmonk (newsletters) +- Caddy (reverse proxy) + +### The Privacy Stack (4 GB RAM) +- Vaultwarden (passwords) +- Jitsi Meet (video calls) +- Mattermost (messaging) +- Caddy (reverse proxy) + +### The AI Stack (16–32 GB RAM) +- Ollama (LLM inference) +- Stable Diffusion (image generation) +- TabbyML (code completion) +- Continue.dev (AI coding) + +## Scaling Strategies + +### Vertical Scaling (Bigger Server) + +The simplest approach. 
Just resize your VPS: + +- **DigitalOcean:** Resize droplet (takes ~1 minute) +- **Hetzner:** Rescale server (may require reboot) +- **Home server:** Add RAM sticks + +### Horizontal Scaling (More Servers) + +When one server isn't enough: + +``` +Server 1: Databases (Postgres, Redis) +Server 2: Application containers +Server 3: AI workloads (GPU) +``` + +Connect them with a private network (most VPS providers offer this for free) or a VPN like WireGuard. + +### The "Start Small" Strategy + +1. **Month 1:** $6/mo droplet (1 GB) — Deploy 1–2 tools +2. **Month 3:** Resize to $12/mo (2 GB) — Add more tools +3. **Month 6:** Resize to $24/mo (4 GB) — Running your full stack +4. **Month 12+:** Add a second server or move to Hetzner for better value + +## Monitoring Your Resources + +Always know how much headroom you have: + +```bash +# Quick resource check +free -h # RAM usage +df -h # Disk usage +nproc # CPU cores +uptime # Load average + +# Docker resource usage +docker stats # Live container metrics +docker system df # Docker disk usage +``` + +## Red Flags + +🚩 **RAM constantly above 90%** → Resize or move a service to another server. + +🚩 **Disk above 80%** → Clean Docker images (`docker system prune -f`) or resize disk. + +🚩 **CPU at 100% for extended periods** → Check which container is the culprit with `docker stats`. + +🚩 **Swap usage above 1 GB** → You need more RAM. Swap is a band-aid, not a solution. + +## Next Steps + +→ [Quick Start](/quick-start) — Deploy your first tool +→ [Deploy Guides](/deploy) — Browse 65+ tools +→ [Docker in 10 Minutes](/concepts/docker-basics) — Foundation knowledge diff --git a/docs/app/concepts/monitoring/page.mdx b/docs/app/concepts/monitoring/page.mdx new file mode 100644 index 0000000..4546167 --- /dev/null +++ b/docs/app/concepts/monitoring/page.mdx @@ -0,0 +1,163 @@ +--- +title: "Monitoring & Observability" +description: "Know when things break before your users do. 
Uptime monitoring, disk alerts, log aggregation, and observability for self-hosters." +--- + +# Monitoring & Observability + +You deployed 5 tools. They're running great. You go to bed. At 3 AM, the disk fills up, Postgres crashes, and everything dies. You find out at 9 AM when a user emails you. + +**Monitoring prevents this.** + +## The Three Layers + +| Layer | What It Watches | Tool | +|---|---|---| +| **Uptime** | "Is the service responding?" | Uptime Kuma | +| **System** | CPU, RAM, disk, network | Node Exporter + Grafana | +| **Logs** | What's actually happening inside | Docker logs, Dozzle, SigNoz | + +You need **at least** the first layer. The other two are for when you get serious. + +## Layer 1: Uptime Monitoring (Essential) + +[Uptime Kuma](/deploy/uptime-kuma) is the single best tool for self-hosters. Deploy it first, always. + +```yaml +# docker-compose.yml +services: + uptime-kuma: + image: louislam/uptime-kuma:1 + container_name: uptime-kuma + restart: unless-stopped + ports: + - "3001:3001" + volumes: + - uptime_data:/app/data + +volumes: + uptime_data: +``` + +### What to Monitor + +Add a monitor for **every** service you run: + +| Type | Target | Check Interval | +|---|---|---| +| HTTP(s) | `https://plausible.yourdomain.com` | 60s | +| HTTP(s) | `https://uptime.yourdomain.com` | 60s | +| TCP Port | `localhost:5432` (Postgres) | 120s | +| Docker Container | Container name | 60s | +| DNS | `yourdomain.com` | 300s | + +### Notifications + +Uptime Kuma supports 90+ notification channels. Set up **at least two**: + +- **Email** — For non-urgent alerts +- **Telegram/Discord/Slack** — For instant mobile alerts + +> 🔥 **Pro Tip:** Monitor your monitoring. Set up an external free ping service (like [UptimeRobot](https://uptimerobot.com)) to watch your Uptime Kuma instance. + +## Layer 2: System Metrics + +### Quick Disk Alert Script + +The #1 cause of self-hosting outages is **running out of disk space**. 
This script sends an alert when disk usage exceeds 80%: + +```bash +#!/bin/bash +# /opt/scripts/disk-alert.sh + +THRESHOLD=80 +USAGE=$(df / | tail -1 | awk '{print $5}' | sed 's/%//') + +if [ "$USAGE" -gt "$THRESHOLD" ]; then + echo "⚠️ Disk usage is at ${USAGE}% on $(hostname)" | \ + mail -s "Disk Alert: ${USAGE}%" you@yourdomain.com +fi +``` + +Add to cron: + +```bash +# Check every hour +0 * * * * /opt/scripts/disk-alert.sh +``` + +### What to Watch + +| Metric | Warning Threshold | Critical Threshold | +|---|---|---| +| Disk usage | 70% | 85% | +| RAM usage | 80% | 95% | +| CPU sustained | 80% for 5 min | 95% for 5 min | +| Container restarts | 3 in 1 hour | 10 in 1 hour | + +### Docker Resource Monitoring + +Quick commands to check what's eating your resources: + +```bash +# Live resource usage per container +docker stats + +# Show container sizes (disk) +docker system df -v + +# Find large volumes +du -sh /var/lib/docker/volumes/*/ +``` + +## Layer 3: Log Aggregation + +Docker captures all stdout/stderr from your containers. Use it: + +```bash +# Live logs for a service +docker compose logs -f plausible + +# Last 100 lines +docker compose logs --tail=100 plausible + +# Logs since a specific time +docker compose logs --since="2h" plausible +``` + +### Dozzle (Docker Log Viewer) + +For a beautiful web-based log viewer: + +```yaml +services: + dozzle: + image: amir20/dozzle:latest + container_name: dozzle + ports: + - "8080:8080" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro +``` + +### For Serious Setups: SigNoz + +If you need traces, metrics, **and** logs in one place, deploy [SigNoz](/deploy/signoz). It's an open-source Datadog alternative built on OpenTelemetry. + +## Maintenance Routine + +Set a weekly calendar reminder: + +``` +☐ Check Uptime Kuma — all green? +☐ Run `docker stats` — anything hogging resources? +☐ Run `df -h` — disk space OK? 
+☐ Run `docker system prune -f` — clean unused images +☐ Check logs for any errors — `docker compose logs --since=168h | grep -i error` +``` + +## Next Steps + +→ [Updating & Maintaining Containers](/concepts/updates) — Keep your tools up to date safely +→ [Backups That Actually Work](/concepts/backups) — Protect your data +→ [Deploy Uptime Kuma](/deploy/uptime-kuma) — Set up monitoring now diff --git a/docs/app/concepts/networking/page.mdx b/docs/app/concepts/networking/page.mdx new file mode 100644 index 0000000..26f0539 --- /dev/null +++ b/docs/app/concepts/networking/page.mdx @@ -0,0 +1,160 @@ +--- +title: "Networking for Self-Hosters" +description: "Ports, DNS, firewalls, and private networks — the networking basics every self-hoster needs to know." +--- + +# Networking for Self-Hosters + +You deployed a tool. It works on `localhost:3000`. You try to access it from your phone. Nothing. Welcome to networking. + +This guide covers the **four things** standing between your server and the outside world. + +## 1. Ports + +Every network service listens on a **port** — a numbered door on your server. Some well-known ones: + +| Port | Service | +|---|---| +| `22` | SSH | +| `80` | HTTP | +| `443` | HTTPS | +| `5432` | PostgreSQL | +| `3000–9000` | Where most self-hosted tools live | + +When Docker maps `-p 8080:3000`, it's saying: "When traffic hits port 8080 on the host, send it to port 3000 inside the container." + +```yaml +# In docker-compose.yml +ports: + - "8080:3000" # host:container +``` + +> ⚠️ **Never expose database ports** (5432, 3306, 27017) to the internet. Keep them internal to Docker networks. + +## 2. 
DNS (Domain Name System)

DNS translates human-readable names to IP addresses:

```
plausible.yourdomain.com → 203.0.113.42
```

### Setting Up DNS Records

In your domain registrar (Cloudflare, Namecheap, etc.):

| Type | Name | Value | What it does |
|---|---|---|---|
| **A** | `@` | `203.0.113.42` | Points root domain to your server |
| **A** | `plausible` | `203.0.113.42` | Points subdomain to your server |
| **CNAME** | `www` | `yourdomain.com` | Aliases `www` to root |
| **A** | `*` | `203.0.113.42` | Wildcard — catch-all for any subdomain |

> 💡 **Pro Tip:** A wildcard `*` A record + Caddy reverse proxy = unlimited subdomains with zero DNS management. Just add entries to your Caddyfile.

### DNS Propagation

After changing DNS records, it can take **5 minutes to 48 hours** to propagate globally. Use [dnschecker.org](https://dnschecker.org) to verify.

## 3. Firewalls (UFW)

A firewall controls which ports are open to the internet. On Ubuntu/Debian, use **UFW** (Uncomplicated Firewall):

```bash
# Check current status
ufw status

# Deny everything by default (set this BEFORE enabling)
ufw default deny incoming
ufw default allow outgoing

# Allow essential ports
ufw allow 22/tcp # SSH — DON'T lock yourself out
ufw allow 80/tcp # HTTP
ufw allow 443/tcp # HTTPS

# Enable the firewall
ufw enable
```

### The Golden Rule

Only open three ports to the internet: **22** (SSH), **80** (HTTP), **443** (HTTPS).

Your reverse proxy (Caddy/Nginx) handles port 80/443 and routes traffic internally to your containers. Individual tool ports (3000, 8080, etc.) should **never** be exposed publicly.

```
Internet → Port 443 → Caddy → Internal Docker Network → Your Tools
```

> ⚠️ **Docker bypasses UFW.** Docker publishes ports by writing iptables rules directly, so a container with `ports: "5432:5432"` is reachable from the internet *even if UFW denies it*. For services that should stay internal, bind published ports to localhost (`"127.0.0.1:5432:5432"`) or don't publish them at all.

### Common Mistakes

**"I can't SSH into my server"** → You blocked port 22 before enabling UFW. Contact your hosting provider for console access.

**"My tool works locally but not remotely"** → Port 80/443 isn't open. Run `ufw allow 80/tcp && ufw allow 443/tcp`. 
+ +**"I opened port 8080 and got hacked"** → Never expose app ports directly. Use a reverse proxy instead. + +## 4. Docker Networks + +Docker creates isolated **networks** for your containers. By default, containers in the same `docker-compose.yml` can talk to each other by service name: + +```yaml +services: + app: + image: myapp:latest + depends_on: + - db # Can reach the database at "db:5432" + + db: + image: postgres:16 + # No "ports:" = not accessible from outside Docker +``` + +### When to Create Custom Networks + +If you need containers from **different** Compose files to communicate (e.g., a shared Caddy reverse proxy): + +```yaml +# In your Caddyfile's docker-compose.yml +networks: + proxy: + external: true + +# In your app's docker-compose.yml +networks: + default: + name: proxy + external: true +``` + +Create the shared network first: + +```bash +docker network create proxy +``` + +Now all containers on the `proxy` network can reach each other by service name — across different Compose files. + +## Quick Reference + +```bash +# See what's listening on which port +ss -tlnp + +# Test if a port is open from outside +nc -zv your-server-ip 443 + +# See Docker networks +docker network ls + +# Check DNS resolution +dig plausible.yourdomain.com +nslookup plausible.yourdomain.com +``` + +## Next Steps + +→ [Reverse Proxies Explained](/concepts/reverse-proxies) — Route traffic from domains to containers +→ [SSL/TLS for Self-Hosters](/concepts/ssl-tls) — Encrypt your traffic +→ [Environment Variables & Secrets](/concepts/env-secrets) — Secure your configuration diff --git a/docs/app/concepts/page.mdx b/docs/app/concepts/page.mdx new file mode 100644 index 0000000..e3484bb --- /dev/null +++ b/docs/app/concepts/page.mdx @@ -0,0 +1,56 @@ +--- +title: "Concepts" +description: "The foundational knowledge for self-hosting. Docker, networking, security, backups — explained like you're a human, not a sysadmin." 
+--- + +# Concepts + +Before you deploy anything, understand the building blocks. These guides cover the **why** and **how** behind self-hosting infrastructure — no fluff, no PhD required. + +> 📖 **Reading order matters.** Start from the top and work down. Each article builds on the one before it. + +--- + +## The Foundations + +These four are non-negotiable. Read them before your first deploy. + +| # | Guide | What You'll Learn | +|---|---|---| +| 1 | [Docker in 10 Minutes](/concepts/docker-basics) | Images, containers, volumes, Docker Compose — the only 4 concepts you need | +| 2 | [Networking for Self-Hosters](/concepts/networking) | Ports, DNS, firewalls, and why your tool isn't accessible from the internet | +| 3 | [Reverse Proxies Explained](/concepts/reverse-proxies) | Map `app.yourdomain.com` to your containers with Caddy | +| 4 | [SSL/TLS for Self-Hosters](/concepts/ssl-tls) | HTTPS, Let's Encrypt, and why it matters | + +--- + +## Running in Production + +Once your tools are deployed, keep them alive and healthy. + +| # | Guide | What You'll Learn | +|---|---|---| +| 5 | [Environment Variables & Secrets](/concepts/env-secrets) | `.env` files, Docker secrets, and never hardcoding passwords again | +| 6 | [Monitoring & Observability](/concepts/monitoring) | Know when things break before your users do | +| 7 | [Updating & Maintaining Containers](/concepts/updates) | Safe update workflows, rollbacks, and automating the boring parts | +| 8 | [Backups That Actually Work](/concepts/backups) | Database dumps, volume backups, and the 3-2-1 rule | + +--- + +## Planning & Scaling + +Before you buy a server (or a bigger one). + +| # | Guide | What You'll Learn | +|---|---|---| +| 9 | [Hardware & VPS Sizing](/concepts/hardware) | How much RAM/CPU you actually need, and which providers are worth it | + +--- + +## Ready to Deploy? + +You've got the knowledge. 
Now put it to work: + +→ [Deploy Guides](/deploy) — 65+ tools with Docker Compose configs +→ [Quick Start](/quick-start) — Your first deployment in 5 minutes +→ [Curated Stacks](/stacks) — Pre-built tool bundles for specific use cases diff --git a/docs/app/concepts/reverse-proxies/page.mdx b/docs/app/concepts/reverse-proxies/page.mdx new file mode 100644 index 0000000..9bfa495 --- /dev/null +++ b/docs/app/concepts/reverse-proxies/page.mdx @@ -0,0 +1,113 @@ +--- +title: Reverse Proxies Explained +description: "What a reverse proxy does and why you need one. Set up Caddy or Nginx to serve your self-hosted tools on proper domains with automatic HTTPS." +--- + +# Reverse Proxies Explained + +Right now your tools run on ports like `:3001`, `:8000`, `:8080`. That's fine for testing, but you don't want users visiting `http://your-ip:8000`. + +A **reverse proxy** maps clean domains to those ugly ports: + +``` +plausible.yourdomain.com → localhost:8000 +uptime.yourdomain.com → localhost:3001 +supabase.yourdomain.com → localhost:8443 +``` + +It also handles **HTTPS** (SSL certificates) automatically. + +## Which One to Use? + +| Proxy | Our Take | +|---|---| +| **Caddy** ✅ | **Use this.** Automatic HTTPS, zero-config SSL, human-readable config. Built for self-hosters. | +| **Nginx Proxy Manager** | GUI-first option. Great if you hate config files. Slightly more resource-heavy. | +| **Traefik** | Powerful but complex. Built for Kubernetes. Overkill for most self-hosting setups. | +| **Nginx (raw)** | The classic. Fine but verbose. No auto-SSL without certbot scripts. | + +> 🏆 **The Verdict:** Start with Caddy. Seriously. The config file is 6 lines. 
+ 
## Setting Up Caddy (Recommended)

### Step 1: Deploy Caddy

```yaml
# docker-compose.yml
version: '3.8'

services:
  caddy:
    image: caddy:2-alpine
    container_name: caddy
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config

volumes:
  caddy_data:
  caddy_config:
```

### Step 2: Configure Your Domains

Create a `Caddyfile` in the same directory:

```
plausible.yourdomain.com {
    reverse_proxy localhost:8000
}

uptime.yourdomain.com {
    reverse_proxy localhost:3001
}

git.yourdomain.com {
    reverse_proxy localhost:3000
}
```

That's the entire config. Caddy automatically obtains and renews Let's Encrypt SSL certificates for every domain listed.

> ⚠️ **Heads Up:** Caddy is running *inside* a container here, so `localhost` in the Caddyfile refers to the Caddy container itself — not your server. To reach apps published on the host, add `extra_hosts: ["host.docker.internal:host-gateway"]` to the `caddy` service and proxy to `host.docker.internal:8000` instead. Better yet, attach Caddy and your apps to a shared Docker network and proxy to service names (e.g. `reverse_proxy plausible:8000`) — then the app containers don't need published ports at all.

### Step 3: Point DNS

In your domain registrar (Cloudflare, Namecheap, etc.), add A records:

| Type | Name | Value |
|---|---|---|
| A | `plausible` | `your-server-ip` |
| A | `uptime` | `your-server-ip` |
| A | `git` | `your-server-ip` |

### Step 4: Start

```bash
docker compose up -d
```

Within 60 seconds, Caddy will obtain SSL certificates and your tools will be live on proper HTTPS domains. 
+ +## How It Works (Simplified) + +``` +User visits plausible.yourdomain.com + ↓ + DNS resolves to your server IP + ↓ + Caddy receives the request on port 443 + ↓ + Caddy reads Caddyfile: "plausible.yourdomain.com → localhost:8000" + ↓ + Caddy forwards the request to your Plausible container + ↓ + User sees Plausible dashboard over HTTPS 🔒 +``` + +→ [Setting Up a Reverse Proxy (Practical Guide)](/quick-start/reverse-proxy) — Get Nginx, Caddy, or Traefik running now +→ [SSL/TLS for Self-Hosters](/concepts/ssl-tls) — Deep dive into certificates and security +→ [Deploy Guides](/deploy) — All our guides include reverse proxy config diff --git a/docs/app/concepts/ssl-tls/page.mdx b/docs/app/concepts/ssl-tls/page.mdx new file mode 100644 index 0000000..9c801ea --- /dev/null +++ b/docs/app/concepts/ssl-tls/page.mdx @@ -0,0 +1,56 @@ +--- +title: "SSL/TLS for Self-Hosters" +description: "HTTPS for your self-hosted tools. How SSL works, why you need it, and how to set it up with Caddy or Let's Encrypt." +--- + +# SSL/TLS for Self-Hosters + +**SSL/TLS** is what makes the padlock appear in your browser. It encrypts traffic between your users and your server so nobody can snoop on it. + +Every self-hosted tool accessible from the internet **must** have HTTPS. No exceptions. + +## The Easy Way: Caddy (Automatic) + +If you followed our [reverse proxy guide](/concepts/reverse-proxies) and are using Caddy, **you already have SSL**. Caddy obtains and renews Let's Encrypt certificates automatically for every domain in your Caddyfile. + +No config needed. No cron jobs. No certbot. It just works. + +> 🔥 **Pro Tip:** This is the #1 reason we recommend Caddy over Nginx. 
+ +## The Manual Way: Let's Encrypt + Certbot + +If you're using raw Nginx, you'll need certbot: + +```bash +# Install certbot +apt install certbot python3-certbot-nginx -y + +# Obtain a certificate +certbot --nginx -d plausible.yourdomain.com + +# Verify auto-renewal +certbot renew --dry-run +``` + +Certbot will modify your Nginx config automatically and set up a cron job for renewal. + +## SSL Checklist + +After setting up SSL, verify: + +- [ ] Site loads on `https://` (padlock visible) +- [ ] `http://` redirects to `https://` automatically +- [ ] Certificate is from Let's Encrypt (click padlock → "Certificate") +- [ ] No mixed-content warnings in browser console + +## Common Gotchas + +**"Certificate not found"** → Your DNS hasn't propagated yet. Wait 5–10 minutes and try again. + +**"Too many requests"** → Let's Encrypt rate-limits to 50 certificates/week per domain. If you're testing, use `--staging` flag first. + +**"Connection refused on port 443"** → Port 443 isn't open in your firewall. Run: `ufw allow 443/tcp` + +## Next Steps + +→ [Backups That Actually Work](/concepts/backups) — Protect the data you're securing with SSL diff --git a/docs/app/concepts/updates/page.mdx b/docs/app/concepts/updates/page.mdx new file mode 100644 index 0000000..3aec444 --- /dev/null +++ b/docs/app/concepts/updates/page.mdx @@ -0,0 +1,153 @@ +--- +title: "Updating & Maintaining Containers" +description: "How to safely update self-hosted tools running in Docker. Update workflows, rollbacks, and optional automation with Watchtower." +--- + +# Updating & Maintaining Containers + +Your tools need updates — security patches, bug fixes, new features. But updating a self-hosted tool isn't like clicking "Update" in an app store. You need a process. + +## The Safe Update Workflow + +Follow this **every time** you update a tool: + +```bash +# 1. Backup first (ALWAYS) +docker exec my-db pg_dump -U postgres mydb > backup_$(date +%Y%m%d).sql + +# 2. 
Pull the new image +docker compose pull + +# 3. Recreate containers with new image +docker compose up -d + +# 4. Check logs for errors +docker compose logs -f --tail=50 + +# 5. Verify the tool works +curl -I https://app.yourdomain.com +``` + +> ⚠️ **Golden Rule:** Never update without a backup. If something breaks, you can roll back in 60 seconds. + +## Rolling Back + +Something went wrong? Here's how to revert to the previous version: + +### Option 1: Pin to Previous Version + +```yaml +# docker-compose.yml — change the tag +services: + app: + image: plausible/analytics:v2.0.0 # Was :v2.1.0 +``` + +```bash +docker compose up -d +``` + +### Option 2: Restore From Backup + +```bash +# Stop the broken service +docker compose down + +# Restore the database backup +cat backup_20260218.sql | docker exec -i my-db psql -U postgres mydb + +# Start with the old image +docker compose up -d +``` + +## Image Tags: `latest` vs Pinned Versions + +| Approach | Pros | Cons | +|---|---|---| +| `image: app:latest` | Always gets newest | Can break unexpectedly | +| `image: app:v2.1.0` | Predictable, reproducible | Manual updates required | +| `image: app:2` | Gets patches within major version | Some risk of breaking changes | + +> 🏆 **Our Recommendation:** Use **major version tags** (`image: postgres:16`) for databases and **pinned versions** (`image: plausible/analytics:v2.1.0`) for applications. Avoid `latest` in production. 
+
+## Automated Updates with Watchtower
+
+If you want hands-off updates (with some risk), **Watchtower** watches your containers and auto-updates them:
+
+```yaml
+services:
+  watchtower:
+    image: containrrr/watchtower
+    container_name: watchtower
+    restart: unless-stopped
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    environment:
+      WATCHTOWER_CLEANUP: "true"
+      WATCHTOWER_SCHEDULE: "0 0 4 * * *" # 4 AM daily
+      WATCHTOWER_NOTIFICATIONS: "email"
+    command: --include-restarting
+```
+
+### Watchtower Caveats
+
+- It updates **all** containers by default. To opt containers in with labels, add `WATCHTOWER_LABEL_ENABLE: "true"` to Watchtower's environment — without it, the `enable=true` label below is ignored (only the `enable=false` exclusion works):
+
+```yaml
+services:
+  plausible:
+    image: plausible/analytics:latest
+    labels:
+      - "com.centurylinklabs.watchtower.enable=true"
+
+  database:
+    image: postgres:16
+    labels:
+      - "com.centurylinklabs.watchtower.enable=false" # NEVER auto-update databases
+```
+
+- It doesn't run migrations. Some tools need `docker exec app migrate` after updates.
+- It can't roll back automatically.
+
+> ⚠️ **Never auto-update databases.** Postgres, MySQL, and Redis major version upgrades require manual migration steps. Always pin database images.
+
+## Cleanup: Reclaiming Disk Space
+
+Old images pile up. 
Docker doesn't clean them automatically: + +```bash +# See how much space Docker is using +docker system df + +# Remove unused images (safe) +docker image prune -f + +# Nuclear option: remove ALL unused data +docker system prune -a -f --volumes +# ⚠️ This deletes stopped containers, unused images, AND orphaned volumes +``` + +### Automate Cleanup + +Add to your crontab: + +```bash +# Weekly cleanup at 3 AM Sunday +0 3 * * 0 docker image prune -f >> /var/log/docker-cleanup.log 2>&1 +``` + +## Update Checklist + +Before updating any tool: + +- [ ] Database backed up +- [ ] Current version noted (in case of rollback) +- [ ] Changelog reviewed for breaking changes +- [ ] `.env` file backed up +- [ ] Update applied and logs checked +- [ ] Service verified working + +## Next Steps + +→ [Backups That Actually Work](/concepts/backups) — Make sure you can actually roll back +→ [Monitoring & Observability](/concepts/monitoring) — Catch failed updates automatically diff --git a/docs/app/contact/page.mdx b/docs/app/contact/page.mdx new file mode 100644 index 0000000..c3444ca --- /dev/null +++ b/docs/app/contact/page.mdx @@ -0,0 +1,9 @@ +import ContactForm from '../../components/ContactForm' + +# Contact Us + +Have a question regarding self-hosting, a suggestion for a new stack, or just want to say hello? We're here to help. + +Fill out the form below and we'll get back to you as soon as possible. 
+ + diff --git a/docs/app/deploy/_meta.ts b/docs/app/deploy/_meta.ts new file mode 100644 index 0000000..6faf178 --- /dev/null +++ b/docs/app/deploy/_meta.ts @@ -0,0 +1,201 @@ +import type { MetaRecord } from 'nextra' + +const meta: MetaRecord = { + "activepieces": { + "title": "Activepieces" + }, + "affine": { + "title": "AFFiNE" + }, + "akaunting": { + "title": "Akaunting" + }, + "appflowy": { + "title": "AppFlowy" + }, + "appwrite": { + "title": "Appwrite" + }, + "authentik": { + "title": "Authentik" + }, + "bitwarden": { + "title": "Bitwarden" + }, + "calcom": { + "title": "Cal.com" + }, + "chaskiq": { + "title": "Chaskiq" + }, + "coder": { + "title": "Coder" + }, + "continue-dev": { + "title": "Continue" + }, + "coolify": { + "title": "Coolify" + }, + "deepseek": { + "title": "DeepSeek-V3 / R1" + }, + "documenso": { + "title": "Documenso" + }, + "dokku": { + "title": "Dokku" + }, + "erpnext": { + "title": "ERPNext" + }, + "flux": { + "title": "FLUX" + }, + "freecad": { + "title": "FreeCAD" + }, + "gemma": { + "title": "Google Gemma 2" + }, + "gimp": { + "title": "GIMP" + }, + "glitchtip": { + "title": "GlitchTip" + }, + "gpt4all": { + "title": "GPT4All" + }, + "hunyuan-video": { + "title": "HunyuanVideo 1.5" + }, + "jitsi-meet": { + "title": "Jitsi Meet" + }, + "jitsu": { + "title": "Jitsu" + }, + "kdenlive": { + "title": "Kdenlive" + }, + "keepassxc": { + "title": "KeePassXC" + }, + "keycloak": { + "title": "Keycloak" + }, + "krita": { + "title": "Krita" + }, + "librecad": { + "title": "LibreCAD" + }, + "listmonk": { + "title": "Listmonk" + }, + "llama": { + "title": "Meta Llama 3.1" + }, + "matomo": { + "title": "Matomo" + }, + "mattermost": { + "title": "Mattermost" + }, + "mautic": { + "title": "Mautic" + }, + "medusa": { + "title": "Medusa.js" + }, + "metabase": { + "title": "Metabase" + }, + "minio": { + "title": "MinIO" + }, + "mistral": { + "title": "Mistral Large 2" + }, + "mixpost": { + "title": "Mixpost" + }, + "mochi-1": { + "title": "Mochi-1" + }, 
+ "n8n": { + "title": "n8n" + }, + "odoo": { + "title": "Odoo" + }, + "ollama": { + "title": "Ollama" + }, + "onlyoffice": { + "title": "ONLYOFFICE" + }, + "orangehrm": { + "title": "OrangeHRM" + }, + "outline": { + "title": "Outline" + }, + "penpot": { + "title": "Penpot" + }, + "plane": { + "title": "Plane" + }, + "plausible": { + "title": "Plausible" + }, + "pocketbase": { + "title": "PocketBase" + }, + "postal": { + "title": "Postal" + }, + "posthog": { + "title": "PostHog" + }, + "qwen": { + "title": "Qwen 2.5" + }, + "rocketchat": { + "title": "Rocket.Chat" + }, + "signoz": { + "title": "SigNoz" + }, + "stable-diffusion": { + "title": "Stable Diffusion 3.5" + }, + "supabase": { + "title": "Supabase" + }, + "superset": { + "title": "Apache Superset" + }, + "tabby": { + "title": "TabbyML" + }, + "taiga": { + "title": "Taiga" + }, + "twenty": { + "title": "Twenty" + }, + "uptime-kuma": { + "title": "Uptime Kuma" + }, + "vaultwarden": { + "title": "Vaultwarden" + }, + "zammad": { + "title": "Zammad" + } +} + +export default meta diff --git a/docs/app/deploy/activepieces/page.mdx b/docs/app/deploy/activepieces/page.mdx new file mode 100644 index 0000000..08da280 --- /dev/null +++ b/docs/app/deploy/activepieces/page.mdx @@ -0,0 +1,158 @@ +--- +title: "Deploy Activepieces Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Activepieces with Docker Compose. " +--- + +# Deploy Activepieces + +Open source alternative to Zapier. Automate your work with 200+ apps. + +
+ ⭐ 11.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Activepieces instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Activepieces and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + activepieces: + image: activepieces/activepieces:latest + container_name: activepieces + restart: unless-stopped + depends_on: + - db + - redis + ports: + - "8080:80" + environment: + - AP_FRONTEND_URL=http://localhost:8080 + - AP_POSTGRES_DATABASE=activepieces + - AP_POSTGRES_HOST=db + - AP_POSTGRES_PORT=5432 + - AP_POSTGRES_USERNAME=activepieces + - AP_POSTGRES_PASSWORD=activepieces + - AP_REDIS_HOST=redis + - AP_REDIS_PORT=6379 + + db: + image: postgres:14-alpine + container_name: activepieces-db + restart: unless-stopped + environment: + - POSTGRES_USER=activepieces + - POSTGRES_PASSWORD=activepieces + - POSTGRES_DB=activepieces + volumes: + - activepieces_db_data:/var/lib/postgresql/data + + redis: + image: redis:alpine + container_name: activepieces-redis + restart: unless-stopped + +volumes: + activepieces_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/activepieces && cd /opt/activepieces + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `AP_FRONTEND_URL` | 
`http://localhost:8080` | No | +| `AP_POSTGRES_DATABASE` | `activepieces` | No | +| `AP_POSTGRES_HOST` | `db` | No | +| `AP_POSTGRES_PORT` | `5432` | No | +| `AP_POSTGRES_USERNAME` | `activepieces` | No | +| `AP_POSTGRES_PASSWORD` | `activepieces` | No | +| `AP_REDIS_HOST` | `redis` | No | +| `AP_REDIS_PORT` | `6379` | No | +| `POSTGRES_USER` | `activepieces` | No | +| `POSTGRES_PASSWORD` | `activepieces` | No | +| `POSTGRES_DB` | `activepieces` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs activepieces | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Activepieces on AltStack Directory](https://thealtstack.com/alternative-to/activepieces) +- [Activepieces Self-Hosted Guide](https://thealtstack.com/self-hosted/activepieces) +- [Official Documentation](https://www.activepieces.com) +- [GitHub Repository](https://github.com/activepieces/activepieces) diff --git a/docs/app/deploy/affine/page.mdx b/docs/app/deploy/affine/page.mdx new file mode 100644 index 0000000..d932eca --- /dev/null +++ b/docs/app/deploy/affine/page.mdx @@ -0,0 +1,171 @@ +--- +title: "Deploy AFFiNE Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting AFFiNE with Docker Compose. " +--- + +# Deploy AFFiNE + +There can be more than Notion and Miro. AFFiNE(pronounced [ə‘fain]) is a next-gen knowledge base that brings planning, sorting and creating all together. 
Privacy first, open-source, customizable and ready to use. + +
+ ⭐ 62.7k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working AFFiNE instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for AFFiNE and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for AFFiNE +version: '3.8' + +services: + affine: + image: ghcr.io/toeverything/affine-graphql:latest # Using official as fallback but custom build setup exists in Dockerfile + container_name: affine + ports: + - "3000:3000" + environment: + - DATABASE_URL=postgres://affine:affine@db:5432/affine + - REDIS_URL=redis://redis:6379 + - NODE_ENV=production + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + networks: + - affine_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:3000/" ] + interval: 30s + timeout: 10s + retries: 3 + + db: + image: postgres:15-alpine + container_name: affine-db + environment: + POSTGRES_USER: affine + POSTGRES_PASSWORD: affine + POSTGRES_DB: affine + volumes: + - affine_db_data:/var/lib/postgresql/data + networks: + - affine_net + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U affine" ] + interval: 5s + timeout: 5s + retries: 5 + + redis: + image: redis:7-alpine + container_name: affine-redis + networks: + - affine_net + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + interval: 5s + timeout: 5s + retries: 5 + +networks: + affine_net: + driver: bridge + +volumes: + affine_db_data: + name: affine_db_data +``` + +## Let's Ship It + +```bash +# 
Create a directory +mkdir -p /opt/affine && cd /opt/affine + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `DATABASE_URL` | `postgres://affine:affine@db:5432/affine` | No | +| `REDIS_URL` | `redis://redis:6379` | No | +| `NODE_ENV` | `production` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs affine | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [AFFiNE on AltStack Directory](https://thealtstack.com/alternative-to/affine) +- [AFFiNE Self-Hosted Guide](https://thealtstack.com/self-hosted/affine) +- [Official Documentation](https://affine.pro) +- [GitHub Repository](https://github.com/toeverything/AFFiNE) diff --git a/docs/app/deploy/akaunting/page.mdx b/docs/app/deploy/akaunting/page.mdx new file mode 100644 index 0000000..fbf61ee --- /dev/null +++ b/docs/app/deploy/akaunting/page.mdx @@ -0,0 +1,146 @@ +--- +title: "Deploy Akaunting Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Akaunting with Docker Compose. " +--- + +# Deploy Akaunting + +Free and open source online accounting software for small businesses and freelancers. + +
+ ⭐ 12.0k stars + 📜 GPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Akaunting instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Akaunting and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + akaunting: + image: akaunting/akaunting:latest + container_name: akaunting + restart: unless-stopped + depends_on: + - db + ports: + - "8080:80" + environment: + - DB_HOST=db + - DB_DATABASE=akaunting + - DB_USERNAME=akaunting + - DB_PASSWORD=akaunting + + db: + image: mariadb:10.6 + container_name: akaunting-db + restart: unless-stopped + environment: + - MYSQL_DATABASE=akaunting + - MYSQL_USER=akaunting + - MYSQL_PASSWORD=akaunting + - MYSQL_ROOT_PASSWORD=root + volumes: + - akaunting_db_data:/var/lib/mysql + +volumes: + akaunting_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/akaunting && cd /opt/akaunting + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `DB_HOST` | `db` | No | +| `DB_DATABASE` | `akaunting` | No | +| `DB_USERNAME` | `akaunting` | No | +| `DB_PASSWORD` | `akaunting` | No | +| `MYSQL_DATABASE` | `akaunting` | No | +| `MYSQL_USER` | `akaunting` | No | +| `MYSQL_PASSWORD` | `akaunting` | No | +| `MYSQL_ROOT_PASSWORD` | `root` | No | + + +## 
Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs akaunting | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Akaunting on AltStack Directory](https://thealtstack.com/alternative-to/akaunting) +- [Akaunting Self-Hosted Guide](https://thealtstack.com/self-hosted/akaunting) +- [Official Documentation](https://akaunting.com) +- [GitHub Repository](https://github.com/akaunting/akaunting) diff --git a/docs/app/deploy/appflowy/page.mdx b/docs/app/deploy/appflowy/page.mdx new file mode 100644 index 0000000..6169147 --- /dev/null +++ b/docs/app/deploy/appflowy/page.mdx @@ -0,0 +1,171 @@ +--- +title: "Deploy AppFlowy Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting AppFlowy with Docker Compose. " +--- + +# Deploy AppFlowy + +Bring projects, wikis, and teams together with AI. AppFlowy is the AI collaborative workspace where you achieve more without losing control of your data. The leading open source Notion alternative. + +
+ ⭐ 68.0k stars + 📜 GNU Affero General Public License v3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working AppFlowy instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for AppFlowy and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for AppFlowy Cloud +version: '3.8' + +services: + appflowy: + build: + context: . + dockerfile: Dockerfile + container_name: appflowy-cloud + ports: + - "8080:8080" + environment: + - DATABASE_URL=postgres://postgres:${POSTGRES_PASSWORD:-password}@db:5432/appflowy + - REDIS_URL=redis://redis:6379 + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + networks: + - appflowy_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:8080/health" ] + interval: 10s + timeout: 5s + retries: 5 + + db: + image: postgres:15-alpine + container_name: appflowy-db + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password} + POSTGRES_DB: appflowy + volumes: + - appflowy_db_data:/var/lib/postgresql/data + networks: + - appflowy_net + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U postgres" ] + interval: 5s + timeout: 5s + retries: 5 + + redis: + image: redis:7-alpine + container_name: appflowy-redis + networks: + - appflowy_net + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + interval: 5s + timeout: 5s + retries: 5 + +networks: + appflowy_net: + driver: bridge + +volumes: + appflowy_db_data: + name: appflowy_db_data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p 
/opt/appflowy && cd /opt/appflowy + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `DATABASE_URL` | `postgres://postgres:${POSTGRES_PASSWORD:-password}@db:5432/appflowy` | No | +| `REDIS_URL` | `redis://redis:6379` | No | +| `POSTGRES_PASSWORD` | `password` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs appflowy | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [AppFlowy on AltStack Directory](https://thealtstack.com/alternative-to/appflowy) +- [AppFlowy Self-Hosted Guide](https://thealtstack.com/self-hosted/appflowy) +- [Official Documentation](https://www.appflowy.io) +- [GitHub Repository](https://github.com/AppFlowy-IO/AppFlowy) diff --git a/docs/app/deploy/appwrite/page.mdx b/docs/app/deploy/appwrite/page.mdx new file mode 100644 index 0000000..2483ea6 --- /dev/null +++ b/docs/app/deploy/appwrite/page.mdx @@ -0,0 +1,181 @@ +--- +title: "Deploy Appwrite Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Appwrite with Docker Compose. " +--- + +# Deploy Appwrite + +Appwrite® - complete cloud infrastructure for your web, mobile and AI apps. 
Including Auth, Databases, Storage, Functions, Messaging, Hosting, Realtime and more + +
+ ⭐ 54.7k stars + 📜 BSD 3-Clause "New" or "Revised" License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Appwrite instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Appwrite and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for Appwrite +# Note: Appwrite is a complex multi-service system. +# This is a production-ready configuration for the core services. +version: '3.8' + +services: + appwrite: + image: appwrite/appwrite:1.5.4 + container_name: appwrite + ports: + - "80:80" + - "443:443" + environment: + - _APP_ENV=production + - _APP_DB_HOST=db + - _APP_DB_USER=appwrite + - _APP_DB_PASS=${DB_PASSWORD:-password} + - _APP_REDIS_HOST=redis + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + networks: + - appwrite_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost/v1/health" ] + interval: 30s + timeout: 10s + retries: 3 + + db: + image: mariadb:10.11 # Appwrite uses MariaDB by default + container_name: appwrite-db + environment: + MARIADB_USER: appwrite + MARIADB_PASSWORD: ${DB_PASSWORD:-password} + MARIADB_DATABASE: appwrite + MARIADB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD:-rootpassword} + volumes: + - appwrite_db_data:/var/lib/mysql + networks: + - appwrite_net + healthcheck: + test: [ "CMD-SHELL", "mysqladmin ping -h localhost -u root -p${DB_ROOT_PASSWORD:-rootpassword}" ] + interval: 10s + timeout: 5s + retries: 5 + + redis: + image: redis:7-alpine + container_name: appwrite-redis + networks: + - 
appwrite_net + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + interval: 5s + timeout: 5s + retries: 5 + +networks: + appwrite_net: + driver: bridge + +volumes: + appwrite_db_data: + name: appwrite_db_data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/appwrite && cd /opt/appwrite + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `_APP_ENV` | `production` | No | +| `_APP_DB_HOST` | `db` | No | +| `_APP_DB_USER` | `appwrite` | No | +| `_APP_DB_PASS` | `${DB_PASSWORD:-password}` | No | +| `_APP_REDIS_HOST` | `redis` | No | +| `DB_PASSWORD` | `password` | No | +| `DB_ROOT_PASSWORD` | `rootpassword` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs appwrite | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Appwrite on AltStack Directory](https://thealtstack.com/alternative-to/appwrite) +- [Appwrite Self-Hosted Guide](https://thealtstack.com/self-hosted/appwrite) +- [Official Documentation](https://appwrite.io) +- [GitHub Repository](https://github.com/appwrite/appwrite) diff --git a/docs/app/deploy/authentik/page.mdx b/docs/app/deploy/authentik/page.mdx new file mode 100644 index 0000000..2a8e8a3 --- /dev/null +++ b/docs/app/deploy/authentik/page.mdx @@ -0,0 +1,172 @@ +--- +title: "Deploy Authentik Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Authentik with Docker Compose. " +--- + +# Deploy Authentik + +The overall-best open-source identity provider, focused on flexibility and versatility. + +
+ ⭐ 15.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Authentik instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Authentik and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + server: + image: ghcr.io/goauthentik/server:latest + container_name: authentik-server + restart: unless-stopped + command: server + depends_on: + - db + - redis + ports: + - "9000:9000" + - "9443:9443" + environment: + - AUTHENTIK_REDIS__HOST=redis + - AUTHENTIK_POSTGRESQL__HOST=db + - AUTHENTIK_POSTGRESQL__USER=authentik + - AUTHENTIK_POSTGRESQL__NAME=authentik + - AUTHENTIK_POSTGRESQL__PASSWORD=authentik + - AUTHENTIK_SECRET_KEY=generate-a-random-secret-key + + worker: + image: ghcr.io/goauthentik/server:latest + container_name: authentik-worker + restart: unless-stopped + command: worker + depends_on: + - db + - redis + environment: + - AUTHENTIK_REDIS__HOST=redis + - AUTHENTIK_POSTGRESQL__HOST=db + - AUTHENTIK_POSTGRESQL__USER=authentik + - AUTHENTIK_POSTGRESQL__NAME=authentik + - AUTHENTIK_POSTGRESQL__PASSWORD=authentik + - AUTHENTIK_SECRET_KEY=generate-a-random-secret-key + + db: + image: postgres:12-alpine + container_name: authentik-db + restart: unless-stopped + environment: + - POSTGRES_PASSWORD=authentik + - POSTGRES_USER=authentik + - POSTGRES_DB=authentik + volumes: + - authentik_db_data:/var/lib/postgresql/data + + redis: + image: redis:6-alpine + container_name: authentik-redis + restart: 
unless-stopped + +volumes: + authentik_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/authentik && cd /opt/authentik + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `AUTHENTIK_REDIS__HOST` | `redis` | No | +| `AUTHENTIK_POSTGRESQL__HOST` | `db` | No | +| `AUTHENTIK_POSTGRESQL__USER` | `authentik` | No | +| `AUTHENTIK_POSTGRESQL__NAME` | `authentik` | No | +| `AUTHENTIK_POSTGRESQL__PASSWORD` | `authentik` | No | +| `AUTHENTIK_SECRET_KEY` | `generate-a-random-secret-key` | No | +| `POSTGRES_PASSWORD` | `authentik` | No | +| `POSTGRES_USER` | `authentik` | No | +| `POSTGRES_DB` | `authentik` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs authentik | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Authentik on AltStack Directory](https://thealtstack.com/alternative-to/authentik) +- [Authentik Self-Hosted Guide](https://thealtstack.com/self-hosted/authentik) +- [Official Documentation](https://goauthentik.io) +- [GitHub Repository](https://github.com/goauthentik/authentik) diff --git a/docs/app/deploy/bitwarden/page.mdx b/docs/app/deploy/bitwarden/page.mdx new file mode 100644 index 0000000..7acadc5 --- /dev/null +++ b/docs/app/deploy/bitwarden/page.mdx @@ -0,0 +1,117 @@ +--- +title: "Deploy Bitwarden Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Bitwarden with Docker Compose. " +--- + +# Deploy Bitwarden + +Bitwarden infrastructure/backend (API, database, Docker, etc). + +
+ ⭐ 18.0k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Bitwarden instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Bitwarden and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + bitwarden: + image: vaultwarden/server:latest + container_name: bitwarden + restart: unless-stopped + ports: + - "8088:80" + volumes: + - bw-data:/data + +volumes: + bw-data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/bitwarden && cd /opt/bitwarden + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs bitwarden | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Bitwarden on AltStack Directory](https://thealtstack.com/alternative-to/bitwarden) +- [Bitwarden Self-Hosted Guide](https://thealtstack.com/self-hosted/bitwarden) +- [Official Documentation](https://bitwarden.com) +- [GitHub Repository](https://github.com/bitwarden/server) diff --git a/docs/app/deploy/calcom/page.mdx b/docs/app/deploy/calcom/page.mdx new file mode 100644 index 0000000..52b02c3 --- /dev/null +++ b/docs/app/deploy/calcom/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Cal.com Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Cal.com with Docker Compose. " +--- + +# Deploy Cal.com + +The open-source Calendly alternative. Take control of your scheduling. + +
+ ⭐ 30.0k stars + 📜 AGPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Cal.com instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Cal.com and add this `docker-compose.yml` (note: this is a minimal starting point that runs the app container only — Cal.com also requires a PostgreSQL database and environment variables such as `DATABASE_URL` and `NEXTAUTH_SECRET`; see the official [calcom/docker](https://github.com/calcom/docker) repository for a production-ready compose file): + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + calcom: + image: calcom/cal.com:latest + container_name: calcom + restart: unless-stopped + ports: + - "3000:3000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/calcom && cd /opt/calcom + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs calcom | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Cal.com on AltStack Directory](https://thealtstack.com/alternative-to/calcom) +- [Cal.com Self-Hosted Guide](https://thealtstack.com/self-hosted/calcom) +- [Official Documentation](https://cal.com) +- [GitHub Repository](https://github.com/calcom/cal.com) diff --git a/docs/app/deploy/chaskiq/page.mdx b/docs/app/deploy/chaskiq/page.mdx new file mode 100644 index 0000000..0d0ee48 --- /dev/null +++ b/docs/app/deploy/chaskiq/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Chaskiq Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Chaskiq with Docker Compose. " +--- + +# Deploy Chaskiq + +Open source conversational marketing platform alternative to Intercom and Drift. + +
+ ⭐ 4.0k stars + 📜 GPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Chaskiq instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Chaskiq and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + chaskiq: + image: chaskiq/chaskiq:latest + container_name: chaskiq + restart: unless-stopped + ports: + - "3000:3000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/chaskiq && cd /opt/chaskiq + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs chaskiq | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Chaskiq on AltStack Directory](https://thealtstack.com/alternative-to/chaskiq) +- [Chaskiq Self-Hosted Guide](https://thealtstack.com/self-hosted/chaskiq) +- [Official Documentation](https://chaskiq.io) +- [GitHub Repository](https://github.com/chaskiq/chaskiq) diff --git a/docs/app/deploy/coder/page.mdx b/docs/app/deploy/coder/page.mdx new file mode 100644 index 0000000..e1ca29c --- /dev/null +++ b/docs/app/deploy/coder/page.mdx @@ -0,0 +1,144 @@ +--- +title: "Deploy Coder Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Coder with Docker Compose. " +--- + +# Deploy Coder + +Provision software development environments as code on your infrastructure. + +
+ ⭐ 20.0k stars + 📜 AGPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Coder instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Coder and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + coder: + image: ghcr.io/coder/coder:latest + container_name: coder + restart: unless-stopped + depends_on: + - db + ports: + - "7080:7080" + environment: + - CODER_PG_CONNECTION_URL=postgresql://coder:coder@db:5432/coder + - CODER_ACCESS_URL=http://localhost:7080 + - CODER_HTTP_ADDRESS=0.0.0.0:7080 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + + db: + image: postgres:13 + container_name: coder-db + restart: unless-stopped + environment: + - POSTGRES_USER=coder + - POSTGRES_PASSWORD=coder + - POSTGRES_DB=coder + volumes: + - coder_db_data:/var/lib/postgresql/data + +volumes: + coder_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/coder && cd /opt/coder + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `CODER_PG_CONNECTION_URL` | `postgresql://coder:coder@db:5432/coder` | No | +| `CODER_ACCESS_URL` | `http://localhost:7080` | No | +| `CODER_HTTP_ADDRESS` | `0.0.0.0:7080` | No | +| `POSTGRES_USER` | `coder` | No | +| `POSTGRES_PASSWORD` | `coder` | No | +| 
`POSTGRES_DB` | `coder` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs coder | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Coder on AltStack Directory](https://thealtstack.com/alternative-to/coder) +- [Coder Self-Hosted Guide](https://thealtstack.com/self-hosted/coder) +- [Official Documentation](https://coder.com) +- [GitHub Repository](https://github.com/coder/coder) diff --git a/docs/app/deploy/continue-dev/page.mdx b/docs/app/deploy/continue-dev/page.mdx new file mode 100644 index 0000000..8697d87 --- /dev/null +++ b/docs/app/deploy/continue-dev/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Continue Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Continue with Docker Compose. " +--- + +# Deploy Continue + +Open-source AI code assistant for VS Code and JetBrains. Use any model (local or API). + +
+ ⭐ 25.0k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Continue instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Continue and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + continue: + image: continuedev/continue:latest + container_name: continue + restart: unless-stopped + ports: + - "8080:8080" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/continue-dev && cd /opt/continue-dev + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs continue-dev | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Continue on AltStack Directory](https://thealtstack.com/alternative-to/continue-dev) +- [Continue Self-Hosted Guide](https://thealtstack.com/self-hosted/continue-dev) +- [Official Documentation](https://continue.dev) +- [GitHub Repository](https://github.com/continuedev/continue) diff --git a/docs/app/deploy/coolify/page.mdx b/docs/app/deploy/coolify/page.mdx new file mode 100644 index 0000000..4c59634 --- /dev/null +++ b/docs/app/deploy/coolify/page.mdx @@ -0,0 +1,171 @@ +--- +title: "Deploy Coolify Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Coolify with Docker Compose. " +--- + +# Deploy Coolify + +An open-source, self-hostable PaaS alternative to Vercel, Heroku & Netlify that lets you easily deploy static sites, databases, full-stack applications and 280+ one-click services on your own servers. + +
+ ⭐ 50.4k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +A fully operational Coolify instance. Think of Coolify as a self-hosted Vercel or Heroku. Once installed, it manages your other Docker containers, handles deployments from GitHub/GitLab, and provides an integrated reverse proxy. + +> 🚀 **Self-Hosting Level:** If you only deploy one thing, let it be Coolify. It makes deploying everything else 10x easier. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Coolify and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for Coolify +# Note: Coolify is a self-hosted PaaS. +version: '3.8' + +services: + coolify: + image: ghcr.io/coollabsio/coolify:latest + container_name: coolify + ports: + - "8000:8000" + environment: + - APP_ENV=production + - DB_CONNECTION=pgsql + - DB_HOST=db + - DB_DATABASE=coolify + - DB_USERNAME=coolify + - DB_PASSWORD=${DB_PASSWORD:-password} + volumes: + - coolify_data:/var/www/html/storage + - /var/run/docker.sock:/var/run/docker.sock # Essential for controlling Docker + depends_on: + db: + condition: service_healthy + networks: + - coolify_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:8000/api/health" ] + interval: 30s + timeout: 10s + retries: 3 + restart: unless-stopped + + db: + image: postgres:15-alpine + container_name: coolify-db + environment: + POSTGRES_USER: coolify + POSTGRES_PASSWORD: ${DB_PASSWORD:-password} + POSTGRES_DB: coolify + volumes: + - coolify_db_data:/var/lib/postgresql/data + networks: + - coolify_net + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U coolify" ] + interval: 5s + timeout: 5s + 
retries: 5 + +networks: + coolify_net: + driver: bridge + +volumes: + coolify_data: + name: coolify_data + coolify_db_data: + name: coolify_db_data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/coolify && cd /opt/coolify + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `APP_ENV` | `production` | No | +| `DB_CONNECTION` | `pgsql` | No | +| `DB_HOST` | `db` | No | +| `DB_DATABASE` | `coolify` | No | +| `DB_USERNAME` | `coolify` | No | +| `DB_PASSWORD` | `${DB_PASSWORD:-password}` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs coolify | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Coolify on AltStack Directory](https://thealtstack.com/alternative-to/coolify) +- [Coolify Self-Hosted Guide](https://thealtstack.com/self-hosted/coolify) +- [Official Documentation](https://coolify.io) +- [GitHub Repository](https://github.com/coollabsio/coolify) diff --git a/docs/app/deploy/deepseek/page.mdx b/docs/app/deploy/deepseek/page.mdx new file mode 100644 index 0000000..7b1176b --- /dev/null +++ b/docs/app/deploy/deepseek/page.mdx @@ -0,0 +1,117 @@ +--- +title: "Deploy DeepSeek-V3 / R1 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting DeepSeek-V3 / R1 with Docker Compose. Replaces: meta-llama-3-1, mistral, qwen-2-5." +--- + +# Deploy DeepSeek-V3 / R1 + +Powerful open-source models including V3 (671B) and R1 (Reasoning). Rivals GPT-4o and o1. + +
+ ⭐ 110.0k stars + 📜 MIT License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working DeepSeek-V3 / R1 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for DeepSeek-V3 / R1 and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + ollama-deepseek: + image: ollama/ollama:latest + container_name: ollama-deepseek + restart: unless-stopped + ports: + - "11435:11434" + volumes: + - ollama_deepseek:/root/.ollama + +volumes: + ollama_deepseek: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/deepseek && cd /opt/deepseek + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs deepseek | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [DeepSeek-V3 / R1 on AltStack Directory](https://thealtstack.com/alternative-to/deepseek) +- [DeepSeek-V3 / R1 Self-Hosted Guide](https://thealtstack.com/self-hosted/deepseek) +- [Official Documentation](https://deepseek.com) +- [GitHub Repository](https://github.com/deepseek-ai/DeepSeek-V3) diff --git a/docs/app/deploy/documenso/page.mdx b/docs/app/deploy/documenso/page.mdx new file mode 100644 index 0000000..57c6b96 --- /dev/null +++ b/docs/app/deploy/documenso/page.mdx @@ -0,0 +1,142 @@ +--- +title: "Deploy Documenso Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Documenso with Docker Compose. " +--- + +# Deploy Documenso + +The open-source DocuSign alternative. We aim to be the world's most trusted document signing platform. + +
+ ⭐ 8.0k stars + 📜 AGPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Documenso instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Documenso and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + documenso: + image: documenso/documenso:latest + container_name: documenso + restart: unless-stopped + depends_on: + - db + ports: + - "3000:3000" + environment: + - DATABASE_URL=postgresql://documenso:documenso@db:5432/documenso + - NEXTAUTH_URL=http://localhost:3000 + - NEXTAUTH_SECRET=supersecret + + db: + image: postgres:15-alpine + container_name: documenso-db + restart: unless-stopped + environment: + - POSTGRES_USER=documenso + - POSTGRES_PASSWORD=documenso + - POSTGRES_DB=documenso + volumes: + - documenso_db_data:/var/lib/postgresql/data + +volumes: + documenso_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/documenso && cd /opt/documenso + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `DATABASE_URL` | `postgresql://documenso:documenso@db:5432/documenso` | No | +| `NEXTAUTH_URL` | `http://localhost:3000` | No | +| `NEXTAUTH_SECRET` | `supersecret` | No | +| `POSTGRES_USER` | `documenso` | No | +| `POSTGRES_PASSWORD` | `documenso` | No | +| `POSTGRES_DB` | 
`documenso` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs documenso | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Documenso on AltStack Directory](https://thealtstack.com/alternative-to/documenso) +- [Documenso Self-Hosted Guide](https://thealtstack.com/self-hosted/documenso) +- [Official Documentation](https://documenso.com) +- [GitHub Repository](https://github.com/documenso/documenso) diff --git a/docs/app/deploy/dokku/page.mdx b/docs/app/deploy/dokku/page.mdx new file mode 100644 index 0000000..b3bc8b8 --- /dev/null +++ b/docs/app/deploy/dokku/page.mdx @@ -0,0 +1,114 @@ +--- +title: "Deploy Dokku Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Dokku with Docker Compose. " +--- + +# Deploy Dokku + +A docker-powered PaaS that helps you build and manage the lifecycle of applications + +
+ ⭐ 31.9k stars + 📜 MIT License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Dokku instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Dokku and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + dokku: + image: dokku/dokku:latest + container_name: dokku + restart: unless-stopped + ports: + - "80:80" + - "443:443" + - "22:22" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/dokku && cd /opt/dokku + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs dokku | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Dokku on AltStack Directory](https://thealtstack.com/alternative-to/dokku) +- [Dokku Self-Hosted Guide](https://thealtstack.com/self-hosted/dokku) +- [Official Documentation](https://dokku.com) +- [GitHub Repository](https://github.com/dokku/dokku) diff --git a/docs/app/deploy/erpnext/page.mdx b/docs/app/deploy/erpnext/page.mdx new file mode 100644 index 0000000..c836160 --- /dev/null +++ b/docs/app/deploy/erpnext/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy ERPNext Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting ERPNext with Docker Compose. " +--- + +# Deploy ERPNext + +A free and open-source integrated Enterprise Resource Planning (ERP) software. + +
+ ⭐ 31.6k stars + 📜 GNU General Public License v3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working ERPNext instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for ERPNext and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + erpnext: + image: frappe/erpnext-worker:latest + container_name: erpnext + restart: unless-stopped + ports: + - "8000:8000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/erpnext && cd /opt/erpnext + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs erpnext | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [ERPNext on AltStack Directory](https://thealtstack.com/alternative-to/erpnext) +- [ERPNext Self-Hosted Guide](https://thealtstack.com/self-hosted/erpnext) +- [Official Documentation](https://erpnext.com) +- [GitHub Repository](https://github.com/frappe/erpnext) diff --git a/docs/app/deploy/flux/page.mdx b/docs/app/deploy/flux/page.mdx new file mode 100644 index 0000000..9709497 --- /dev/null +++ b/docs/app/deploy/flux/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy FLUX Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting FLUX with Docker Compose. " +--- + +# Deploy FLUX + +Next-gen open image generation model from Black Forest Labs. State-of-the-art quality rivaling Midjourney. + +
+ ⭐ 20.0k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working FLUX instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for FLUX and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + flux: + image: blackforestlabs/flux:latest + container_name: flux + restart: unless-stopped + ports: + - "8000:8000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/flux && cd /opt/flux + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs flux | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [FLUX on AltStack Directory](https://thealtstack.com/alternative-to/flux) +- [FLUX Self-Hosted Guide](https://thealtstack.com/self-hosted/flux) +- [Official Documentation](https://blackforestlabs.ai) +- [GitHub Repository](https://github.com/black-forest-labs/flux) diff --git a/docs/app/deploy/freecad/page.mdx b/docs/app/deploy/freecad/page.mdx new file mode 100644 index 0000000..a7e0689 --- /dev/null +++ b/docs/app/deploy/freecad/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy FreeCAD Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting FreeCAD with Docker Compose. " +--- + +# Deploy FreeCAD + +A general-purpose parametric 3D CAD modeler and a BIM software application. + +
+ ⭐ 21.0k stars + 📜 LGPLv2+ + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working FreeCAD instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for FreeCAD and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + freecad: + image: lscr.io/linuxserver/freecad:latest + container_name: freecad + restart: unless-stopped + ports: + - "3000:3000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/freecad && cd /opt/freecad + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs freecad | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [FreeCAD on AltStack Directory](https://thealtstack.com/alternative-to/freecad) +- [FreeCAD Self-Hosted Guide](https://thealtstack.com/self-hosted/freecad) +- [Official Documentation](https://www.freecad.org) +- [GitHub Repository](https://github.com/FreeCAD/FreeCAD) diff --git a/docs/app/deploy/gemma/page.mdx b/docs/app/deploy/gemma/page.mdx new file mode 100644 index 0000000..dce1fc1 --- /dev/null +++ b/docs/app/deploy/gemma/page.mdx @@ -0,0 +1,117 @@ +--- +title: "Deploy Google Gemma 2 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Google Gemma 2 with Docker Compose. " +--- + +# Deploy Google Gemma 2 + +Google's open-weight models (9B, 27B) with class-leading performance and efficient architecture. + +
+ ⭐ 20.0k stars + 📜 Gemma License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+
+
+## What You'll Get
+
+A fully working Google Gemma 2 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for Google Gemma 2 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+  ollama-gemma:
+    image: ollama/ollama:latest
+    container_name: ollama-gemma
+    restart: unless-stopped
+    ports:
+      - "11437:11434"
+    volumes:
+      - ollama_gemma:/root/.ollama
+
+volumes:
+  ollama_gemma:
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/gemma && cd /opt/gemma
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs ollama-gemma | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Google Gemma 2 on AltStack Directory](https://thealtstack.com/alternative-to/gemma)
+- [Google Gemma 2 Self-Hosted Guide](https://thealtstack.com/self-hosted/gemma)
+- [Official Documentation](https://ai.google.dev/gemma)
+- [GitHub Repository](https://github.com/google-deepmind/gemma)
diff --git a/docs/app/deploy/gimp/page.mdx b/docs/app/deploy/gimp/page.mdx
new file mode 100644
index 0000000..248fa2c
--- /dev/null
+++ b/docs/app/deploy/gimp/page.mdx
@@ -0,0 +1,121 @@
+---
+title: "Deploy GIMP Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting GIMP with Docker Compose. "
+---
+
+# Deploy GIMP
+
+GIMP (GNU Image Manipulation Program) is a free and open-source raster graphics editor for photo retouching, image composition, and image authoring.
+
+ ⭐ 6.0k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working GIMP instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for GIMP and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + gimp: + image: linuxserver/gimp:latest + container_name: gimp + restart: unless-stopped + ports: + - "3000:3000" + environment: + - PUID=1000 + - PGID=1000 +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/gimp && cd /opt/gimp + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `PUID` | `1000` | No | +| `PGID` | `1000` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs gimp | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [GIMP on AltStack Directory](https://thealtstack.com/alternative-to/gimp) +- [GIMP Self-Hosted Guide](https://thealtstack.com/self-hosted/gimp) +- [Official Documentation](https://www.gimp.org) +- [GitHub Repository](https://github.com/GNOME/gimp) diff --git a/docs/app/deploy/glitchtip/page.mdx b/docs/app/deploy/glitchtip/page.mdx new file mode 100644 index 0000000..0ce9253 --- /dev/null +++ b/docs/app/deploy/glitchtip/page.mdx @@ -0,0 +1,150 @@ +--- +title: "Deploy GlitchTip Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting GlitchTip with Docker Compose. " +--- + +# Deploy GlitchTip + +Open source error tracking that's compatible with Sentry SDKs. + +
+ ⭐ 3.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working GlitchTip instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for GlitchTip and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + glitchtip: + image: glitchtip/glitchtip:latest + container_name: glitchtip + restart: unless-stopped + depends_on: + - db + - redis + ports: + - "8000:8000" + environment: + - DATABASE_URL=postgres://glitchtip:glitchtip@db:5432/glitchtip + - REDIS_URL=redis://redis:6379 + - SECRET_KEY=change_me_to_something_random + - PORT=8000 + + db: + image: postgres:14 + container_name: glitchtip-db + restart: unless-stopped + environment: + - POSTGRES_USER=glitchtip + - POSTGRES_PASSWORD=glitchtip + - POSTGRES_DB=glitchtip + volumes: + - glitchtip_db_data:/var/lib/postgresql/data + + redis: + image: redis:alpine + container_name: glitchtip-redis + restart: unless-stopped + +volumes: + glitchtip_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/glitchtip && cd /opt/glitchtip + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `DATABASE_URL` | `postgres://glitchtip:glitchtip@db:5432/glitchtip` | No | +| `REDIS_URL` | `redis://redis:6379` | No | +| `SECRET_KEY` | 
`change_me_to_something_random` | No | +| `PORT` | `8000` | No | +| `POSTGRES_USER` | `glitchtip` | No | +| `POSTGRES_PASSWORD` | `glitchtip` | No | +| `POSTGRES_DB` | `glitchtip` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs glitchtip | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [GlitchTip on AltStack Directory](https://thealtstack.com/alternative-to/glitchtip) +- [GlitchTip Self-Hosted Guide](https://thealtstack.com/self-hosted/glitchtip) +- [Official Documentation](https://glitchtip.com) +- [GitHub Repository](https://github.com/glitchtip/glitchtip) diff --git a/docs/app/deploy/gpt4all/page.mdx b/docs/app/deploy/gpt4all/page.mdx new file mode 100644 index 0000000..9d107bf --- /dev/null +++ b/docs/app/deploy/gpt4all/page.mdx @@ -0,0 +1,132 @@ +--- +title: "Deploy GPT4All Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting GPT4All with Docker Compose. " +--- + +# Deploy GPT4All + +Run open-source LLMs locally on your CPU and GPU. No internet required. + +
+ ⭐ 65.0k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working GPT4All instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for GPT4All and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for GPT4All +version: '3.8' + +services: + gpt4all: + build: + context: . + dockerfile: Dockerfile + container_name: gpt4all-server + ports: + - "4891:4891" + volumes: + - gpt4all_models:/app/models + networks: + - gpt4all_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:4891/v1/models" ] # GPT4All local API endpoint + interval: 30s + timeout: 10s + retries: 3 + restart: unless-stopped + +networks: + gpt4all_net: + driver: bridge + +volumes: + gpt4all_models: + name: gpt4all_models +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/gpt4all && cd /opt/gpt4all + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs gpt4all 
| tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [GPT4All on AltStack Directory](https://thealtstack.com/alternative-to/gpt4all) +- [GPT4All Self-Hosted Guide](https://thealtstack.com/self-hosted/gpt4all) +- [Official Documentation](https://gpt4all.io) +- [GitHub Repository](https://github.com/nomic-ai/gpt4all) diff --git a/docs/app/deploy/hunyuan-video/page.mdx b/docs/app/deploy/hunyuan-video/page.mdx new file mode 100644 index 0000000..bb79ffe --- /dev/null +++ b/docs/app/deploy/hunyuan-video/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy HunyuanVideo 1.5 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting HunyuanVideo 1.5 with Docker Compose. " +--- + +# Deploy HunyuanVideo 1.5 + +Tencent's state-of-the-art open-source video generation model with 13B parameters. + +
+ ⭐ 8.0k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+
+
+## What You'll Get
+
+A fully working HunyuanVideo 1.5 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.
+
+## Prerequisites
+
+- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
+- A domain name pointed to your server (optional but recommended)
+- Basic terminal access (SSH)
+
+## The Config
+
+Create a directory for HunyuanVideo 1.5 and add this `docker-compose.yml`:
+
+```yaml
+# -------------------------------------------------------------------------
+# 🚀 Created and distributed by The AltStack
+# 🌍 https://thealtstack.com
+# -------------------------------------------------------------------------
+
+version: '3.8'
+
+services:
+  hunyuan:
+    image: tencent/hunyuan:latest
+    container_name: hunyuan
+    restart: unless-stopped
+    ports:
+      - "8000:8000"
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/hunyuan-video && cd /opt/hunyuan-video
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs hunyuan | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d +``` + +## Going Further + +- [HunyuanVideo 1.5 on AltStack Directory](https://thealtstack.com/alternative-to/hunyuan-video) +- [HunyuanVideo 1.5 Self-Hosted Guide](https://thealtstack.com/self-hosted/hunyuan-video) +- [Official Documentation](https://github.com/Tencent/HunyuanVideo) +- [GitHub Repository](https://github.com/Tencent/HunyuanVideo) diff --git a/docs/app/deploy/jitsi-meet/page.mdx b/docs/app/deploy/jitsi-meet/page.mdx new file mode 100644 index 0000000..972d00f --- /dev/null +++ b/docs/app/deploy/jitsi-meet/page.mdx @@ -0,0 +1,122 @@ +--- +title: "Deploy Jitsi Meet Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Jitsi Meet with Docker Compose. " +--- + +# Deploy Jitsi Meet + +Jitsi Meet - Secure, Simple and Scalable Video Conferences that you use as a standalone app or embed in your web application. + +
+ ⭐ 28.6k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Jitsi Meet instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Jitsi Meet and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + jitsi-web: + image: jitsi/web:latest + container_name: jitsi-web + restart: unless-stopped + ports: + - "8000:80" + - "8443:443" + environment: + - PUBLIC_URL=https://localhost:8443 + - XMPP_SERVER=xmpp.meet.jitsi +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/jitsi-meet && cd /opt/jitsi-meet + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `PUBLIC_URL` | `https://localhost:8443` | No | +| `XMPP_SERVER` | `xmpp.meet.jitsi` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs jitsi-meet | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i 
:PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Jitsi Meet on AltStack Directory](https://thealtstack.com/alternative-to/jitsi-meet) +- [Jitsi Meet Self-Hosted Guide](https://thealtstack.com/self-hosted/jitsi-meet) +- [Official Documentation](https://jitsi.org) +- [GitHub Repository](https://github.com/jitsi/jitsi-meet) diff --git a/docs/app/deploy/jitsu/page.mdx b/docs/app/deploy/jitsu/page.mdx new file mode 100644 index 0000000..df54180 --- /dev/null +++ b/docs/app/deploy/jitsu/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Jitsu Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Jitsu with Docker Compose. " +--- + +# Deploy Jitsu + +High-performance data collection platform and open-source Segment alternative. + +
+ ⭐ 5.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Jitsu instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Jitsu and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + jitsu: + image: jitsu/jitsu:latest + container_name: jitsu + restart: unless-stopped + ports: + - "8000:8000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/jitsu && cd /opt/jitsu + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs jitsu | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Jitsu on AltStack Directory](https://thealtstack.com/alternative-to/jitsu) +- [Jitsu Self-Hosted Guide](https://thealtstack.com/self-hosted/jitsu) +- [Official Documentation](https://jitsu.com) +- [GitHub Repository](https://github.com/jitsucom/jitsu) diff --git a/docs/app/deploy/kdenlive/page.mdx b/docs/app/deploy/kdenlive/page.mdx new file mode 100644 index 0000000..c49356e --- /dev/null +++ b/docs/app/deploy/kdenlive/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Kdenlive Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Kdenlive with Docker Compose. " +--- + +# Deploy Kdenlive + +Open source video editing software based on the MLT Framework and KDE. + +
+ ⭐ 3.5k stars + 📜 GPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Kdenlive instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Kdenlive and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + kdenlive: + image: lscr.io/linuxserver/kdenlive:latest + container_name: kdenlive + restart: unless-stopped + ports: + - "3000:3000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/kdenlive && cd /opt/kdenlive + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs kdenlive | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Kdenlive on AltStack Directory](https://thealtstack.com/alternative-to/kdenlive) +- [Kdenlive Self-Hosted Guide](https://thealtstack.com/self-hosted/kdenlive) +- [Official Documentation](https://kdenlive.org) +- [GitHub Repository](https://github.com/KDE/kdenlive) diff --git a/docs/app/deploy/keepassxc/page.mdx b/docs/app/deploy/keepassxc/page.mdx new file mode 100644 index 0000000..d952dad --- /dev/null +++ b/docs/app/deploy/keepassxc/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy KeePassXC Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting KeePassXC with Docker Compose. " +--- + +# Deploy KeePassXC + +KeePassXC is a cross-platform community-driven port of the Windows application “KeePass Password Safe”. + +
+ ⭐ 25.8k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working KeePassXC instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for KeePassXC and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + keepassxc: + image: jlesage/keepassxc:latest + container_name: keepassxc + restart: unless-stopped + ports: + - "5800:5800" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/keepassxc && cd /opt/keepassxc + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs keepassxc | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [KeePassXC on AltStack Directory](https://thealtstack.com/alternative-to/keepassxc) +- [KeePassXC Self-Hosted Guide](https://thealtstack.com/self-hosted/keepassxc) +- [Official Documentation](https://keepassxc.org) +- [GitHub Repository](https://github.com/keepassxreboot/keepassxc) diff --git a/docs/app/deploy/keycloak/page.mdx b/docs/app/deploy/keycloak/page.mdx new file mode 100644 index 0000000..b767e62 --- /dev/null +++ b/docs/app/deploy/keycloak/page.mdx @@ -0,0 +1,149 @@ +--- +title: "Deploy Keycloak Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Keycloak with Docker Compose. " +--- + +# Deploy Keycloak + +Open source identity and access management for modern applications and services. + +
+ ⭐ 23.0k stars + 📜 Apache 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Keycloak instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Keycloak and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + keycloak: + image: quay.io/keycloak/keycloak:latest + container_name: keycloak + restart: unless-stopped + command: start-dev + depends_on: + - db + ports: + - "8080:8080" + environment: + - KEYCLOAK_ADMIN=admin + - KEYCLOAK_ADMIN_PASSWORD=admin + - KC_DB=postgres + - KC_DB_URL=jdbc:postgresql://db:5432/keycloak + - KC_DB_USERNAME=keycloak + - KC_DB_PASSWORD=keycloak + + db: + image: postgres:15-alpine + container_name: keycloak-db + restart: unless-stopped + environment: + - POSTGRES_DB=keycloak + - POSTGRES_USER=keycloak + - POSTGRES_PASSWORD=keycloak + volumes: + - keycloak_db_data:/var/lib/postgresql/data + +volumes: + keycloak_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/keycloak && cd /opt/keycloak + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `KEYCLOAK_ADMIN` | `admin` | No | +| `KEYCLOAK_ADMIN_PASSWORD` | `admin` | No | +| `KC_DB` | `postgres` | No | +| `KC_DB_URL` | `jdbc:postgresql://db:5432/keycloak` | No | +| `KC_DB_USERNAME` | `keycloak` | No | +| 
`KC_DB_PASSWORD` | `keycloak` | No | +| `POSTGRES_DB` | `keycloak` | No | +| `POSTGRES_USER` | `keycloak` | No | +| `POSTGRES_PASSWORD` | `keycloak` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs keycloak | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Keycloak on AltStack Directory](https://thealtstack.com/alternative-to/keycloak) +- [Keycloak Self-Hosted Guide](https://thealtstack.com/self-hosted/keycloak) +- [Official Documentation](https://www.keycloak.org) +- [GitHub Repository](https://github.com/keycloak/keycloak) diff --git a/docs/app/deploy/krita/page.mdx b/docs/app/deploy/krita/page.mdx new file mode 100644 index 0000000..ee518f0 --- /dev/null +++ b/docs/app/deploy/krita/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Krita Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Krita with Docker Compose. " +--- + +# Deploy Krita + +Krita is a free and open source cross-platform application that offers an end-to-end solution for creating digital art files from scratch built on the KDE and Qt frameworks. + +
+ ⭐ 9.3k stars + 📜 GNU General Public License v3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Krita instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Krita and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + krita: + image: linuxserver/krita:latest + container_name: krita + restart: unless-stopped + ports: + - "3000:3000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/krita && cd /opt/krita + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs krita | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Krita on AltStack Directory](https://thealtstack.com/alternative-to/krita) +- [Krita Self-Hosted Guide](https://thealtstack.com/self-hosted/krita) +- [Official Documentation](https://krita.org) +- [GitHub Repository](https://github.com/KDE/krita) diff --git a/docs/app/deploy/librecad/page.mdx b/docs/app/deploy/librecad/page.mdx new file mode 100644 index 0000000..c3834f3 --- /dev/null +++ b/docs/app/deploy/librecad/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy LibreCAD Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting LibreCAD with Docker Compose. " +--- + +# Deploy LibreCAD + +A mature, feature-rich 2D CAD application with a loyal user community. + +
+ ⭐ 6.5k stars + 📜 GPLv2 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working LibreCAD instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for LibreCAD and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + librecad: + image: lscr.io/linuxserver/librecad:latest + container_name: librecad + restart: unless-stopped + ports: + - "3000:3000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/librecad && cd /opt/librecad + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs librecad | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [LibreCAD on AltStack Directory](https://thealtstack.com/alternative-to/librecad) +- [LibreCAD Self-Hosted Guide](https://thealtstack.com/self-hosted/librecad) +- [Official Documentation](https://librecad.org) +- [GitHub Repository](https://github.com/LibreCAD/LibreCAD) diff --git a/docs/app/deploy/listmonk/page.mdx b/docs/app/deploy/listmonk/page.mdx new file mode 100644 index 0000000..51b07d6 --- /dev/null +++ b/docs/app/deploy/listmonk/page.mdx @@ -0,0 +1,138 @@ +--- +title: "Deploy Listmonk Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Listmonk with Docker Compose. " +--- + +# Deploy Listmonk + +High performance, self-hosted newsletter and mailing list manager with a modern dashboard. + +
+ ⭐ 19.0k stars + 📜 AGPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Listmonk instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Listmonk and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + listmonk: + image: listmonk/listmonk:latest + container_name: listmonk + restart: unless-stopped + command: sh -c './listmonk --install --yes --idempotent && ./listmonk' + depends_on: + - listmonk-db + ports: + - "9000:9000" + volumes: + - ./config.toml:/listmonk/config.toml + + listmonk-db: + image: postgres:13-alpine + container_name: listmonk-db + restart: unless-stopped + environment: + - POSTGRES_USER=listmonk + - POSTGRES_PASSWORD=listmonk + - POSTGRES_DB=listmonk + volumes: + - listmonk_db_data:/var/lib/postgresql/data + +volumes: + listmonk_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/listmonk && cd /opt/listmonk + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `POSTGRES_USER` | `listmonk` | No | +| `POSTGRES_PASSWORD` | `listmonk` | No | +| `POSTGRES_DB` | `listmonk` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy 
guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs listmonk | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Listmonk on AltStack Directory](https://thealtstack.com/alternative-to/listmonk) +- [Listmonk Self-Hosted Guide](https://thealtstack.com/self-hosted/listmonk) +- [Official Documentation](https://listmonk.app) +- [GitHub Repository](https://github.com/knadh/listmonk) diff --git a/docs/app/deploy/llama/page.mdx b/docs/app/deploy/llama/page.mdx new file mode 100644 index 0000000..f4067f8 --- /dev/null +++ b/docs/app/deploy/llama/page.mdx @@ -0,0 +1,118 @@ +--- +title: "Deploy Meta Llama 3.1 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Meta Llama 3.1 with Docker Compose. " +--- + +# Deploy Meta Llama 3.1 + +Meta's flagship open-weight model with 128K context. Supports 8B, 70B, and 405B parameters. + +
+ ⭐ 65.0k stars + 📜 Llama 3.1 Community License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Meta Llama 3.1 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Meta Llama 3.1 and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + ollama-llama: + image: ollama/ollama:latest + container_name: ollama-llama + restart: unless-stopped + command: serve + ports: + - "11434:11434" + volumes: + - ollama:/root/.ollama + +volumes: + ollama: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/llama && cd /opt/llama + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs llama | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d
+```
+
+## Going Further
+
+- [Meta Llama 3.1 on AltStack Directory](https://thealtstack.com/alternative-to/llama)
+- [Meta Llama 3.1 Self-Hosted Guide](https://thealtstack.com/self-hosted/llama)
+- [Official Documentation](https://llama.meta.com)
+- [GitHub Repository](https://github.com/meta-llama/llama3)
diff --git a/docs/app/deploy/matomo/page.mdx b/docs/app/deploy/matomo/page.mdx
new file mode 100644
index 0000000..98b4a8b
--- /dev/null
+++ b/docs/app/deploy/matomo/page.mdx
@@ -0,0 +1,119 @@
+---
+title: "Deploy Matomo Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Matomo with Docker Compose. "
+---
+
+# Deploy Matomo
+
+Matomo is the leading open-source alternative to Google Analytics, giving you complete control and built-in privacy. Easily collect, visualise, and analyse data from websites & apps.
+
+<div style={{display: 'flex', gap: '1rem', flexWrap: 'wrap', alignItems: 'center'}}>
+ ⭐ 21.3k stars + 📜 GNU General Public License v3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Matomo instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Matomo and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + matomo: + image: matomo:latest + container_name: matomo + restart: unless-stopped + ports: + - "8080:80" + environment: + - MATOMO_DATABASE_HOST=db +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/matomo && cd /opt/matomo + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `MATOMO_DATABASE_HOST` | `db` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs matomo | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d
+```
+
+## Going Further
+
+- [Matomo on AltStack Directory](https://thealtstack.com/alternative-to/matomo)
+- [Matomo Self-Hosted Guide](https://thealtstack.com/self-hosted/matomo)
+- [Official Documentation](https://matomo.org)
+- [GitHub Repository](https://github.com/matomo-org/matomo)
diff --git a/docs/app/deploy/mattermost/page.mdx b/docs/app/deploy/mattermost/page.mdx
new file mode 100644
index 0000000..8b3feff
--- /dev/null
+++ b/docs/app/deploy/mattermost/page.mdx
@@ -0,0 +1,143 @@
+---
+title: "Deploy Mattermost Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Mattermost with Docker Compose. "
+---
+
+# Deploy Mattermost
+
+Mattermost is an open source platform for secure collaboration across the entire software development lifecycle.
+
+<div style={{display: 'flex', gap: '1rem', flexWrap: 'wrap', alignItems: 'center'}}>
+ ⭐ 35.2k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Mattermost instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Mattermost and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + mattermost: + image: mattermost/mattermost-team-edition:latest + container_name: mattermost + restart: unless-stopped + depends_on: + - db + ports: + - "8065:8065" + environment: + - MM_SQLSETTINGS_DRIVERNAME=postgres + - MM_SQLSETTINGS_DATASOURCE=postgres://mmuser:mmuser_password@db:5432/mattermost?sslmode=disable&connect_timeout=10 + - MM_SERVICESETTINGS_SITEURL=http://localhost:8065 + volumes: + - ./volumes/app/config:/mattermost/config + - ./volumes/app/data:/mattermost/data + - ./volumes/app/logs:/mattermost/logs + + db: + image: postgres:13-alpine + container_name: mattermost-db + restart: unless-stopped + environment: + - POSTGRES_USER=mmuser + - POSTGRES_PASSWORD=mmuser_password + - POSTGRES_DB=mattermost + volumes: + - ./volumes/db/var/lib/postgresql/data:/var/lib/postgresql/data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/mattermost && cd /opt/mattermost + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `MM_SQLSETTINGS_DRIVERNAME` | `postgres` | No | +| 
`MM_SQLSETTINGS_DATASOURCE` | `postgres://mmuser:mmuser_password@db:5432/mattermost?sslmode=disable&connect_timeout=10` | No | +| `MM_SERVICESETTINGS_SITEURL` | `http://localhost:8065` | No | +| `POSTGRES_USER` | `mmuser` | No | +| `POSTGRES_PASSWORD` | `mmuser_password` | No | +| `POSTGRES_DB` | `mattermost` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs mattermost | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Mattermost on AltStack Directory](https://thealtstack.com/alternative-to/mattermost) +- [Mattermost Self-Hosted Guide](https://thealtstack.com/self-hosted/mattermost) +- [Official Documentation](https://mattermost.com) +- [GitHub Repository](https://github.com/mattermost/mattermost) diff --git a/docs/app/deploy/mautic/page.mdx b/docs/app/deploy/mautic/page.mdx new file mode 100644 index 0000000..55e7bbd --- /dev/null +++ b/docs/app/deploy/mautic/page.mdx @@ -0,0 +1,153 @@ +--- +title: "Deploy Mautic Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Mautic with Docker Compose. " +--- + +# Deploy Mautic + +World's largest open source marketing automation project. + +
+ ⭐ 7.0k stars + 📜 GPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Mautic instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Mautic and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + mautic: + image: mautic/mautic:latest + container_name: mautic + restart: unless-stopped + depends_on: + - db + ports: + - "8080:80" + environment: + - MAUTIC_DB_HOST=db + - MAUTIC_DB_USER=mautic + - MAUTIC_DB_PASSWORD=mautic + - MAUTIC_DB_NAME=mautic + - MAUTIC_RUN_CRON_JOBS=true + volumes: + - mautic_data:/var/www/html + + db: + image: mysql:5.7 + container_name: mautic-db + restart: unless-stopped + command: --default-authentication-plugin=mysql_native_password + environment: + - MYSQL_ROOT_PASSWORD=root + - MYSQL_USER=mautic + - MYSQL_PASSWORD=mautic + - MYSQL_DATABASE=mautic + volumes: + - mautic_db_data:/var/lib/mysql + +volumes: + mautic_data: + mautic_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/mautic && cd /opt/mautic + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `MAUTIC_DB_HOST` | `db` | No | +| `MAUTIC_DB_USER` | `mautic` | No | +| `MAUTIC_DB_PASSWORD` | `mautic` | No | +| `MAUTIC_DB_NAME` | `mautic` | No | +| `MAUTIC_RUN_CRON_JOBS` | `true` | No | +| 
`--default-authentication-plugin` (MySQL command flag, not an env var) | `mysql_native_password` | No |
+| `MYSQL_ROOT_PASSWORD` | `root` | No |
+| `MYSQL_USER` | `mautic` | No |
+| `MYSQL_PASSWORD` | `mautic` | No |
+| `MYSQL_DATABASE` | `mautic` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs mautic | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d
+```
+
+## Going Further
+
+- [Mautic on AltStack Directory](https://thealtstack.com/alternative-to/mautic)
+- [Mautic Self-Hosted Guide](https://thealtstack.com/self-hosted/mautic)
+- [Official Documentation](https://www.mautic.org)
+- [GitHub Repository](https://github.com/mautic/mautic)
diff --git a/docs/app/deploy/medusa/page.mdx b/docs/app/deploy/medusa/page.mdx
new file mode 100644
index 0000000..8d3d166
--- /dev/null
+++ b/docs/app/deploy/medusa/page.mdx
@@ -0,0 +1,150 @@
+---
+title: "Deploy Medusa.js Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting Medusa.js with Docker Compose. "
+---
+
+# Deploy Medusa.js
+
+The open-source alternative to Shopify. Building blocks for digital commerce.
+
+<div style={{display: 'flex', gap: '1rem', flexWrap: 'wrap', alignItems: 'center'}}>
+ ⭐ 24.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Medusa.js instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Medusa.js and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + medusa: + image: medusajs/medusa:latest + container_name: medusa + restart: unless-stopped + depends_on: + - db + - redis + ports: + - "9000:9000" + environment: + - DATABASE_URL=postgres://medusa:medusa@db:5432/medusa + - REDIS_URL=redis://redis:6379 + - JWT_SECRET=supersecret + - COOKIE_SECRET=supersecret + + db: + image: postgres:15-alpine + container_name: medusa-db + restart: unless-stopped + environment: + - POSTGRES_USER=medusa + - POSTGRES_PASSWORD=medusa + - POSTGRES_DB=medusa + volumes: + - medusa_db_data:/var/lib/postgresql/data + + redis: + image: redis:alpine + container_name: medusa-redis + restart: unless-stopped + +volumes: + medusa_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/medusa && cd /opt/medusa + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `DATABASE_URL` | `postgres://medusa:medusa@db:5432/medusa` | No | +| `REDIS_URL` | `redis://redis:6379` | No | +| `JWT_SECRET` | `supersecret` | No | +| `COOKIE_SECRET` | `supersecret` | No | +| 
`POSTGRES_USER` | `medusa` | No | +| `POSTGRES_PASSWORD` | `medusa` | No | +| `POSTGRES_DB` | `medusa` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs medusa | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Medusa.js on AltStack Directory](https://thealtstack.com/alternative-to/medusa) +- [Medusa.js Self-Hosted Guide](https://thealtstack.com/self-hosted/medusa) +- [Official Documentation](https://medusajs.com) +- [GitHub Repository](https://github.com/medusajs/medusa) diff --git a/docs/app/deploy/metabase/page.mdx b/docs/app/deploy/metabase/page.mdx new file mode 100644 index 0000000..5a3a0a7 --- /dev/null +++ b/docs/app/deploy/metabase/page.mdx @@ -0,0 +1,148 @@ +--- +title: "Deploy Metabase Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Metabase with Docker Compose. " +--- + +# Deploy Metabase + +The simplest, fastest way to get business intelligence and analytics throughout your company. + +
+ ⭐ 38.0k stars + 📜 AGPLv3 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Metabase instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Metabase and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + metabase: + image: metabase/metabase:latest + container_name: metabase + restart: unless-stopped + depends_on: + - db + ports: + - "3000:3000" + environment: + - MB_DB_TYPE=postgres + - MB_DB_DBNAME=metabase + - MB_DB_PORT=5432 + - MB_DB_USER=metabase + - MB_DB_PASS=metabase + - MB_DB_HOST=db + + db: + image: postgres:14-alpine + container_name: metabase-db + restart: unless-stopped + environment: + - POSTGRES_USER=metabase + - POSTGRES_PASSWORD=metabase + - POSTGRES_DB=metabase + volumes: + - metabase_db_data:/var/lib/postgresql/data + +volumes: + metabase_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/metabase && cd /opt/metabase + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `MB_DB_TYPE` | `postgres` | No | +| `MB_DB_DBNAME` | `metabase` | No | +| `MB_DB_PORT` | `5432` | No | +| `MB_DB_USER` | `metabase` | No | +| `MB_DB_PASS` | `metabase` | No | +| `MB_DB_HOST` | `db` | No | +| `POSTGRES_USER` | `metabase` | No | +| `POSTGRES_PASSWORD` | `metabase` | No | +| 
`POSTGRES_DB` | `metabase` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs metabase | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Metabase on AltStack Directory](https://thealtstack.com/alternative-to/metabase) +- [Metabase Self-Hosted Guide](https://thealtstack.com/self-hosted/metabase) +- [Official Documentation](https://www.metabase.com) +- [GitHub Repository](https://github.com/metabase/metabase) diff --git a/docs/app/deploy/minio/page.mdx b/docs/app/deploy/minio/page.mdx new file mode 100644 index 0000000..80d95c4 --- /dev/null +++ b/docs/app/deploy/minio/page.mdx @@ -0,0 +1,128 @@ +--- +title: "Deploy MinIO Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting MinIO with Docker Compose. " +--- + +# Deploy MinIO + +High-performance, S3-compatible object storage for AI and enterprise data. + +
+ ⭐ 45.0k stars + 📜 AGPLv3 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working MinIO instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for MinIO and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + minio: + image: minio/minio:latest + container_name: minio + restart: unless-stopped + ports: + - "9000:9000" + - "9090:9090" + command: server /data --console-address ":9090" + environment: + - MINIO_ROOT_USER=minioadmin + - MINIO_ROOT_PASSWORD=minioadmin + volumes: + - minio_data:/data + +volumes: + minio_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/minio && cd /opt/minio + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `MINIO_ROOT_USER` | `minioadmin` | No | +| `MINIO_ROOT_PASSWORD` | `minioadmin` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs minio | tail -50 +``` + 
+**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [MinIO on AltStack Directory](https://thealtstack.com/alternative-to/minio) +- [MinIO Self-Hosted Guide](https://thealtstack.com/self-hosted/minio) +- [Official Documentation](https://min.io) +- [GitHub Repository](https://github.com/minio/minio) diff --git a/docs/app/deploy/mistral/page.mdx b/docs/app/deploy/mistral/page.mdx new file mode 100644 index 0000000..e585357 --- /dev/null +++ b/docs/app/deploy/mistral/page.mdx @@ -0,0 +1,117 @@ +--- +title: "Deploy Mistral Large 2 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Mistral Large 2 with Docker Compose. " +--- + +# Deploy Mistral Large 2 + +Flagship 123B model from Mistral AI. Optimized for multilingual, reasoning, and coding tasks. + +
+ ⭐ 20.0k stars + 📜 Mistral Research License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Mistral Large 2 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Mistral Large 2 and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + ollama-mistral: + image: ollama/ollama:latest + container_name: ollama-mistral + restart: unless-stopped + ports: + - "11436:11434" + volumes: + - ollama_mistral:/root/.ollama + +volumes: + ollama_mistral: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/mistral && cd /opt/mistral + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs mistral | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Mistral Large 2 on AltStack Directory](https://thealtstack.com/alternative-to/mistral) +- [Mistral Large 2 Self-Hosted Guide](https://thealtstack.com/self-hosted/mistral) +- [Official Documentation](https://mistral.ai) +- [GitHub Repository](https://github.com/mistralai/mistral-inference) diff --git a/docs/app/deploy/mixpost/page.mdx b/docs/app/deploy/mixpost/page.mdx new file mode 100644 index 0000000..5f70e08 --- /dev/null +++ b/docs/app/deploy/mixpost/page.mdx @@ -0,0 +1,156 @@ +--- +title: "Deploy Mixpost Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Mixpost with Docker Compose. " +--- + +# Deploy Mixpost + +Self-hosted social media management software. + +
+ ⭐ 3.0k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Mixpost instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Mixpost and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + mixpost: + image: inovector/mixpost:latest + container_name: mixpost + restart: unless-stopped + depends_on: + - db + - redis + ports: + - "80:80" + environment: + - APP_URL=http://localhost + - DB_HOST=db + - DB_DATABASE=mixpost + - DB_USERNAME=mixpost + - DB_PASSWORD=mixpost + - REDIS_HOST=redis + + db: + image: mysql:8.0 + container_name: mixpost-db + restart: unless-stopped + environment: + - MYSQL_DATABASE=mixpost + - MYSQL_USER=mixpost + - MYSQL_PASSWORD=mixpost + - MYSQL_ROOT_PASSWORD=root + volumes: + - mixpost_db_data:/var/lib/mysql + + redis: + image: redis:alpine + container_name: mixpost-redis + restart: unless-stopped + +volumes: + mixpost_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/mixpost && cd /opt/mixpost + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `APP_URL` | `http://localhost` | No | +| `DB_HOST` | `db` | No | +| `DB_DATABASE` | `mixpost` | No | +| `DB_USERNAME` | `mixpost` | No | +| `DB_PASSWORD` | `mixpost` | No | +| `REDIS_HOST` | `redis` | No 
| +| `MYSQL_DATABASE` | `mixpost` | No | +| `MYSQL_USER` | `mixpost` | No | +| `MYSQL_PASSWORD` | `mixpost` | No | +| `MYSQL_ROOT_PASSWORD` | `root` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs mixpost | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Mixpost on AltStack Directory](https://thealtstack.com/alternative-to/mixpost) +- [Mixpost Self-Hosted Guide](https://thealtstack.com/self-hosted/mixpost) +- [Official Documentation](https://mixpost.app) +- [GitHub Repository](https://github.com/inovector/mixpost) diff --git a/docs/app/deploy/mochi-1/page.mdx b/docs/app/deploy/mochi-1/page.mdx new file mode 100644 index 0000000..ad3f3de --- /dev/null +++ b/docs/app/deploy/mochi-1/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Mochi-1 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Mochi-1 with Docker Compose. " +--- + +# Deploy Mochi-1 + +High-fidelity open-weights video generation model from Genmo, rivaling closed-source alternatives. + +
+ ⭐ 5.0k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Mochi-1 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Mochi-1 and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + mochi-1: + image: genmo/mochi-1:latest + container_name: mochi-1 + restart: unless-stopped + ports: + - "8000:8000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/mochi-1 && cd /opt/mochi-1 + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs mochi-1 | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d
+```
+
+## Going Further
+
+- [Mochi-1 on AltStack Directory](https://thealtstack.com/alternative-to/mochi-1)
+- [Mochi-1 Self-Hosted Guide](https://thealtstack.com/self-hosted/mochi-1)
+- [Official Documentation](https://www.genmo.ai)
+- [GitHub Repository](https://github.com/genmoai/mochi)
diff --git a/docs/app/deploy/n8n/page.mdx b/docs/app/deploy/n8n/page.mdx
new file mode 100644
index 0000000..b06e507
--- /dev/null
+++ b/docs/app/deploy/n8n/page.mdx
@@ -0,0 +1,138 @@
+---
+title: "Deploy n8n Self-Hosted (Docker)"
+description: "Step-by-step guide to self-hosting n8n with Docker Compose. "
+---
+
+# Deploy n8n
+
+Fair-code workflow automation tool. Easily automate tasks across different services.
+
+ ⭐ 49.0k stars + 📜 Sustainable Use License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working n8n instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for n8n and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + n8n: + image: n8nio/n8n:latest + container_name: n8n + restart: unless-stopped + ports: + - "5678:5678" + environment: + - N8N_BASIC_AUTH_ACTIVE=true + - N8N_BASIC_AUTH_USER=admin + - N8N_BASIC_AUTH_PASSWORD=password + - N8N_HOST=localhost + - N8N_PORT=5678 + - N8N_PROTOCOL=http + - NODE_ENV=production + - WEBHOOK_URL=http://localhost:5678/ + volumes: + - n8n_data:/home/node/.n8n + +volumes: + n8n_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/n8n && cd /opt/n8n + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `N8N_BASIC_AUTH_ACTIVE` | `true` | No | +| `N8N_BASIC_AUTH_USER` | `admin` | No | +| `N8N_BASIC_AUTH_PASSWORD` | `password` | No | +| `N8N_HOST` | `localhost` | No | +| `N8N_PORT` | `5678` | No | +| `N8N_PROTOCOL` | `http` | No | +| `NODE_ENV` | `production` | No | +| `WEBHOOK_URL` | `http://localhost:5678/` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured 
([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs n8n | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [n8n on AltStack Directory](https://thealtstack.com/alternative-to/n8n) +- [n8n Self-Hosted Guide](https://thealtstack.com/self-hosted/n8n) +- [Official Documentation](https://n8n.io) +- [GitHub Repository](https://github.com/n8n-io/n8n) diff --git a/docs/app/deploy/odoo/page.mdx b/docs/app/deploy/odoo/page.mdx new file mode 100644 index 0000000..ce53b35 --- /dev/null +++ b/docs/app/deploy/odoo/page.mdx @@ -0,0 +1,161 @@ +--- +title: "Deploy Odoo Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Odoo with Docker Compose. " +--- + +# Deploy Odoo + +A suite of open source business apps: CRM, eCommerce, accounting, manufacturing, warehouse, and more. + +
+ ⭐ 48.9k stars + 📜 LGPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Odoo instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Odoo and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for Odoo +version: '3.8' + +services: + odoo: + build: + context: . + dockerfile: Dockerfile + container_name: odoo + ports: + - "8069:8069" + environment: + - HOST=db + - USER=odoo + - PASSWORD=odoo + depends_on: + db: + condition: service_healthy + networks: + - odoo_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:8069/" ] + interval: 30s + timeout: 10s + retries: 3 + restart: unless-stopped + + db: + image: postgres:15-alpine + container_name: odoo-db + environment: + POSTGRES_USER: odoo + POSTGRES_PASSWORD: odoo + POSTGRES_DB: postgres + volumes: + - odoo_db_data:/var/lib/postgresql/data + networks: + - odoo_net + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U odoo" ] + interval: 5s + timeout: 5s + retries: 5 + +networks: + odoo_net: + driver: bridge + +volumes: + odoo_db_data: + name: odoo_db_data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/odoo && cd /opt/odoo + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `HOST` | `db` | No | +| `USER` | `odoo` | No | +| `PASSWORD` 
| `odoo` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs odoo | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Odoo on AltStack Directory](https://thealtstack.com/alternative-to/odoo) +- [Odoo Self-Hosted Guide](https://thealtstack.com/self-hosted/odoo) +- [Official Documentation](https://www.odoo.com) +- [GitHub Repository](https://github.com/odoo/odoo) diff --git a/docs/app/deploy/ollama/page.mdx b/docs/app/deploy/ollama/page.mdx new file mode 100644 index 0000000..6224dc3 --- /dev/null +++ b/docs/app/deploy/ollama/page.mdx @@ -0,0 +1,137 @@ +--- +title: "Deploy Ollama Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Ollama with Docker Compose. " +--- + +# Deploy Ollama + +Get up and running with Llama 3, Mistral, Gemma, and other large language models locally. + +
+ ⭐ 60.0k stars + 📜 MIT License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Ollama instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Ollama and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for Ollama +version: '3.8' + +services: + ollama: + image: ollama/ollama:latest # Official image is highly recommended for GPU support + container_name: ollama + ports: + - "11434:11434" + volumes: + - ollama_data:/root/.ollama + # For GPU support (NVIDIA), uncomment the following: + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: all + # capabilities: [gpu] + networks: + - ollama_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:11434/api/tags" ] + interval: 10s + timeout: 5s + retries: 5 + +networks: + ollama_net: + driver: bridge + +volumes: + ollama_data: + name: ollama_data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/ollama && cd /opt/ollama + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added 
([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs ollama | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Ollama on AltStack Directory](https://thealtstack.com/alternative-to/ollama) +- [Ollama Self-Hosted Guide](https://thealtstack.com/self-hosted/ollama) +- [Official Documentation](https://ollama.com) +- [GitHub Repository](https://github.com/ollama/ollama) diff --git a/docs/app/deploy/onlyoffice/page.mdx b/docs/app/deploy/onlyoffice/page.mdx new file mode 100644 index 0000000..ceec9f0 --- /dev/null +++ b/docs/app/deploy/onlyoffice/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy ONLYOFFICE Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting ONLYOFFICE with Docker Compose. " +--- + +# Deploy ONLYOFFICE + +Powerful online document editors for text, spreadsheets, and presentations. Highly compatible with MS Office. + +
+ ⭐ 11.0k stars + 📜 AGPLv3 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working ONLYOFFICE instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for ONLYOFFICE and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + onlyoffice: + image: onlyoffice/documentserver:latest + container_name: onlyoffice + restart: unless-stopped + ports: + - "8080:80" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/onlyoffice && cd /opt/onlyoffice + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs onlyoffice | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [ONLYOFFICE on AltStack Directory](https://thealtstack.com/alternative-to/onlyoffice) +- [ONLYOFFICE Self-Hosted Guide](https://thealtstack.com/self-hosted/onlyoffice) +- [Official Documentation](https://www.onlyoffice.com) +- [GitHub Repository](https://github.com/ONLYOFFICE/DocumentServer) diff --git a/docs/app/deploy/orangehrm/page.mdx b/docs/app/deploy/orangehrm/page.mdx new file mode 100644 index 0000000..e9d96d4 --- /dev/null +++ b/docs/app/deploy/orangehrm/page.mdx @@ -0,0 +1,146 @@ +--- +title: "Deploy OrangeHRM Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting OrangeHRM with Docker Compose. " +--- + +# Deploy OrangeHRM + +The world's most popular open source human resource management software. + +
+ ⭐ 1.2k stars + 📜 GPLv2 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working OrangeHRM instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for OrangeHRM and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + orangehrm: + image: orangehrm/orangehrm:latest + container_name: orangehrm + restart: unless-stopped + depends_on: + - db + ports: + - "80:80" + environment: + - ORANGEHRM_DATABASE_HOST=db + - ORANGEHRM_DATABASE_USER=orangehrm + - ORANGEHRM_DATABASE_PASSWORD=orangehrm + - ORANGEHRM_DATABASE_NAME=orangehrm + + db: + image: mariadb:10.6 + container_name: orangehrm-db + restart: unless-stopped + environment: + - MYSQL_ROOT_PASSWORD=root + - MYSQL_USER=orangehrm + - MYSQL_PASSWORD=orangehrm + - MYSQL_DATABASE=orangehrm + volumes: + - orangehrm_db_data:/var/lib/mysql + +volumes: + orangehrm_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/orangehrm && cd /opt/orangehrm + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `ORANGEHRM_DATABASE_HOST` | `db` | No | +| `ORANGEHRM_DATABASE_USER` | `orangehrm` | No | +| `ORANGEHRM_DATABASE_PASSWORD` | `orangehrm` | No | +| `ORANGEHRM_DATABASE_NAME` | `orangehrm` | No | +| `MYSQL_ROOT_PASSWORD` | `root` | No | +| `MYSQL_USER` | 
`orangehrm` | No | +| `MYSQL_PASSWORD` | `orangehrm` | No | +| `MYSQL_DATABASE` | `orangehrm` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs orangehrm | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [OrangeHRM on AltStack Directory](https://thealtstack.com/alternative-to/orangehrm) +- [OrangeHRM Self-Hosted Guide](https://thealtstack.com/self-hosted/orangehrm) +- [Official Documentation](https://www.orangehrm.com) +- [GitHub Repository](https://github.com/orangehrm/orangehrm) diff --git a/docs/app/deploy/outline/page.mdx b/docs/app/deploy/outline/page.mdx new file mode 100644 index 0000000..b040f70 --- /dev/null +++ b/docs/app/deploy/outline/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Outline Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Outline with Docker Compose. " +--- + +# Deploy Outline + +Fast, collaborative, knowledge base for your team built using React and Markdown. + +
+ ⭐ 24.0k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Outline instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Outline and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + outline: + image: outlinewiki/outline:latest + container_name: outline + restart: unless-stopped + ports: + - "3000:3000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/outline && cd /opt/outline + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs outline | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Outline on AltStack Directory](https://thealtstack.com/alternative-to/outline) +- [Outline Self-Hosted Guide](https://thealtstack.com/self-hosted/outline) +- [Official Documentation](https://www.getoutline.com) +- [GitHub Repository](https://github.com/outline/outline) diff --git a/docs/app/deploy/page.mdx b/docs/app/deploy/page.mdx new file mode 100644 index 0000000..96635b5 --- /dev/null +++ b/docs/app/deploy/page.mdx @@ -0,0 +1,171 @@ +--- +title: "Deploy Guides" +description: "65+ self-hosting deploy guides with Docker Compose configs. Find your tool, copy the config, ship it." +--- + +# Deploy Guides + +Every guide follows the same pattern: **copy the Docker Compose config, tweak the `.env`, run `docker compose up -d`.** Done. + +> 💡 **New to self-hosting?** Start with the [Quick Start](/quick-start) guide first, then come back here. + +--- + +## 🤖 AI & Machine Learning + +Run AI models on your own hardware. No API keys, no usage limits, no data leaving your server. + +| Tool | What It Does | +|---|---| +| [Ollama](/deploy/ollama) | Run LLMs locally with a simple CLI | +| [DeepSeek](/deploy/deepseek) | DeepSeek-V3 / R1 reasoning models | +| [Meta Llama](/deploy/llama) | Meta's open-weight Llama 3.1 | +| [Mistral](/deploy/mistral) | Mistral Large 2 — fast and capable | +| [Qwen](/deploy/qwen) | Alibaba's Qwen 2.5 models | +| [Google Gemma](/deploy/gemma) | Google's lightweight open models | +| [GPT4All](/deploy/gpt4all) | Desktop-first local LLM runner | +| [Continue](/deploy/continue-dev) | AI code assistant for VS Code/JetBrains | +| [TabbyML](/deploy/tabby) | Self-hosted GitHub Copilot alternative | +| [Stable Diffusion](/deploy/stable-diffusion) | AI image generation (SD 3.5) | +| [FLUX](/deploy/flux) | Next-gen image generation | +| [HunyuanVideo](/deploy/hunyuan-video) | AI video generation | +| [Mochi-1](/deploy/mochi-1) | Text-to-video AI | + +--- + +## 📊 Analytics & Monitoring + +Own your data. 
No more sending user behavior to Google. + +| Tool | What It Does | +|---|---| +| [Plausible](/deploy/plausible) | Privacy-first web analytics | +| [PostHog](/deploy/posthog) | Product analytics + session replay | +| [Matomo](/deploy/matomo) | Full Google Analytics replacement | +| [Jitsu](/deploy/jitsu) | Open-source Segment alternative | +| [Metabase](/deploy/metabase) | Business intelligence dashboards | +| [Apache Superset](/deploy/superset) | Enterprise data visualization | +| [GlitchTip](/deploy/glitchtip) | Error tracking (Sentry alternative) | +| [SigNoz](/deploy/signoz) | Full-stack observability platform | +| [Uptime Kuma](/deploy/uptime-kuma) | Beautiful uptime monitoring | + +--- + +## 💬 Productivity & Collaboration + +Replace Slack, Notion, and Jira — on your terms. + +| Tool | What It Does | +|---|---| +| [Mattermost](/deploy/mattermost) | Slack alternative for teams | +| [Rocket.Chat](/deploy/rocketchat) | Team chat with omnichannel support | +| [Outline](/deploy/outline) | Beautiful team knowledge base | +| [AFFiNE](/deploy/affine) | Notion + Miro hybrid workspace | +| [AppFlowy](/deploy/appflowy) | Open-source Notion alternative | +| [ONLYOFFICE](/deploy/onlyoffice) | Self-hosted Google Docs/Sheets | +| [Plane](/deploy/plane) | Project management (Jira alternative) | +| [Taiga](/deploy/taiga) | Agile project management | +| [Cal.com](/deploy/calcom) | Scheduling (Calendly alternative) | +| [Documenso](/deploy/documenso) | Digital signatures (DocuSign alternative) | +| [Zammad](/deploy/zammad) | Helpdesk & ticketing system | + +--- + +## 🏢 Business & CRM + +Run your business without SaaS subscriptions. 
+ +| Tool | What It Does | +|---|---| +| [Odoo](/deploy/odoo) | Full ERP suite (CRM, accounting, HR) | +| [ERPNext](/deploy/erpnext) | Manufacturing & distribution ERP | +| [Twenty](/deploy/twenty) | Modern CRM (Salesforce alternative) | +| [Akaunting](/deploy/akaunting) | Free accounting software | +| [OrangeHRM](/deploy/orangehrm) | HR management platform | +| [Medusa.js](/deploy/medusa) | Headless e-commerce engine | + +--- + +## 🔐 Security & Authentication + +Control who gets in. Period. + +| Tool | What It Does | +|---|---| +| [Keycloak](/deploy/keycloak) | Enterprise identity & access management | +| [Authentik](/deploy/authentik) | Modern SSO and user management | +| [Vaultwarden](/deploy/vaultwarden) | Bitwarden-compatible password vault | +| [Bitwarden](/deploy/bitwarden) | Official password manager server | +| [KeePassXC](/deploy/keepassxc) | Offline password manager | + +--- + +## ⚙️ DevOps & Infrastructure + +The tools that run your tools. + +| Tool | What It Does | +|---|---| +| [Coolify](/deploy/coolify) | Self-hosted Vercel/Netlify | +| [Dokku](/deploy/dokku) | Mini Heroku on your server | +| [n8n](/deploy/n8n) | Workflow automation (Zapier alternative) | +| [Activepieces](/deploy/activepieces) | Visual automation builder | +| [Coder](/deploy/coder) | Cloud development environments | +| [MinIO](/deploy/minio) | S3-compatible object storage | +| [PocketBase](/deploy/pocketbase) | Backend in a single binary | +| [Appwrite](/deploy/appwrite) | Firebase alternative | +| [Supabase](/deploy/supabase) | Postgres-powered Firebase alternative | + +--- + +## 📧 Marketing & Email + +Send emails, run campaigns, own your audience. 
+ +| Tool | What It Does | +|---|---| +| [Listmonk](/deploy/listmonk) | Newsletter & mailing list manager | +| [Mautic](/deploy/mautic) | Marketing automation platform | +| [Postal](/deploy/postal) | Mail delivery platform (Mailgun alternative) | +| [Mixpost](/deploy/mixpost) | Social media management | +| [Chaskiq](/deploy/chaskiq) | Customer messaging platform | + +--- + +## 🎨 Creative Tools + +Design, edit, and create without Adobe subscriptions. + +| Tool | What It Does | +|---|---| +| [Penpot](/deploy/penpot) | Design & prototyping (Figma alternative) | +| [GIMP](/deploy/gimp) | Image editing (Photoshop alternative) | +| [Krita](/deploy/krita) | Digital painting & illustration | +| [Kdenlive](/deploy/kdenlive) | Video editing | +| [FreeCAD](/deploy/freecad) | 3D parametric modeling | +| [LibreCAD](/deploy/librecad) | 2D CAD drafting | + +--- + +## 🔌 Communication + +| Tool | What It Does | +|---|---| +| [Jitsi Meet](/deploy/jitsi-meet) | Video conferencing (Zoom alternative) | + +--- + +## Prerequisites for All Guides + +Every guide assumes you have: +- A server with Docker and Docker Compose installed → [Setup Guide](/quick-start/choosing-a-server) +- Basic terminal access (SSH) +- A domain name (optional but recommended) → [Reverse Proxy Setup](/concepts/reverse-proxies) + +## Essential Reading + +Before your first deploy, read these: +- [Docker in 10 Minutes](/concepts/docker-basics) +- [Reverse Proxies Explained](/concepts/reverse-proxies) +- [SSL/TLS for Self-Hosters](/concepts/ssl-tls) +- [Backups That Actually Work](/concepts/backups) diff --git a/docs/app/deploy/penpot/page.mdx b/docs/app/deploy/penpot/page.mdx new file mode 100644 index 0000000..fdfe275 --- /dev/null +++ b/docs/app/deploy/penpot/page.mdx @@ -0,0 +1,185 @@ +--- +title: "Deploy Penpot Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Penpot with Docker Compose. " +--- + +# Deploy Penpot + +Penpot: The open-source design tool for design and code collaboration + +
+ ⭐ 44.2k stars + 📜 Mozilla Public License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +


## What You'll Get

A fully working Penpot instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.

## Prerequisites

- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
- A domain name pointed to your server (optional but recommended)
- Basic terminal access (SSH)

## The Config

Create a directory for Penpot and add this `docker-compose.yml`:

```yaml
# -------------------------------------------------------------------------
# 🚀 Created and distributed by The AltStack
# 🌍 https://thealtstack.com
# -------------------------------------------------------------------------

version: '3.8'

services:
  penpot-frontend:
    image: penpotapp/frontend:latest
    container_name: penpot-frontend
    restart: unless-stopped
    depends_on:
      - penpot-backend
      - penpot-exporter
    ports:
      - "9010:80"
    environment:
      - PENPOT_FLAGS=enable-registration enable-login-with-password
    volumes:
      - penpot_assets:/opt/data/assets

  penpot-backend:
    image: penpotapp/backend:latest
    container_name: penpot-backend
    restart: unless-stopped
    depends_on:
      - penpot-postgres
      - penpot-redis
    environment:
      - PENPOT_FLAGS=enable-registration enable-login-with-password
      - PENPOT_DATABASE_URI=postgresql://penpot-postgres/penpot
      - PENPOT_DATABASE_USERNAME=penpot
      - PENPOT_DATABASE_PASSWORD=penpot
      - PENPOT_REDIS_URI=redis://penpot-redis/0
      - PENPOT_ASSETS_STORAGE_BACKEND=assets-fs
      - PENPOT_STORAGE_ASSETS_FS_DIRECTORY=/opt/data/assets
      - PENPOT_TELEMETRY_ENABLED=false
    volumes:
      - penpot_assets:/opt/data/assets

  penpot-exporter:
    image: penpotapp/exporter:latest
    container_name: penpot-exporter
    restart: unless-stopped
    environment:
      - PENPOT_DATABASE_URI=postgresql://penpot-postgres/penpot
      - PENPOT_DATABASE_USERNAME=penpot
      - PENPOT_DATABASE_PASSWORD=penpot
      - PENPOT_REDIS_URI=redis://penpot-redis/0

  penpot-postgres:
    image: postgres:15
    container_name: penpot-postgres
    restart: unless-stopped
    environment:
      - POSTGRES_INITDB_ARGS=--data-checksums
      - POSTGRES_DB=penpot
      - POSTGRES_USER=penpot
      - POSTGRES_PASSWORD=penpot
    volumes:
      - penpot_postgres_v15:/var/lib/postgresql/data

  penpot-redis:
    image: redis:7
    container_name: penpot-redis
    restart: unless-stopped

volumes:
  penpot_postgres_v15:
  penpot_assets:
```

> 🔐 `enable-registration enable-login-with-password` lets you create your first account with email + password. Once your team is onboarded, you can lock the instance down by switching `PENPOT_FLAGS` back to `disable-registration` and restarting. (With both `disable-*` flags set and no SSO configured, a fresh instance would have no way to sign up or log in.)

## Let's Ship It

```bash
# Create a directory
mkdir -p /opt/penpot && cd /opt/penpot

# Create the docker-compose.yml (paste the config above)
nano docker-compose.yml

# Pull images and start
docker compose up -d

# Watch the logs
docker compose logs -f
```

## Environment Variables

| Variable | Default | Required |
|---|---|---|
| `PENPOT_FLAGS` | `enable-registration enable-login-with-password` | No |
| `PENPOT_DATABASE_URI` | `postgresql://penpot-postgres/penpot` | No |
| `PENPOT_DATABASE_USERNAME` | `penpot` | No |
| `PENPOT_DATABASE_PASSWORD` | `penpot` | No |
| `PENPOT_REDIS_URI` | `redis://penpot-redis/0` | No |
| `PENPOT_ASSETS_STORAGE_BACKEND` | `assets-fs` | No |
| `PENPOT_STORAGE_ASSETS_FS_DIRECTORY` | `/opt/data/assets` | No |
| `PENPOT_TELEMETRY_ENABLED` | `false` | No |
| `POSTGRES_INITDB_ARGS` | `--data-checksums` | No |
| `POSTGRES_DB` | `penpot` | No |
| `POSTGRES_USER` | `penpot` | No |
| `POSTGRES_PASSWORD` | `penpot` | No |


## Post-Deployment Checklist

- [ ] Service is accessible on the configured port
- [ ] Admin account created (if applicable)
- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
- [ ] SSL/HTTPS working
- [ ] Backup script set up ([backup guide](/concepts/backups))
- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))

## The "I Broke It" Section

**Container won't start?**
```bash
docker compose logs penpot-backend | tail -50
```

**Port already in use?**
```bash
# Find what's using the port
lsof -i 
:PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Penpot on AltStack Directory](https://thealtstack.com/alternative-to/penpot) +- [Penpot Self-Hosted Guide](https://thealtstack.com/self-hosted/penpot) +- [Official Documentation](https://penpot.app) +- [GitHub Repository](https://github.com/penpot/penpot) diff --git a/docs/app/deploy/plane/page.mdx b/docs/app/deploy/plane/page.mdx new file mode 100644 index 0000000..7240a19 --- /dev/null +++ b/docs/app/deploy/plane/page.mdx @@ -0,0 +1,160 @@ +--- +title: "Deploy Plane Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Plane with Docker Compose. " +--- + +# Deploy Plane + +🔥🔥🔥 Open-source Jira, Linear, Monday, and ClickUp alternative. Plane is a modern project management platform to manage tasks, sprints, docs, and triage. + +
+ ⭐ 45.5k stars + 📜 GNU Affero General Public License v3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Plane instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Plane and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + plane-web: + image: makeplane/plane-frontend:latest + container_name: plane-frontend + restart: unless-stopped + depends_on: + - plane-backend + ports: + - "3000:80" + + plane-backend: + image: makeplane/plane-backend:latest + container_name: plane-backend + restart: unless-stopped + depends_on: + - plane-db + - plane-redis + ports: + - "8000:8000" + environment: + - DATABASE_URL=postgres://plane:plane@plane-db:5432/plane + - REDIS_URL=redis://plane-redis:6379/ + - SECRET_KEY=replace-me-with-a-secure-key + + plane-db: + image: postgres:15-alpine + container_name: plane-db + restart: unless-stopped + environment: + - POSTGRES_USER=plane + - POSTGRES_PASSWORD=plane + - POSTGRES_DB=plane + volumes: + - plane_db_data:/var/lib/postgresql/data + + plane-redis: + image: redis:7-alpine + container_name: plane-redis + restart: unless-stopped + volumes: + - plane_redis_data:/data + +volumes: + plane_db_data: + plane_redis_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/plane && cd /opt/plane + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| 
Variable | Default | Required | +|---|---|---| +| `DATABASE_URL` | `postgres://plane:plane@plane-db:5432/plane` | No | +| `REDIS_URL` | `redis://plane-redis:6379/` | No | +| `SECRET_KEY` | `replace-me-with-a-secure-key` | No | +| `POSTGRES_USER` | `plane` | No | +| `POSTGRES_PASSWORD` | `plane` | No | +| `POSTGRES_DB` | `plane` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs plane | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Plane on AltStack Directory](https://thealtstack.com/alternative-to/plane) +- [Plane Self-Hosted Guide](https://thealtstack.com/self-hosted/plane) +- [Official Documentation](https://plane.so) +- [GitHub Repository](https://github.com/makeplane/plane) diff --git a/docs/app/deploy/plausible/page.mdx b/docs/app/deploy/plausible/page.mdx new file mode 100644 index 0000000..007337b --- /dev/null +++ b/docs/app/deploy/plausible/page.mdx @@ -0,0 +1,179 @@ +--- +title: "Deploy Plausible Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Plausible with Docker Compose. " +--- + +# Deploy Plausible + +Simple, open source, lightweight and privacy-friendly web analytics alternative to Google Analytics. + +
+ ⭐ 24.2k stars + 📜 GNU Affero General Public License v3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +A production-ready Plausible Analytics instance. Note that Plausible uses a two-database architecture: + +- **PostgreSQL:** Stores your users, sites, and metadata. +- **ClickHouse:** A high-performance columnar database that stores the millions of raw events (pageviews) you'll be collecting. + +> 🌍 **Geolocation Tip:** To see where your visitors are coming from, you'll need to download the free MaxMind GeoLite2 database after deployment and place it in the `./geoip` folder. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Plausible and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + plausible: + image: plausible/analytics:latest + container_name: plausible + restart: unless-stopped + command: sh -c "sleep 10 && /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh run" + depends_on: + - plausible_db + - plausible_events_db + - mail + ports: + - "8000:8000" + environment: + - BASE_URL=http://localhost:8000 + - SECRET_KEY_BASE=ChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMe + - DATABASE_URL=postgres://postgres:postgres@plausible_db:5432/plausible_db + - CLICKHOUSE_DATABASE_URL=http://plausible_events_db:8123/plausible_events_db + - MAILER_EMAIL=admin@example.com + - SMTP_HOST_ADDR=mail + - SMTP_HOST_PORT=25 + - SMTP_USER_NAME= + - SMTP_USER_PWD= + - SMTP_SSL_Enabled=false + volumes: + - ./geoip:/geoip:ro + + plausible_db: + image: postgres:14-alpine + container_name: plausible_db + restart: unless-stopped + volumes: + - plausible_db_data:/var/lib/postgresql/data + 
environment: + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=plausible_db + + plausible_events_db: + image: clickhouse/clickhouse-server:24.3.3.102-alpine + container_name: plausible_events_db + restart: unless-stopped + volumes: + - plausible_events_data:/var/lib/clickhouse + - ./clickhouse/clickhouse-config.xml:/etc/clickhouse-server/config.d/logging.xml:ro + - ./clickhouse/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro + ulimits: + nofile: + soft: 262144 + hard: 262144 + + mail: + image: bytemark/smtp + container_name: plausible_mail + restart: unless-stopped + +volumes: + plausible_db_data: + plausible_events_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/plausible && cd /opt/plausible + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `BASE_URL` | `http://localhost:8000` | No | +| `SECRET_KEY_BASE` | `ChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMeChangeMe` | No | +| `DATABASE_URL` | `postgres://postgres:postgres@plausible_db:5432/plausible_db` | No | +| `CLICKHOUSE_DATABASE_URL` | `http://plausible_events_db:8123/plausible_events_db` | No | +| `MAILER_EMAIL` | `admin@example.com` | No | +| `SMTP_HOST_ADDR` | `mail` | No | +| `SMTP_HOST_PORT` | `25` | No | +| `SMTP_SSL_Enabled` | `false` | No | +| `POSTGRES_PASSWORD` | `postgres` | No | +| `POSTGRES_DB` | `plausible_db` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't 
start?** +```bash +docker compose logs plausible | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Plausible on AltStack Directory](https://thealtstack.com/alternative-to/plausible) +- [Plausible Self-Hosted Guide](https://thealtstack.com/self-hosted/plausible) +- [Official Documentation](https://plausible.io) +- [GitHub Repository](https://github.com/plausible/analytics) diff --git a/docs/app/deploy/pocketbase/page.mdx b/docs/app/deploy/pocketbase/page.mdx new file mode 100644 index 0000000..fe276df --- /dev/null +++ b/docs/app/deploy/pocketbase/page.mdx @@ -0,0 +1,118 @@ +--- +title: "Deploy PocketBase Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting PocketBase with Docker Compose. " +--- + +# Deploy PocketBase + +Open Source realtime backend in 1 file + +
+ ⭐ 56.0k stars + 📜 MIT License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +


## What You'll Get

A fully working PocketBase instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.

## Prerequisites

- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
- A domain name pointed to your server (optional but recommended)
- Basic terminal access (SSH)

## The Config

Create a directory for PocketBase and add this `docker-compose.yml`:

> ⚠️ **Note (review):** PocketBase does not publish an official Docker image, so `pocketbase/pocketbase` may fail to pull. Use a maintained community image (e.g. `ghcr.io/muchobien/pocketbase:latest`) or build a tiny image from the official binary — see the [PocketBase production guide](https://pocketbase.io/docs/going-to-production/). Adjust the volume path to match the image you pick.

```yaml
# -------------------------------------------------------------------------
# 🚀 Created and distributed by The AltStack
# 🌍 https://thealtstack.com
# -------------------------------------------------------------------------

version: '3.8'

services:
  pocketbase:
    image: pocketbase/pocketbase:latest
    container_name: pocketbase
    restart: unless-stopped
    command: serve --http=0.0.0.0:8090
    ports:
      - "8090:8090"
    volumes:
      - pb_data:/pb/pb_data

volumes:
  pb_data:
```

## Let's Ship It

```bash
# Create a directory
mkdir -p /opt/pocketbase && cd /opt/pocketbase

# Create the docker-compose.yml (paste the config above)
nano docker-compose.yml

# Pull images and start
docker compose up -d

# Watch the logs
docker compose logs -f
```



## Post-Deployment Checklist

- [ ] Service is accessible on the configured port
- [ ] Admin account created (if applicable)
- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
- [ ] SSL/HTTPS working
- [ ] Backup script set up ([backup guide](/concepts/backups))
- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))

## The "I Broke It" Section

**Container won't start?**
```bash
docker compose logs pocketbase | tail -50
```

**Port already in use?**
```bash
# Find what's using the port
lsof -i :PORT_NUMBER
```

**Need to start fresh?**
```bash
docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [PocketBase on AltStack Directory](https://thealtstack.com/alternative-to/pocketbase) +- [PocketBase Self-Hosted Guide](https://thealtstack.com/self-hosted/pocketbase) +- [Official Documentation](https://pocketbase.io) +- [GitHub Repository](https://github.com/pocketbase/pocketbase) diff --git a/docs/app/deploy/postal/page.mdx b/docs/app/deploy/postal/page.mdx new file mode 100644 index 0000000..bb471d0 --- /dev/null +++ b/docs/app/deploy/postal/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Postal Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Postal with Docker Compose. " +--- + +# Deploy Postal + +A fully featured open source mail delivery platform for incoming & outgoing e-mail. + +
+ ⭐ 15.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Postal instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Postal and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + postal: + image: postalserver/postal:latest + container_name: postal + restart: unless-stopped + ports: + - "5000:5000" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/postal && cd /opt/postal + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs postal | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Postal on AltStack Directory](https://thealtstack.com/alternative-to/postal) +- [Postal Self-Hosted Guide](https://thealtstack.com/self-hosted/postal) +- [Official Documentation](https://postalserver.io) +- [GitHub Repository](https://github.com/postalserver/postal) diff --git a/docs/app/deploy/posthog/page.mdx b/docs/app/deploy/posthog/page.mdx new file mode 100644 index 0000000..8c8d6e4 --- /dev/null +++ b/docs/app/deploy/posthog/page.mdx @@ -0,0 +1,199 @@ +--- +title: "Deploy PostHog Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting PostHog with Docker Compose. " +--- + +# Deploy PostHog + +🦔 PostHog is an all-in-one developer platform for building successful products. We offer product analytics, web analytics, session replay, error tracking, feature flags, experimentation, surveys, data warehouse, a CDP, and an AI product assistant to help debug your code, ship features faster, and keep all your usage and customer data in one stack. + +
+ ⭐ 31.2k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working PostHog instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for PostHog and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + db: + image: postgres:14-alpine + container_name: posthog-db + restart: unless-stopped + environment: + - POSTGRES_PASSWORD=posthog + - POSTGRES_DB=posthog + - POSTGRES_USER=posthog + volumes: + - posthog_postgres_data:/var/lib/postgresql/data + + redis: + image: redis:6-alpine + container_name: posthog-redis + restart: unless-stopped + volumes: + - posthog_redis_data:/data + + clickhouse: + image: clickhouse/clickhouse-server:22.3-alpine + container_name: posthog-clickhouse + restart: unless-stopped + environment: + - CLICKHOUSE_DB=posthog + - CLICKHOUSE_USER=default + - CLICKHOUSE_PASSWORD= + volumes: + - posthog_clickhouse_data:/var/lib/clickhouse + + kafka: + image: confluentinc/cp-kafka:7.5.3 + container_name: posthog-kafka + restart: unless-stopped + depends_on: + - zookeeper + environment: + - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092 + - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 + + zookeeper: + image: confluentinc/cp-zookeeper:7.5.3 + container_name: posthog-zookeeper + restart: unless-stopped + environment: + - ZOOKEEPER_CLIENT_PORT=2181 + - ZOOKEEPER_TICK_TIME=2000 + + posthog: + image: posthog/posthog:release-1.40.0 + container_name: posthog + restart: 
unless-stopped + depends_on: + - db + - redis + - clickhouse + - kafka + ports: + - "8000:8000" + environment: + - DATABASE_URL=postgres://posthog:posthog@db:5432/posthog + - REDIS_URL=redis://redis:6379/ + - CLICKHOUSE_HOST=clickhouse + - KAFKA_HOSTS=kafka:9092 + - SECRET_KEY=please-change-this-secret-key-in-production-12345 + - SKIP_SERVICE_VERSION_REQUIREMENTS=1 + volumes: + - ./uploads:/app/static/uploads + +volumes: + posthog_postgres_data: + posthog_redis_data: + posthog_clickhouse_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/posthog && cd /opt/posthog + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `POSTGRES_PASSWORD` | `posthog` | No | +| `POSTGRES_DB` | `posthog` | No | +| `POSTGRES_USER` | `posthog` | No | +| `CLICKHOUSE_DB` | `posthog` | No | +| `CLICKHOUSE_USER` | `default` | No | +| `KAFKA_ZOOKEEPER_CONNECT` | `zookeeper:2181` | No | +| `KAFKA_ADVERTISED_LISTENERS` | `PLAINTEXT://kafka:9092` | No | +| `KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR` | `1` | No | +| `ZOOKEEPER_CLIENT_PORT` | `2181` | No | +| `ZOOKEEPER_TICK_TIME` | `2000` | No | +| `DATABASE_URL` | `postgres://posthog:posthog@db:5432/posthog` | No | +| `REDIS_URL` | `redis://redis:6379/` | No | +| `CLICKHOUSE_HOST` | `clickhouse` | No | +| `KAFKA_HOSTS` | `kafka:9092` | No | +| `SECRET_KEY` | `please-change-this-secret-key-in-production-12345` | No | +| `SKIP_SERVICE_VERSION_REQUIREMENTS` | `1` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime 
Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs posthog | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [PostHog on AltStack Directory](https://thealtstack.com/alternative-to/posthog) +- [PostHog Self-Hosted Guide](https://thealtstack.com/self-hosted/posthog) +- [Official Documentation](https://posthog.com) +- [GitHub Repository](https://github.com/PostHog/posthog) diff --git a/docs/app/deploy/qwen/page.mdx b/docs/app/deploy/qwen/page.mdx new file mode 100644 index 0000000..99da2e3 --- /dev/null +++ b/docs/app/deploy/qwen/page.mdx @@ -0,0 +1,117 @@ +--- +title: "Deploy Qwen 2.5 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Qwen 2.5 with Docker Compose. " +--- + +# Deploy Qwen 2.5 + +Comprehensive LLM series from Alibaba Cloud, excelling in coding, math, and multilingual support. + +
+ ⭐ 50.0k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +


## What You'll Get

A fully working Qwen 2.5 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices.

## Prerequisites

- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server))
- A domain name pointed to your server (optional but recommended)
- Basic terminal access (SSH)

## The Config

Create a directory for Qwen 2.5 and add this `docker-compose.yml`:

```yaml
# -------------------------------------------------------------------------
# 🚀 Created and distributed by The AltStack
# 🌍 https://thealtstack.com
# -------------------------------------------------------------------------

version: '3.8'

services:
  ollama-qwen:
    image: ollama/ollama:latest
    container_name: ollama-qwen
    restart: unless-stopped
    ports:
      - "11438:11434"
    volumes:
      - ollama_qwen:/root/.ollama

volumes:
  ollama_qwen:
```

## Let's Ship It

```bash
# Create a directory
mkdir -p /opt/qwen && cd /opt/qwen

# Create the docker-compose.yml (paste the config above)
nano docker-compose.yml

# Pull images and start
docker compose up -d

# Watch the logs
docker compose logs -f
```

Ollama starts empty — pull the model once the container is up, or the API will have nothing to serve:

```bash
# Download Qwen 2.5 into the Ollama volume (one-time, several GB)
docker exec -it ollama-qwen ollama pull qwen2.5
```



## Post-Deployment Checklist

- [ ] Service is accessible on the configured port
- [ ] Admin account created (if applicable)
- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
- [ ] SSL/HTTPS working
- [ ] Backup script set up ([backup guide](/concepts/backups))
- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))

## The "I Broke It" Section

**Container won't start?**
```bash
docker compose logs ollama-qwen | tail -50
```

**Port already in use?**
```bash
# Find what's using the port
lsof -i :PORT_NUMBER
```

**Need to start fresh?**
```bash
docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Qwen 2.5 on AltStack Directory](https://thealtstack.com/alternative-to/qwen) +- [Qwen 2.5 Self-Hosted Guide](https://thealtstack.com/self-hosted/qwen) +- [Official Documentation](https://qwenlm.github.io) +- [GitHub Repository](https://github.com/QwenLM/Qwen2.5) diff --git a/docs/app/deploy/rocketchat/page.mdx b/docs/app/deploy/rocketchat/page.mdx new file mode 100644 index 0000000..91f01c6 --- /dev/null +++ b/docs/app/deploy/rocketchat/page.mdx @@ -0,0 +1,144 @@ +--- +title: "Deploy Rocket.Chat Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Rocket.Chat with Docker Compose. " +--- + +# Deploy Rocket.Chat + +The Secure CommsOS™ for mission-critical operations + +
+ ⭐ 44.5k stars + 📜 Other + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Rocket.Chat instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Rocket.Chat and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + rocketchat: + image: registry.rocket.chat/rocketchat/rocket.chat:latest + container_name: rocketchat + restart: unless-stopped + depends_on: + - mongo + ports: + - "3002:3000" + environment: + - MONGO_URL=mongodb://mongo:27017/rocketchat + - ROOT_URL=http://localhost:3002 + - PORT=3000 + + mongo: + image: mongo:5.0 + container_name: rocketchat-mongo + restart: unless-stopped + command: mongod --oplogSize 128 --replSet rs0 --storageEngine=wiredTiger + volumes: + - ./data/db:/data/db + + mongo-init-replica: + image: mongo:5.0 + container_name: mongo-init-replica + restart: unless-stopped + command: bash /init-replica.sh + depends_on: + - mongo + volumes: + - ./init-replica.sh:/init-replica.sh +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/rocketchat && cd /opt/rocketchat + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `MONGO_URL` | `mongodb://mongo:27017/rocketchat` | No | +| `ROOT_URL` | `http://localhost:3002` | No | +| `PORT` | `3000` | No | +| `storageEngine` | 
`wiredTiger` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs rocketchat | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Rocket.Chat on AltStack Directory](https://thealtstack.com/alternative-to/rocketchat) +- [Rocket.Chat Self-Hosted Guide](https://thealtstack.com/self-hosted/rocketchat) +- [Official Documentation](https://rocket.chat) +- [GitHub Repository](https://github.com/RocketChat/Rocket.Chat) diff --git a/docs/app/deploy/signoz/page.mdx b/docs/app/deploy/signoz/page.mdx new file mode 100644 index 0000000..43bfe7b --- /dev/null +++ b/docs/app/deploy/signoz/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy SigNoz Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting SigNoz with Docker Compose. " +--- + +# Deploy SigNoz + +Open source observability platform. SigNoz helps developers monitor applications and troubleshoot problems. + +
+ ⭐ 18.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working SigNoz instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for SigNoz and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + signoz-frontend: + image: signoz/frontend:latest + container_name: signoz-frontend + restart: unless-stopped + ports: + - "3301:3301" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/signoz && cd /opt/signoz + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs signoz | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [SigNoz on AltStack Directory](https://thealtstack.com/alternative-to/signoz) +- [SigNoz Self-Hosted Guide](https://thealtstack.com/self-hosted/signoz) +- [Official Documentation](https://signoz.io) +- [GitHub Repository](https://github.com/signoz/signoz) diff --git a/docs/app/deploy/stable-diffusion/page.mdx b/docs/app/deploy/stable-diffusion/page.mdx new file mode 100644 index 0000000..2173147 --- /dev/null +++ b/docs/app/deploy/stable-diffusion/page.mdx @@ -0,0 +1,112 @@ +--- +title: "Deploy Stable Diffusion 3.5 Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Stable Diffusion 3.5 with Docker Compose. " +--- + +# Deploy Stable Diffusion 3.5 + +The latest open-weights image generation model from Stability AI, offering superior prompt adherence. + +
+ ⭐ 10.0k stars + 📜 Stability Community License + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Stable Diffusion 3.5 instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Stable Diffusion 3.5 and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + sd-webui: + image: automatic1111/stable-diffusion-webui:latest + container_name: sd-webui + restart: unless-stopped + ports: + - "7860:7860" +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/stable-diffusion && cd /opt/stable-diffusion + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs stable-diffusion | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [Stable Diffusion 3.5 on AltStack Directory](https://thealtstack.com/alternative-to/stable-diffusion) +- [Stable Diffusion 3.5 Self-Hosted Guide](https://thealtstack.com/self-hosted/stable-diffusion) +- [Official Documentation](https://stability.ai) +- [GitHub Repository](https://github.com/Stability-AI/sd3.5) diff --git a/docs/app/deploy/supabase/page.mdx b/docs/app/deploy/supabase/page.mdx new file mode 100644 index 0000000..6d209c2 --- /dev/null +++ b/docs/app/deploy/supabase/page.mdx @@ -0,0 +1,208 @@ +--- +title: "Deploy Supabase Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Supabase with Docker Compose. " +--- + +# Deploy Supabase + +The Postgres development platform. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications. + +
+ ⭐ 97.4k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +A fully working Supabase instance running on your server. This isn't just a database; it's a full backend-as-a-service including: + +- **PostgreSQL:** The world's most advanced relational database. +- **GoTrue:** User management and JWT-based authentication. +- **PostgREST:** Turns your database into a RESTful API automatically. +- **Realtime:** Listen to database changes via WebSockets. +- **Storage:** S3-compatible file storage. + +> ⚠️ **Critical Security Note:** The default configuration uses "postgres" as the password and a temporary JWT secret. You MUST change these in your `.env` file before exposing this to the internet. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Supabase and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Supabase Production-Ready Docker Compose +# Note: Supabase is a collection of services. Official images are the standard. +# This setup includes the core services: PostgREST, GoTrue, Realtime, Storage, and PostgreSQL. 
+
+version: '3.8'
+
+services:
+  db:
+    container_name: supabase-db
+    image: supabase/postgres:15.1.1.78
+    command: postgres -c config_file=/etc/postgresql/postgresql.conf -c log_min_messages=fatal
+    healthcheck:
+      test: ["CMD", "pg_isready", "-U", "postgres"]
+      interval: 5s
+      timeout: 5s
+      retries: 3
+    environment:
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
+      PGDATA: /var/lib/postgresql/data/pgdata
+    volumes:
+      - supabase_db_data:/var/lib/postgresql/data
+    networks:
+      - supabase_net
+
+  auth:
+    container_name: supabase-auth
+    image: supabase/gotrue:v2.143.0
+    depends_on:
+      db:
+        condition: service_healthy
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9999/health"]
+      interval: 5s
+      timeout: 5s
+      retries: 3
+    environment:
+      GOTRUE_DB_DRIVER: postgres
+      GOTRUE_DB_DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres?sslmode=disable
+      GOTRUE_SITE_URL: ${SITE_URL:-http://localhost:3000}
+      GOTRUE_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-do-not-use-in-prod}
+    networks:
+      - supabase_net
+
+  rest:
+    container_name: supabase-rest
+    image: postgrest/postgrest:v11.2.2
+    depends_on:
+      db:
+        condition: service_healthy
+    environment:
+      PGRST_DB_URI: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres
+      PGRST_DB_SCHEMA: public
+      PGRST_DB_ANON_ROLE: anon
+    networks:
+      - supabase_net
+
+  realtime:
+    container_name: supabase-realtime
+    image: supabase/realtime:v2.25.56
+    depends_on:
+      db:
+        condition: service_healthy
+    environment:
+      DB_HOST: db
+      DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
+      JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-do-not-use-in-prod}
+    networks:
+      - supabase_net
+
+  storage:
+    container_name: supabase-storage
+    image: supabase/storage-api:v0.43.12
+    depends_on:
+      db:
+        condition: service_healthy
+    environment:
+      ANON_KEY: ${ANON_KEY}
+      SERVICE_KEY: ${SERVICE_KEY}
+      PGRST_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-do-not-use-in-prod}
+      DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres
+    networks:
+      - supabase_net
+
+networks:
+  supabase_net:
+    driver: bridge
+
+volumes:
+  supabase_db_data:
+    name: supabase_db_data
+```
+
+## Let's Ship It
+
+```bash
+# Create a directory
+mkdir -p /opt/supabase && cd /opt/supabase
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `POSTGRES_PASSWORD` | `postgres` | No |
+| `SITE_URL` | `http://localhost:3000` | No |
+| `JWT_SECRET` | `super-secret-jwt-token-do-not-use-in-prod` | No |
+| `ANON_KEY` | `—` | ✅ Yes |
+| `SERVICE_KEY` | `—` | ✅ Yes |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d +``` + +## Going Further + +- [Supabase on AltStack Directory](https://thealtstack.com/alternative-to/supabase) +- [Supabase Self-Hosted Guide](https://thealtstack.com/self-hosted/supabase) +- [Official Documentation](https://supabase.com) +- [GitHub Repository](https://github.com/supabase/supabase) diff --git a/docs/app/deploy/superset/page.mdx b/docs/app/deploy/superset/page.mdx new file mode 100644 index 0000000..c678499 --- /dev/null +++ b/docs/app/deploy/superset/page.mdx @@ -0,0 +1,171 @@ +--- +title: "Deploy Apache Superset Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Apache Superset with Docker Compose. " +--- + +# Deploy Apache Superset + +Enterprise-ready business intelligence web application. + +
+ ⭐ 59.0k stars + 📜 Apache 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Apache Superset instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Apache Superset and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for Apache Superset +version: '3.8' + +services: + superset: + build: + context: . + dockerfile: Dockerfile + container_name: superset + ports: + - "8088:8088" + environment: + - DATABASE_URL=postgresql://superset:superset@db:5432/superset + - REDIS_URL=redis://redis:6379 + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + networks: + - superset_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:8088/health" ] + interval: 30s + timeout: 10s + retries: 3 + + db: + image: postgres:15-alpine + container_name: superset-db + environment: + POSTGRES_USER: superset + POSTGRES_PASSWORD: superset + POSTGRES_DB: superset + volumes: + - superset_db_data:/var/lib/postgresql/data + networks: + - superset_net + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U superset" ] + interval: 5s + timeout: 5s + retries: 5 + + redis: + image: redis:7-alpine + container_name: superset-redis + networks: + - superset_net + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + interval: 5s + timeout: 5s + retries: 5 + +networks: + superset_net: + driver: bridge + +volumes: + superset_db_data: + name: superset_db_data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p 
/opt/superset && cd /opt/superset + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `DATABASE_URL` | `postgresql://superset:superset@db:5432/superset` | No | +| `REDIS_URL` | `redis://redis:6379` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs superset | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Apache Superset on AltStack Directory](https://thealtstack.com/alternative-to/superset) +- [Apache Superset Self-Hosted Guide](https://thealtstack.com/self-hosted/superset) +- [Official Documentation](https://superset.apache.org) +- [GitHub Repository](https://github.com/apache/superset) diff --git a/docs/app/deploy/tabby/page.mdx b/docs/app/deploy/tabby/page.mdx new file mode 100644 index 0000000..596cc51 --- /dev/null +++ b/docs/app/deploy/tabby/page.mdx @@ -0,0 +1,117 @@ +--- +title: "Deploy TabbyML Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting TabbyML with Docker Compose. " +--- + +# Deploy TabbyML + +Self-hosted AI coding assistant. An open-source, self-hosted alternative to GitHub Copilot. + +
+ ⭐ 25.0k stars + 📜 Apache License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working TabbyML instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for TabbyML and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + tabby: + image: tabbyml/tabby:latest + container_name: tabby + restart: unless-stopped + ports: + - "8080:8080" + volumes: + - tabby-data:/data + +volumes: + tabby-data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/tabby && cd /opt/tabby + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs tabby | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! 
+docker compose up -d +``` + +## Going Further + +- [TabbyML on AltStack Directory](https://thealtstack.com/alternative-to/tabby) +- [TabbyML Self-Hosted Guide](https://thealtstack.com/self-hosted/tabby) +- [Official Documentation](https://tabby.tabbyml.com) +- [GitHub Repository](https://github.com/TabbyML/tabby) diff --git a/docs/app/deploy/taiga/page.mdx b/docs/app/deploy/taiga/page.mdx new file mode 100644 index 0000000..38e41af --- /dev/null +++ b/docs/app/deploy/taiga/page.mdx @@ -0,0 +1,172 @@ +--- +title: "Deploy Taiga Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Taiga with Docker Compose. " +--- + +# Deploy Taiga + +Self-host Taiga on your own server. + +
+ ⭐ 0.8k stars + 📜 Mozilla Public License 2.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Taiga instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Taiga and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + taiga-front: + image: taigaio/taiga-front:latest + container_name: taiga-front + restart: unless-stopped + depends_on: + - taiga-back + ports: + - "9000:80" + environment: + - TAIGA_URL=http://localhost:9000 + - TAIGA_WEBSOCKETS_URL=ws://localhost:9000 + + taiga-back: + image: taigaio/taiga-back:latest + container_name: taiga-back + restart: unless-stopped + depends_on: + - taiga-db + - taiga-redis + - taiga-async-rabbitmq + environment: + - POSTGRES_DB=taiga + - POSTGRES_USER=taiga + - POSTGRES_PASSWORD=taiga + - TAIGA_SECRET_KEY=exe3quu8Su2wohx0uNgo0eif4wohphah + + taiga-db: + image: postgres:13-alpine + container_name: taiga-db + restart: unless-stopped + environment: + - POSTGRES_DB=taiga + - POSTGRES_USER=taiga + - POSTGRES_PASSWORD=taiga + volumes: + - taiga_db_data:/var/lib/postgresql/data + + taiga-async-rabbitmq: + image: rabbitmq:3.8-management-alpine + container_name: taiga-rabbitmq + restart: unless-stopped + environment: + - RABBITMQ_ERLANG_COOKIE=secret-cookie + - RABBITMQ_DEFAULT_USER=taiga + - RABBITMQ_DEFAULT_PASS=taiga + + taiga-redis: + image: redis:6-alpine + container_name: taiga-redis + restart: unless-stopped + +volumes: + taiga_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir 
-p /opt/taiga && cd /opt/taiga
+
+# Create the docker-compose.yml (paste the config above)
+nano docker-compose.yml
+
+# Pull images and start
+docker compose up -d
+
+# Watch the logs
+docker compose logs -f
+```
+
+## Environment Variables
+
+| Variable | Default | Required |
+|---|---|---|
+| `TAIGA_URL` | `http://localhost:9000` | No |
+| `TAIGA_WEBSOCKETS_URL` | `ws://localhost:9000` | No |
+| `POSTGRES_DB` | `taiga` | No |
+| `POSTGRES_USER` | `taiga` | No |
+| `POSTGRES_PASSWORD` | `taiga` | No |
+| `TAIGA_SECRET_KEY` | `exe3quu8Su2wohx0uNgo0eif4wohphah` | No |
+| `RABBITMQ_ERLANG_COOKIE` | `secret-cookie` | No |
+| `RABBITMQ_DEFAULT_USER` | `taiga` | No |
+| `RABBITMQ_DEFAULT_PASS` | `taiga` | No |
+
+
+## Post-Deployment Checklist
+
+- [ ] Service is accessible on the configured port
+- [ ] Admin account created (if applicable)
+- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies))
+- [ ] SSL/HTTPS working
+- [ ] Backup script set up ([backup guide](/concepts/backups))
+- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma))
+
+## The "I Broke It" Section
+
+**Container won't start?**
+```bash
+docker compose logs taiga-back | tail -50
+```
+
+**Port already in use?**
+```bash
+# Find what's using the port
+lsof -i :PORT_NUMBER
+```
+
+**Need to start fresh?**
+```bash
+docker compose down -v # ⚠️ This deletes volumes/data!
+docker compose up -d +``` + +## Going Further + +- [Taiga on AltStack Directory](https://thealtstack.com/alternative-to/taiga) +- [Taiga Self-Hosted Guide](https://thealtstack.com/self-hosted/taiga) +- [Official Documentation](https://taiga.io) +- [GitHub Repository](https://github.com/taigaio/taiga-back) diff --git a/docs/app/deploy/twenty/page.mdx b/docs/app/deploy/twenty/page.mdx new file mode 100644 index 0000000..510ced1 --- /dev/null +++ b/docs/app/deploy/twenty/page.mdx @@ -0,0 +1,140 @@ +--- +title: "Deploy Twenty Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Twenty with Docker Compose. " +--- + +# Deploy Twenty + +A modern open-source CRM alternative to Salesforce and Pipedrive. + +
+ ⭐ 15.0k stars + 📜 AGPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Twenty instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Twenty and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + twenty: + image: twentyhq/twenty:latest + container_name: twenty + restart: unless-stopped + depends_on: + - db + ports: + - "3000:3000" + environment: + - PG_DATABASE_URL=postgres://twenty:twenty@db:5432/twenty + - FRONTEND_URL=http://localhost:3000 + + db: + image: postgres:15-alpine + container_name: twenty-db + restart: unless-stopped + environment: + - POSTGRES_USER=twenty + - POSTGRES_PASSWORD=twenty + - POSTGRES_DB=twenty + volumes: + - twenty_db_data:/var/lib/postgresql/data + +volumes: + twenty_db_data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/twenty && cd /opt/twenty + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `PG_DATABASE_URL` | `postgres://twenty:twenty@db:5432/twenty` | No | +| `FRONTEND_URL` | `http://localhost:3000` | No | +| `POSTGRES_USER` | `twenty` | No | +| `POSTGRES_PASSWORD` | `twenty` | No | +| `POSTGRES_DB` | `twenty` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if 
applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs twenty | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Twenty on AltStack Directory](https://thealtstack.com/alternative-to/twenty) +- [Twenty Self-Hosted Guide](https://thealtstack.com/self-hosted/twenty) +- [Official Documentation](https://twenty.com) +- [GitHub Repository](https://github.com/twentyhq/twenty) diff --git a/docs/app/deploy/uptime-kuma/page.mdx b/docs/app/deploy/uptime-kuma/page.mdx new file mode 100644 index 0000000..5dd6061 --- /dev/null +++ b/docs/app/deploy/uptime-kuma/page.mdx @@ -0,0 +1,130 @@ +--- +title: "Deploy Uptime Kuma Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Uptime Kuma with Docker Compose. " +--- + +# Deploy Uptime Kuma + +A fancy self-hosted monitoring tool. + +
+ ⭐ 55.0k stars + 📜 MIT + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Uptime Kuma instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Uptime Kuma and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +# Docker Compose for Uptime Kuma +version: '3.8' + +services: + uptime-kuma: + image: louislam/uptime-kuma:1 # Official image is standard + container_name: uptime-kuma + ports: + - "3001:3001" + volumes: + - uptime_kuma_data:/app/data + networks: + - uptime_net + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:3001/" ] + interval: 30s + timeout: 10s + retries: 3 + restart: unless-stopped + +networks: + uptime_net: + driver: bridge + +volumes: + uptime_kuma_data: + name: uptime_kuma_data +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/uptime-kuma && cd /opt/uptime-kuma + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs 
uptime-kuma | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Uptime Kuma on AltStack Directory](https://thealtstack.com/alternative-to/uptime-kuma) +- [Uptime Kuma Self-Hosted Guide](https://thealtstack.com/self-hosted/uptime-kuma) +- [Official Documentation](https://uptime.kuma.pet) +- [GitHub Repository](https://github.com/louislam/uptime-kuma) diff --git a/docs/app/deploy/vaultwarden/page.mdx b/docs/app/deploy/vaultwarden/page.mdx new file mode 100644 index 0000000..8dce328 --- /dev/null +++ b/docs/app/deploy/vaultwarden/page.mdx @@ -0,0 +1,126 @@ +--- +title: "Deploy Vaultwarden Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Vaultwarden with Docker Compose. " +--- + +# Deploy Vaultwarden + +Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs. + +
+ ⭐ 32.0k stars + 📜 AGPL-3.0 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Vaultwarden instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Vaultwarden and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + vaultwarden: + image: vaultwarden/server:latest + container_name: vaultwarden + restart: unless-stopped + ports: + - "8080:80" + volumes: + - vw-data:/data + environment: + - WEBSOCKET_ENABLED=true + - SIGNUPS_ALLOWED=true + +volumes: + vw-data: +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/vaultwarden && cd /opt/vaultwarden + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `WEBSOCKET_ENABLED` | `true` | No | +| `SIGNUPS_ALLOWED` | `true` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs vaultwarden | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using 
the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Vaultwarden on AltStack Directory](https://thealtstack.com/alternative-to/vaultwarden) +- [Vaultwarden Self-Hosted Guide](https://thealtstack.com/self-hosted/vaultwarden) +- [Official Documentation](https://github.com/dani-garcia/vaultwarden) +- [GitHub Repository](https://github.com/dani-garcia/vaultwarden) diff --git a/docs/app/deploy/zammad/page.mdx b/docs/app/deploy/zammad/page.mdx new file mode 100644 index 0000000..a09735b --- /dev/null +++ b/docs/app/deploy/zammad/page.mdx @@ -0,0 +1,142 @@ +--- +title: "Deploy Zammad Self-Hosted (Docker)" +description: "Step-by-step guide to self-hosting Zammad with Docker Compose. " +--- + +# Deploy Zammad + +A web-based, open source helpdesk/customer support system with many features. + +
+ ⭐ 5.0k stars + 📜 AGPLv3 + 🔴 Advanced + ⏱ ~20 minutes + +
+ +
+ + 🚀 Deploy on DigitalOcean ($200 Free Credit) + +
+ + +## What You'll Get + +A fully working Zammad instance running on your server. Your data stays on your hardware — no third-party access, no usage limits, no surprise invoices. + +## Prerequisites + +- A server with Docker and Docker Compose installed ([setup guide](/quick-start/choosing-a-server)) +- A domain name pointed to your server (optional but recommended) +- Basic terminal access (SSH) + +## The Config + +Create a directory for Zammad and add this `docker-compose.yml`: + +```yaml +# ------------------------------------------------------------------------- +# 🚀 Created and distributed by The AltStack +# 🌍 https://thealtstack.com +# ------------------------------------------------------------------------- + +version: '3.8' + +services: + zammad: + image: zammad/zammad-docker-compose:zammad-6.3.1-23 + container_name: zammad + restart: unless-stopped + depends_on: + - zammad-postgresql + - zammad-elasticsearch + - zammad-redis + ports: + - "8080:8080" + + zammad-elasticsearch: + image: bitnami/elasticsearch:8.12.2 + container_name: zammad-elasticsearch + restart: unless-stopped + environment: + - discovery.type=single-node + + zammad-postgresql: + image: postgres:15-alpine + container_name: zammad-postgresql + restart: unless-stopped + environment: + - POSTGRES_USER=zammad + - POSTGRES_PASSWORD=zammad + + zammad-redis: + image: redis:7.2-alpine + container_name: zammad-redis + restart: unless-stopped +``` + +## Let's Ship It + +```bash +# Create a directory +mkdir -p /opt/zammad && cd /opt/zammad + +# Create the docker-compose.yml (paste the config above) +nano docker-compose.yml + +# Pull images and start +docker compose up -d + +# Watch the logs +docker compose logs -f +``` + +## Environment Variables + +| Variable | Default | Required | +|---|---|---| +| `POSTGRES_USER` | `zammad` | No | +| `POSTGRES_PASSWORD` | `zammad` | No | + + +## Post-Deployment Checklist + +- [ ] Service is accessible on the configured port +- [ ] Admin account created (if 
applicable) +- [ ] Reverse proxy configured ([Caddy guide](/concepts/reverse-proxies)) +- [ ] SSL/HTTPS working +- [ ] Backup script set up ([backup guide](/concepts/backups)) +- [ ] Uptime monitor added ([Uptime Kuma](/deploy/uptime-kuma)) + +## The "I Broke It" Section + +**Container won't start?** +```bash +docker compose logs zammad | tail -50 +``` + +**Port already in use?** +```bash +# Find what's using the port +lsof -i :PORT_NUMBER +``` + +**Need to start fresh?** +```bash +docker compose down -v # ⚠️ This deletes volumes/data! +docker compose up -d +``` + +## Going Further + +- [Zammad on AltStack Directory](https://thealtstack.com/alternative-to/zammad) +- [Zammad Self-Hosted Guide](https://thealtstack.com/self-hosted/zammad) +- [Official Documentation](https://zammad.org) +- [GitHub Repository](https://github.com/zammad/zammad) diff --git a/docs/app/globals.css b/docs/app/globals.css new file mode 100644 index 0000000..15458fe --- /dev/null +++ b/docs/app/globals.css @@ -0,0 +1,378 @@ +@import "tailwindcss"; + +/* ============================================ + AltStack Docs — Custom Theme Overrides + ============================================ */ + +/* ---- Font Stack ---- */ +:root { + --font-sans: var(--font-outfit), 'Outfit', system-ui, -apple-system, sans-serif; + --font-mono: var(--font-mono), 'JetBrains Mono', 'Fira Code', monospace; +} + +/* ---- AltStack Brand Colors ---- */ +:root { + --altstack-red: #ef4444; + --altstack-orange: #f97316; + --altstack-bg: #050505; + --altstack-surface: #0a0a0a; + --altstack-surface-elevated: #111111; + --altstack-border: rgba(255, 255, 255, 0.08); + --altstack-glass: rgba(10, 10, 10, 0.7); + --altstack-text-dim: rgba(255, 255, 255, 0.5); + --hero-from: #ffffff; + --hero-to: #94a3b8; +} + +/* ---- Dark mode as default feel ---- */ +html { + color-scheme: dark; +} + +:root { + --nextra-primary-hue: 10deg; +} + +/* Light mode overrides for high contrast */ +html[class~="light"] { + --nextra-bg: #ffffff; + 
--altstack-bg: #ffffff; + --altstack-surface: #f8fafc; + --altstack-surface-elevated: #f1f5f9; + --altstack-border: rgba(0, 0, 0, 0.08); + --altstack-text-dim: #64748b; + --altstack-glass: rgba(255, 255, 255, 0.8); + --hero-from: #0f172a; + --hero-to: #334155; +} + +html[class~="dark"] { + --nextra-bg: var(--altstack-bg); +} + +/* ---- Logo Styling & Animations ---- */ +@keyframes float-red { + + 0%, + 100% { + transform: translateY(0); + } + + 50% { + transform: translateY(-8px); + } +} + +@keyframes float-glass { + + 0%, + 100% { + transform: translateY(0); + } + + 50% { + transform: translateY(-5px); + } +} + +@keyframes float-slate { + + 0%, + 100% { + transform: translateY(0); + } + + 50% { + transform: translateY(-2px); + } +} + +.animate-float-red { + animation: float-red 3s ease-in-out infinite; +} + +.animate-float-glass { + animation: float-glass 3.5s ease-in-out infinite; +} + +.animate-float-slate { + animation: float-slate 4s ease-in-out infinite; +} + +.altstack-logo { + display: flex; + align-items: center; + gap: 0.75rem; + font-weight: 900; + font-size: 1.3rem; + letter-spacing: -0.03em; + color: white; +} + +html[class~="light"] .altstack-logo { + color: #0f172a; +} + +/* ---- Navbar & Sidebar Glassmorphism ---- */ +.nextra-nav-container { + background-color: var(--altstack-glass) !important; + backdrop-filter: blur(12px) !important; + -webkit-backdrop-filter: blur(12px) !important; + border-bottom: 1px solid var(--altstack-border); +} + +.nextra-sidebar-container { + background-color: transparent !important; +} + +/* ---- Home Page Card Overrides (Legacy for now) ---- */ +.nextra-cards { + margin-top: 2rem !important; +} + +/* ---- Custom Grid Classes ---- */ +.premium-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(280px, 1fr)); + gap: 1.5rem; + margin-top: 2.5rem; +} + +.premium-card { + position: relative; + padding: 1.75rem; + background: var(--altstack-surface); + border: 1px solid var(--altstack-border); + 
border-radius: 20px;
+ transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+ text-decoration: none !important;
+ overflow: hidden;
+}
+
+.premium-card:hover {
+ background: var(--altstack-surface-elevated);
+ border-color: rgba(239, 68, 68, 0.3);
+ transform: translateY(-4px);
+ box-shadow: 0 12px 24px -12px rgba(0, 0, 0, 0.5);
+}
+
+.premium-card::before {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ height: 2px;
+ background: linear-gradient(90deg, var(--altstack-red), var(--altstack-orange));
+ opacity: 0;
+ transition: opacity 0.3s ease;
+}
+
+.premium-card:hover::before {
+ opacity: 1;
+}
+
+.premium-card-title {
+ display: flex;
+ align-items: center;
+ gap: 0.75rem;
+ font-size: 1.15rem;
+ font-weight: 700;
+ color: white;
+ margin-bottom: 0.75rem;
+}
+
+html[class~="light"] .premium-card-title {
+ color: #0f172a;
+}
+
+.premium-card-description {
+ font-size: 0.95rem;
+ line-height: 1.6;
+ color: var(--altstack-text-dim);
+}
+
+/* ---- Footer ---- */
+.altstack-footer {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ padding: 3rem 0 2rem;
+ font-size: 0.85rem;
+ color: var(--altstack-text-dim);
+ border-top: 1px solid var(--altstack-border);
+ margin-top: 4rem;
+}
+
+.altstack-footer a {
+ color: var(--altstack-red);
+ text-decoration: none;
+ font-weight: 600;
+}
+
+.altstack-footer a:hover {
+ text-decoration: underline;
+}
+
+.footer-header {
+ color: #0f172a;
+ /* slate-900 */
+}
+
+html[class~="dark"] .footer-header {
+ color: #ffffff !important;
+}
+
+html[class~="light"] .footer-header {
+ color: #0f172a !important;
+}
+
+/* ---- Difficulty badges ---- */
+.badge-beginner {
+ display: inline-flex;
+ align-items: center;
+ gap: 0.3rem;
+ padding: 0.15rem 0.6rem;
+ font-size: 0.75rem;
+ font-weight: 700;
+ color: #22c55e;
+ background: rgba(34, 197, 94, 0.1);
+ border: 1px solid rgba(34, 197, 94, 0.2);
+ border-radius: 9999px;
+}
+
+.badge-intermediate {
+ display: inline-flex;
+ align-items: center;
+ 
gap: 0.3rem; + padding: 0.15rem 0.6rem; + font-size: 0.75rem; + font-weight: 700; + color: #eab308; + background: rgba(234, 179, 8, 0.1); + border: 1px solid rgba(234, 179, 8, 0.2); + border-radius: 9999px; +} + +.badge-advanced { + display: inline-flex; + align-items: center; + gap: 0.3rem; + padding: 0.15rem 0.6rem; + font-size: 0.75rem; + font-weight: 700; + color: #ef4444; + background: rgba(239, 68, 68, 0.1); + border: 1px solid rgba(239, 68, 68, 0.2); + border-radius: 9999px; +} + +/* ---- Hero info bar for deploy guides ---- */ +.deploy-hero { + display: flex; + flex-wrap: wrap; + gap: 0.75rem; + margin: 1rem 0 1.5rem; + padding: 1rem 1.25rem; + background: var(--altstack-surface); + border: 1px solid var(--altstack-border); + border-radius: 16px; + font-size: 0.85rem; + color: rgba(255, 255, 255, 0.6); +} + +.deploy-hero-item { + display: flex; + align-items: center; + gap: 0.35rem; +} + +/* ---- Manual Logo Fix (Robust Override) ---- */ +html[class~="dark"] .manual-logo-text { + color: #ffffff !important; +} + +html[class~="light"] .manual-logo-text { + color: #0f172a !important; + /* slate-900 */ +} + +/* Fill overrides */ +html[class~="dark"] .manual-logo-fill { + fill: rgba(255, 255, 255, 0.1) !important; +} + +html[class~="light"] .manual-logo-fill { + fill: rgba(15, 23, 42, 0.1) !important; +} + +html[class~="dark"] .manual-logo-fill-secondary { + fill: rgba(255, 255, 255, 0.2) !important; +} + +html[class~="light"] .manual-logo-fill-secondary { + fill: rgba(15, 23, 42, 0.2) !important; +} + +/* Stroke overrides */ +html[class~="dark"] .manual-logo-stroke { + stroke: rgba(255, 255, 255, 0.2) !important; +} + +html[class~="light"] .manual-logo-stroke { + stroke: rgba(15, 23, 42, 0.2) !important; +} + +html[class~="dark"] .manual-logo-stroke-secondary { + stroke: rgba(255, 255, 255, 0.3) !important; +} + +html[class~="light"] .manual-logo-stroke-secondary { + stroke: rgba(15, 23, 42, 0.3) !important; +} + +html[class~="dark"] 
.manual-logo-stroke-tertiary { + stroke: rgba(255, 255, 255, 0.4) !important; +} + +html[class~="light"] .manual-logo-stroke-tertiary { + stroke: rgba(15, 23, 42, 0.4) !important; +} + +/* ============================================ + Mobile UI Fixes + ============================================ */ + +/* Fix mobile menu z-index issues to ensure it sits above content */ +.nextra-nav-container, +.nextra-navbar { + z-index: 60 !important; +} + +/* Ensure search and other elements don't overlap the menu */ +.nextra-search-container { + z-index: 40; +} + +/* Adjust mobile menu spacing to prevent overlap with navbar */ +@media (max-width: 768px) { + .nextra-menu-mobile { + padding-top: 4rem; + z-index: 45; + } + + /* Ensure the mobile menu content is scrollable and visible */ + .nextra-menu-mobile .nextra-scrollbar { + padding-bottom: 5rem; + } +} + +/* Force solid background on mobile menu */ +@media (max-width: 768px) { + + .nextra-menu-mobile, + .nextra-mobile-nav { + background-color: var(--altstack-bg) !important; + z-index: 50 !important; + } +} \ No newline at end of file diff --git a/docs/app/icon.tsx b/docs/app/icon.tsx new file mode 100644 index 0000000..efedb85 --- /dev/null +++ b/docs/app/icon.tsx @@ -0,0 +1,60 @@ +import { ImageResponse } from 'next/og'; + + + +export const size = { + width: 32, + height: 32, +}; +export const contentType = 'image/png'; + +export default function Icon() { + return new ImageResponse( + ( +
+ + + + + +
+ ), + { + ...size, + } + ); +} diff --git a/docs/app/layout.tsx b/docs/app/layout.tsx new file mode 100644 index 0000000..036b19c --- /dev/null +++ b/docs/app/layout.tsx @@ -0,0 +1,292 @@ +import { Footer, Layout, Navbar } from 'nextra-theme-docs' +import Link from 'next/link' +import Script from 'next/script' +import { Head } from 'nextra/components' +import { getPageMap } from 'nextra/page-map' +import { Outfit, JetBrains_Mono } from 'next/font/google' +import type { Metadata, Viewport } from 'next' +import type { ReactNode } from 'react' +import 'nextra-theme-docs/style.css' +import './globals.css' + +const outfit = Outfit({ + subsets: ['latin'], + variable: '--font-outfit', +}) + +const jetbrainsMono = JetBrains_Mono({ + subsets: ['latin'], + variable: '--font-mono', +}) + +export const metadata: Metadata = { + metadataBase: new URL('https://docs.thealtstack.com'), + title: { + default: 'AltStack Docs — Self-Hosting Guides & Deploy Recipes', + template: '%s — AltStack Docs', + }, + description: + 'Step-by-step guides to self-host open source software. Docker Compose configs, deployment recipes, and stack-building guides for developers and teams.', + openGraph: { + title: 'AltStack Docs', + description: + 'Self-hosting guides, deploy configs, and stack-building recipes for open source software.', + url: 'https://docs.thealtstack.com', + siteName: 'AltStack Docs', + locale: 'en_US', + type: 'website', + }, + twitter: { + card: 'summary_large_image', + title: 'AltStack Docs', + description: + 'Self-hosting guides, deploy configs, and stack recipes.', + }, +} + +export const viewport: Viewport = { + themeColor: '#050505', +} + +import AIChatLinks from '../components/AIChatLinks' + +function Logo() { + return ( +
+
+ + {/* Bottom Layer (Slate) */} + + + + + + {/* Middle Layer (Glass) */} + + + + + + {/* Top Layer (Red) */} + + + + + + + + + + + + + + + +
+ + AltStackdocs + +
+ ) +} + +const navbar = ( + } + > + + +) + +const footer = ( +
+
+
+
+
+
+ +
+

+ Step-by-step guides to self-host open source software with real configs and zero filler. +

+ +
+ +
+

Documentation

+
    +
  • Quick Start
  • +
  • Deploy Guides
  • +
  • Curated Stacks
  • +
+
+ +
+

About

+ +
+ +
+

Support

+ +
+
+ +
+

+ © {new Date().getFullYear()} The AltStack. Empowering through Open Source. +

+
+
+ + Systems Operational +
+
+
+
+
+
+) + +export default async function RootLayout({ + children, +}: { + children: ReactNode +}) { + return ( + + + + {/* Google Analytics */} + +