[
{
"slug": "firebase",
"name": "Firebase",
"category": "Backend as a Service",
"is_open_source": false,
"pricing_model": "Paid/Freemium",
"website": "https://firebase.google.com",
"description": "Google's app development platform.",
"alternatives": [
"supabase",
"appwrite",
"pocketbase"
],
"tags": [
"Cloud",
"Database",
"Auth"
],
"logo_url": "/logos/firebase.svg",
"avg_monthly_cost": 25,
"pros": [
"Seamless Google ecosystem integration",
"Generous free tier (Spark plan)",
"Real-time database out of the box",
"Excellent mobile SDK support",
"Cloud Functions for serverless logic"
],
"cons": [
"Vendor lock-in to Google",
"Pricing can spike unpredictably at scale",
"Limited query capabilities vs SQL"
]
},
|
|
{
|
|
"slug": "supabase",
|
|
"name": "Supabase",
|
|
"category": "Backend as a Service",
|
|
"is_open_source": true,
|
|
"github_repo": "supabase/supabase",
|
|
"stars": 97401,
|
|
"website": "https://supabase.com",
|
|
"description": "The Postgres development platform. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications.",
|
|
"pros": [
|
|
"Postgres under the hood",
|
|
"No vendor lock-in"
|
|
],
|
|
"cons": [
|
|
"Self-hosting can be complex"
|
|
],
|
|
"last_commit": "2026-02-09T16:09:10Z",
|
|
"language": "TypeScript",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"Database",
|
|
"Realtime",
|
|
"Postgres",
|
|
"AI"
|
|
],
|
|
"logo_url": "/logos/supabase.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/supabase"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "appwrite",
|
|
"name": "Appwrite",
|
|
"category": "Backend as a Service",
|
|
"is_open_source": true,
|
|
"github_repo": "appwrite/appwrite",
|
|
"stars": 54727,
|
|
"website": "https://appwrite.io",
|
|
"description": "Appwrite\u00ae - complete cloud infrastructure for your web, mobile and AI apps. Including Auth, Databases, Storage, Functions, Messaging, Hosting, Realtime and more",
|
|
"pros": [
|
|
"Self-hosted with a single Docker command",
|
|
"Modular architecture \u2014 use only what you need"
|
|
],
|
|
"cons": [
|
|
"Smaller ecosystem than Firebase or Supabase",
|
|
"Limited built-in analytics and reporting"
|
|
],
|
|
"last_commit": "2026-02-09T16:12:32Z",
|
|
"language": "TypeScript",
|
|
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
|
|
"tags": [
|
|
"Database",
|
|
"Auth",
|
|
"Self-Hosted"
|
|
],
|
|
"logo_url": "/logos/appwrite.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/appwrite"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "pocketbase",
|
|
"name": "PocketBase",
|
|
"category": "Backend as a Service",
|
|
"is_open_source": true,
|
|
"github_repo": "pocketbase/pocketbase",
|
|
"website": "https://pocketbase.io",
|
|
"description": "Open Source realtime backend in 1 file",
|
|
"pros": [
|
|
"Ships as a single binary \u2014 no dependencies",
|
|
"Deploy anywhere in seconds with zero config",
|
|
"Embedded SQLite with realtime subscriptions"
|
|
],
|
|
"cons": [
|
|
"SQLite only (for now)"
|
|
],
|
|
"stars": 55980,
|
|
"last_commit": "2026-02-01T08:09:48Z",
|
|
"language": "Go",
|
|
"license": "MIT License",
|
|
"logo_url": "/logos/pocketbase.svg",
|
|
"deployment": {
|
|
"image": "pocketbase/pocketbase:latest",
|
|
"port": 8090,
|
|
"volumes": [
|
|
"./pb_data:/pb/pb_data"
|
|
],
|
|
"command": "serve --http=0.0.0.0:8090",
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/pocketbase"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "salesforce",
|
|
"name": "Salesforce",
|
|
"category": "CRM",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid",
|
|
"avg_monthly_cost": 25,
|
|
"website": "https://salesforce.com",
|
|
"description": "The world's #1 CRM.",
|
|
"alternatives": [
|
|
"odoo",
|
|
"erpnext"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=salesforce.com",
|
|
"pros": [
|
|
"Industry-leading CRM platform",
|
|
"Massive app marketplace (AppExchange)",
|
|
"Highly customizable workflows",
|
|
"Enterprise-grade security and compliance"
|
|
],
|
|
"cons": [
|
|
"Expensive per-seat licensing",
|
|
"Steep learning curve",
|
|
"Heavy and complex for small teams"
|
|
]
|
|
},
|
|
{
|
|
"slug": "slack",
|
|
"name": "Slack",
|
|
"category": "Communication",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid/Freemium",
|
|
"website": "https://slack.com",
|
|
"description": "Team communication platform.",
|
|
"alternatives": [
|
|
"mattermost",
|
|
"rocketchat"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=slack.com",
|
|
"avg_monthly_cost": 12,
|
|
"pros": [
|
|
"Best-in-class team communication UX",
|
|
"Huge integration ecosystem (2,000+ apps)",
|
|
"Powerful search across conversations",
|
|
"Thread-based discussions reduce noise"
|
|
],
|
|
"cons": [
|
|
"Expensive at scale ($8.75+/user/mo)",
|
|
"Can become a constant distraction",
|
|
"Message history limits on free plan"
|
|
]
|
|
},
|
|
{
|
|
"slug": "mattermost",
|
|
"name": "Mattermost",
|
|
"category": "Communication",
|
|
"is_open_source": true,
|
|
"github_repo": "mattermost/mattermost",
|
|
"website": "https://mattermost.com",
|
|
"description": "Mattermost is an open source platform for secure collaboration across the entire software development lifecycle.",
|
|
"pros": [
|
|
"Enterprise-grade security with SOC2 and HIPAA compliance",
|
|
"Granular access control and audit logging",
|
|
"Slack-compatible webhook and bot ecosystem"
|
|
],
|
|
"cons": [
|
|
"Self-hosting maintenance"
|
|
],
|
|
"stars": 35213,
|
|
"last_commit": "2026-02-09T16:03:54Z",
|
|
"language": "TypeScript",
|
|
"license": "Other",
|
|
"logo_url": "/logos/mattermost.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/mattermost"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "rocketchat",
|
|
"name": "Rocket.Chat",
|
|
"category": "Communication",
|
|
"is_open_source": true,
|
|
"github_repo": "RocketChat/Rocket.Chat",
|
|
"website": "https://rocket.chat",
|
|
"description": "The Secure CommsOS\u2122 for mission-critical operations",
|
|
"pros": [
|
|
"Unified inbox with omnichannel support for live chat, email, and social",
|
|
"Highly customizable with white-labeling options",
|
|
"End-to-end encrypted messaging available"
|
|
],
|
|
"cons": [
|
|
"Resource intensive"
|
|
],
|
|
"stars": 44546,
|
|
"last_commit": "2026-02-09T16:20:40Z",
|
|
"language": "TypeScript",
|
|
"license": "Other",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=rocket.chat",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/rocketchat"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "jira",
|
|
"name": "Jira",
|
|
"category": "Project Management",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid",
|
|
"avg_monthly_cost": 15,
|
|
"website": "https://www.atlassian.com/software/jira",
|
|
"description": "Issue tracking and project management tool.",
|
|
"alternatives": [
|
|
"plane",
|
|
"taiga"
|
|
],
|
|
"logo_url": "/logos/jira.svg",
|
|
"pros": [
|
|
"Industry standard for project management",
|
|
"Deep Agile/Scrum/Kanban support",
|
|
"Powerful custom workflows and automation",
|
|
"Extensive integration ecosystem"
|
|
],
|
|
"cons": [
|
|
"Notoriously complex UI",
|
|
"Slow performance with large projects",
|
|
"Expensive for growing teams"
|
|
]
|
|
},
|
|
{
|
|
"slug": "plane",
|
|
"name": "Plane",
|
|
"category": "Project Management",
|
|
"is_open_source": true,
|
|
"github_repo": "makeplane/plane",
|
|
"website": "https://plane.so",
|
|
"description": "\ud83d\udd25\ud83d\udd25\ud83d\udd25 Open-source Jira, Linear, Monday, and ClickUp alternative. Plane is a modern project management platform to manage tasks, sprints, docs, and triage.",
|
|
"pros": [
|
|
"Clean, modern interface inspired by Linear",
|
|
"Blazing fast \u2014 sub-100ms interactions",
|
|
"Built-in cycles, modules, and views"
|
|
],
|
|
"cons": [
|
|
"Still relatively new"
|
|
],
|
|
"stars": 45490,
|
|
"last_commit": "2026-02-09T13:56:47Z",
|
|
"language": "TypeScript",
|
|
"license": "GNU Affero General Public License v3.0",
|
|
"logo_url": "/logos/plane.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/plane"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "taiga",
|
|
"name": "Taiga",
|
|
"category": "Project Management",
|
|
"is_open_source": true,
|
|
"github_repo": "taigaio/taiga-back",
|
|
"website": "https://taiga.io",
|
|
"description": null,
|
|
"pros": [
|
|
"Beautiful kanban and scrum boards with drag-and-drop",
|
|
"Full Agile toolkit: epics, sprints, user stories",
|
|
"Built-in wiki and project documentation"
|
|
],
|
|
"cons": [
|
|
"Complex setup"
|
|
],
|
|
"stars": 807,
|
|
"last_commit": "2026-01-09T07:28:59Z",
|
|
"language": "Python",
|
|
"license": "Mozilla Public License 2.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=taiga.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/taiga"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "zoom",
|
|
"name": "Zoom",
|
|
"category": "Communication",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid/Freemium",
|
|
"avg_monthly_cost": 15,
|
|
"website": "https://zoom.us",
|
|
"description": "Video conferencing platform, cloud phone, webinars, and chat.",
|
|
"alternatives": [
|
|
"jitsi-meet"
|
|
],
|
|
"logo_url": "/logos/zoom.svg",
|
|
"pros": [
|
|
"Reliable video quality even on poor connections",
|
|
"Easy to join without creating an account",
|
|
"Breakout rooms and webinar support for large events",
|
|
"Cross-platform with desktop, mobile, and web apps"
|
|
],
|
|
"cons": [
|
|
"Free plan limited to 40-minute meetings",
|
|
"Privacy concerns and past security issues",
|
|
"Zoom fatigue is real"
|
|
]
|
|
},
|
|
{
|
|
"slug": "jitsi-meet",
|
|
"name": "Jitsi Meet",
|
|
"category": "Communication",
|
|
"is_open_source": true,
|
|
"github_repo": "jitsi/jitsi-meet",
|
|
"website": "https://jitsi.org",
|
|
"description": "Jitsi Meet - Secure, Simple and Scalable Video Conferences that you use as a standalone app or embed in your web application.",
|
|
"pros": [
|
|
"Join calls without creating an account",
|
|
"End-to-end encrypted video conferencing",
|
|
"Scales to hundreds of participants with Jitsi Videobridge"
|
|
],
|
|
"cons": [
|
|
"Performance on large calls"
|
|
],
|
|
"stars": 28562,
|
|
"last_commit": "2026-02-09T12:49:10Z",
|
|
"language": "TypeScript",
|
|
"license": "Apache License 2.0",
|
|
"logo_url": "/logos/jitsi-meet.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/jitsi-meet"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "photoshop",
|
|
"name": "Adobe Photoshop",
|
|
"category": "Design",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Monthly)",
|
|
"avg_monthly_cost": 60,
|
|
"website": "https://www.adobe.com/products/photoshop.html",
|
|
"description": "Industry standard image editing software.",
|
|
"alternatives": [
|
|
"gimp",
|
|
"krita"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.adobe.com",
|
|
"pros": [
|
|
"Industry gold standard for image editing",
|
|
"Unmatched feature depth and precision",
|
|
"Huge plugin and template ecosystem",
|
|
"AI-powered generative fill and selection"
|
|
],
|
|
"cons": [
|
|
"Subscription-only pricing ($22.99/mo)",
|
|
"Steep learning curve for beginners",
|
|
"Resource-heavy \u2014 needs powerful hardware"
|
|
]
|
|
},
|
|
{
|
|
"slug": "gimp",
|
|
"name": "GIMP",
|
|
"category": "Design",
|
|
"is_open_source": true,
|
|
"github_repo": "GNOME/gimp",
|
|
"website": "https://www.gimp.org",
|
|
"description": "Read-only mirror of https://gitlab.gnome.org/GNOME/gimp",
|
|
"pros": [
|
|
"Professional-grade photo editing tools rivaling Photoshop",
|
|
"Extensible with Python and Script-Fu plugins",
|
|
"Cross-platform with native support for PSD, TIFF, and RAW"
|
|
],
|
|
"cons": [
|
|
"Steep learning curve",
|
|
"Dated UI"
|
|
],
|
|
"stars": 5960,
|
|
"last_commit": "2026-02-09T16:20:25Z",
|
|
"language": "C",
|
|
"license": "Other",
|
|
"logo_url": "/logos/gimp.svg",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "krita",
|
|
"name": "Krita",
|
|
"category": "Design",
|
|
"is_open_source": true,
|
|
"github_repo": "KDE/krita",
|
|
"website": "https://krita.org",
|
|
"description": "Krita is a free and open source cross-platform application that offers an end-to-end solution for creating digital art files from scratch built on the KDE and Qt frameworks.",
|
|
"pros": [
|
|
"Modern brush engine with 100+ built-in presets",
|
|
"HDR painting and animation timeline support",
|
|
"Optimized for drawing tablets with pressure sensitivity"
|
|
],
|
|
"cons": [
|
|
"Less focused on photo manipulation"
|
|
],
|
|
"stars": 9333,
|
|
"last_commit": "2026-02-09T13:47:56Z",
|
|
"language": "C++",
|
|
"license": "GNU General Public License v3.0",
|
|
"logo_url": "/logos/krita.svg",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "figma",
|
|
"name": "Figma",
|
|
"category": "Design",
|
|
"is_open_source": false,
|
|
"pricing_model": "Freemium/Paid",
|
|
"website": "https://www.figma.com",
|
|
"description": "Collaborative interface design tool.",
|
|
"alternatives": [
|
|
"penpot"
|
|
],
|
|
"logo_url": "/logos/figma.svg",
|
|
"avg_monthly_cost": 15,
|
|
"pros": [
|
|
"Real-time multiplayer collaboration",
|
|
"Runs entirely in the browser",
|
|
"Excellent component and design system support",
|
|
"Free tier is generous for individuals"
|
|
],
|
|
"cons": [
|
|
"Owned by Adobe (future pricing concerns)",
|
|
"Offline support is limited",
|
|
"Performance with very large files can lag"
|
|
]
|
|
},
|
|
{
|
|
"slug": "penpot",
|
|
"name": "Penpot",
|
|
"category": "Design",
|
|
"is_open_source": true,
|
|
"github_repo": "penpot/penpot",
|
|
"website": "https://penpot.app",
|
|
"description": "Penpot: The open-source design tool for design and code collaboration",
|
|
"pros": [
|
|
"Runs entirely in the browser \u2014 no desktop app needed",
|
|
"SVG-native design \u2014 exports are pixel-perfect at any scale",
|
|
"Real-time multiplayer collaboration"
|
|
],
|
|
"cons": [
|
|
"Newer ecosystem"
|
|
],
|
|
"stars": 44155,
|
|
"last_commit": "2026-02-09T15:47:35Z",
|
|
"language": "Clojure",
|
|
"license": "Mozilla Public License 2.0",
|
|
"logo_url": "/logos/penpot.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/penpot"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "notion",
|
|
"name": "Notion",
|
|
"category": "Productivity",
|
|
"is_open_source": false,
|
|
"pricing_model": "Freemium/Paid",
|
|
"website": "https://www.notion.so",
|
|
"description": "All-in-one workspace.",
|
|
"alternatives": [
|
|
"appflowy",
|
|
"affine"
|
|
],
|
|
"logo_url": "/logos/notion.svg",
|
|
"avg_monthly_cost": 10,
|
|
"pros": [
|
|
"All-in-one workspace (docs, wikis, databases)",
|
|
"Beautiful and intuitive interface",
|
|
"Powerful database views and relations",
|
|
"Great template gallery"
|
|
],
|
|
"cons": [
|
|
"Can be slow with large workspaces",
|
|
"Offline mode is unreliable",
|
|
"No true end-to-end encryption"
|
|
]
|
|
},
|
|
{
|
|
"slug": "appflowy",
|
|
"name": "AppFlowy",
|
|
"category": "Productivity",
|
|
"is_open_source": true,
|
|
"github_repo": "AppFlowy-IO/AppFlowy",
|
|
"website": "https://www.appflowy.io",
|
|
"description": "Bring projects, wikis, and teams together with AI. AppFlowy is the AI collaborative workspace where you achieve more without losing control of your data. The leading open source Notion alternative.",
|
|
"pros": [
|
|
"Local-first architecture \u2014 your data never leaves your machine",
|
|
"Privacy-focused alternative to Notion",
|
|
"Built in Rust for native desktop performance"
|
|
],
|
|
"cons": [
|
|
"No web version (yet)"
|
|
],
|
|
"stars": 68006,
|
|
"last_commit": "2026-01-28T09:20:38Z",
|
|
"language": "Dart",
|
|
"license": "GNU Affero General Public License v3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=www.appflowy.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/appflowy"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "affine",
|
|
"name": "AFFiNE",
|
|
"category": "Productivity",
|
|
"is_open_source": true,
|
|
"github_repo": "toeverything/AFFiNE",
|
|
"website": "https://affine.pro",
|
|
"description": "There can be more than Notion and Miro. AFFiNE(pronounced [\u0259\u2018fain]) is a next-gen knowledge base that brings planning, sorting and creating all together. Privacy first, open-source, customizable and ready to use. ",
|
|
"pros": [
|
|
"Modern block editor with Notion-like feel",
|
|
"Spatial canvas for whiteboarding and visual thinking",
|
|
"Hybrid local-first and cloud sync architecture"
|
|
],
|
|
"cons": [
|
|
"Still in beta"
|
|
],
|
|
"stars": 62693,
|
|
"last_commit": "2026-02-09T11:16:50Z",
|
|
"language": "TypeScript",
|
|
"license": "Other",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=affine.pro",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/affine"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "google-analytics",
|
|
"name": "Google Analytics",
|
|
"category": "Analytics",
|
|
"is_open_source": false,
|
|
"pricing_model": "Free/Paid",
|
|
"website": "https://analytics.google.com",
|
|
"description": "Web analytics service.",
|
|
"alternatives": [
|
|
"plausible",
|
|
"posthog",
|
|
"matomo"
|
|
],
|
|
"logo_url": "/logos/google-analytics.svg",
|
|
"avg_monthly_cost": 150,
|
|
"pros": [
|
|
"Industry-standard reporting with Google Ads and Search Console integration",
|
|
"Advanced audience segmentation and cohort analysis",
|
|
"Free tier handles up to 10M hits per month"
|
|
],
|
|
"cons": [
|
|
"Privacy concerns \u2014 data goes to Google",
|
|
"GA4 migration frustrated many users",
|
|
"Blocked by most ad blockers",
|
|
"Complex for beginners"
|
|
]
|
|
},
|
|
{
|
|
"slug": "plausible",
|
|
"name": "Plausible",
|
|
"category": "Analytics",
|
|
"is_open_source": true,
|
|
"github_repo": "plausible/analytics",
|
|
"website": "https://plausible.io",
|
|
"description": "Simple, open source, lightweight and privacy-friendly web analytics alternative to Google Analytics.",
|
|
"pros": [
|
|
"Fully GDPR compliant with no cookies required",
|
|
"Lightweight script under 1KB \u2014 zero impact on page speed",
|
|
"Clean dashboard that shows what matters, nothing more"
|
|
],
|
|
"cons": [
|
|
"Limited advanced features"
|
|
],
|
|
"stars": 24198,
|
|
"last_commit": "2026-02-09T16:20:52Z",
|
|
"language": "Elixir",
|
|
"license": "GNU Affero General Public License v3.0",
|
|
"tags": [
|
|
"Analytics",
|
|
"Privacy",
|
|
"GDPR"
|
|
],
|
|
"logo_url": "/logos/plausible.svg",
|
|
"deployment": {
|
|
"image": "plausible/analytics:latest",
|
|
"port": 8000,
|
|
"env": [
|
|
{
|
|
"key": "BASE_URL",
|
|
"value": "http://localhost:8000"
|
|
},
|
|
{
|
|
"key": "SECRET_KEY_BASE",
|
|
"value": "REPLACE_WITH_RANDOM_STRING"
|
|
}
|
|
],
|
|
"volumes": [
|
|
"./plausible_db:/var/lib/clickhouse",
|
|
"./plausible_events:/var/lib/postgresql/data"
|
|
],
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/plausible"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "posthog",
|
|
"name": "PostHog",
|
|
"category": "Analytics",
|
|
"is_open_source": true,
|
|
"github_repo": "PostHog/posthog",
|
|
"website": "https://posthog.com",
|
|
"description": "\ud83e\udd94 PostHog is an all-in-one developer platform for building successful products. We offer product analytics, web analytics, session replay, error tracking, feature flags, experimentation, surveys, data warehouse, a CDP, and an AI product assistant to help debug your code, ship features faster, and keep all your usage and customer data in one stack.",
|
|
"pros": [
|
|
"Session recording with heatmaps and click tracking",
|
|
"Built-in feature flags, A/B testing, and surveys",
|
|
"Warehouse-native \u2014 query your data with SQL"
|
|
],
|
|
"cons": [
|
|
"Complex to self-host"
|
|
],
|
|
"stars": 31181,
|
|
"last_commit": "2026-02-09T16:25:10Z",
|
|
"language": "Python",
|
|
"license": "Other",
|
|
"logo_url": "/logos/posthog.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/posthog"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "matomo",
|
|
"name": "Matomo",
|
|
"category": "Analytics",
|
|
"is_open_source": true,
|
|
"github_repo": "matomo-org/matomo",
|
|
"website": "https://matomo.org",
|
|
"description": "Empowering People Ethically \ud83d\ude80 \u2014 Matomo is hiring! Join us \u2192 https://matomo.org/jobs Matomo is the leading open-source alternative to Google Analytics, giving you complete control and built-in privacy. Easily collect, visualise, and analyse data from websites & apps. Star us on GitHub \u2b50\ufe0f \u2013 Pull Requests welcome! ",
|
|
"pros": [
|
|
"Feature-rich analytics rivaling Google Analytics",
|
|
"GDPR and CCPA compliant out of the box",
|
|
"Heatmaps, session recordings, and funnel analysis included"
|
|
],
|
|
"cons": [
|
|
"UI feels dated"
|
|
],
|
|
"stars": 21270,
|
|
"last_commit": "2026-02-09T15:36:30Z",
|
|
"language": "PHP",
|
|
"license": "GNU General Public License v3.0",
|
|
"logo_url": "/logos/matomo.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/matomo"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "1password",
|
|
"name": "1Password",
|
|
"category": "Security",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid",
|
|
"website": "https://1password.com",
|
|
"description": "Password manager.",
|
|
"alternatives": [
|
|
"bitwarden",
|
|
"keepassxc"
|
|
],
|
|
"logo_url": "/logos/1password.svg",
|
|
"avg_monthly_cost": 8,
|
|
"pros": [
|
|
"Excellent cross-platform support",
|
|
"Travel Mode hides sensitive vaults",
|
|
"Watchtower alerts for compromised passwords",
|
|
"Family and team sharing built in"
|
|
],
|
|
"cons": [
|
|
"No free tier ($2.99/mo minimum)",
|
|
"Cloud-only \u2014 no self-hosting option",
|
|
"Subscription model with no lifetime option"
|
|
]
|
|
},
|
|
{
|
|
"slug": "bitwarden",
|
|
"name": "Bitwarden",
|
|
"category": "Security",
|
|
"is_open_source": true,
|
|
"github_repo": "bitwarden/server",
|
|
"website": "https://bitwarden.com",
|
|
"description": "Bitwarden infrastructure/backend (API, database, Docker, etc).",
|
|
"pros": [
|
|
"Independently audited security with full transparency reports",
|
|
"Cross-platform apps for every OS, browser, and device",
|
|
"Organization vaults with fine-grained sharing controls"
|
|
],
|
|
"cons": [
|
|
"UI is functional but basic"
|
|
],
|
|
"stars": 18027,
|
|
"last_commit": "2026-02-09T15:52:04Z",
|
|
"language": "C#",
|
|
"license": "Other",
|
|
"logo_url": "/logos/bitwarden.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/bitwarden"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "keepassxc",
|
|
"name": "KeePassXC",
|
|
"category": "Security",
|
|
"is_open_source": true,
|
|
"github_repo": "keepassxreboot/keepassxc",
|
|
"website": "https://keepassxc.org",
|
|
"description": "KeePassXC is a cross-platform community-driven port of the Windows application \u201cKeePass Password Safe\u201d.",
|
|
"pros": [
|
|
"Fully offline \u2014 database stored locally with AES-256 encryption",
|
|
"No cloud dependency \u2014 you control the sync method",
|
|
"Browser integration via KeePassXC-Browser extension"
|
|
],
|
|
"cons": [
|
|
"No automatic sync (requires Dropbox/Syncthing)"
|
|
],
|
|
"stars": 25810,
|
|
"last_commit": "2026-01-18T15:46:48Z",
|
|
"language": "C++",
|
|
"license": "Other",
|
|
"logo_url": "/logos/keepassxc.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/keepassxc"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "heroku",
|
|
"name": "Heroku",
|
|
"category": "DevOps",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid",
|
|
"avg_monthly_cost": 8,
|
|
"website": "https://heroku.com",
|
|
"description": "Platform as a service.",
|
|
"alternatives": [
|
|
"coolify",
|
|
"dokku"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=heroku.com",
|
|
"pros": [
|
|
"Dead-simple deployment (git push)",
|
|
"Great for prototypes and MVPs",
|
|
"Managed Postgres included",
|
|
"Add-ons marketplace for common services"
|
|
],
|
|
"cons": [
|
|
"Eliminated free tier in 2022",
|
|
"Expensive at scale vs VPS",
|
|
"Limited container customization",
|
|
"Owned by Salesforce (less innovation)"
|
|
]
|
|
},
|
|
{
|
|
"slug": "coolify",
|
|
"name": "Coolify",
|
|
"category": "DevOps",
|
|
"is_open_source": true,
|
|
"github_repo": "coollabsio/coolify",
|
|
"website": "https://coolify.io",
|
|
"description": "An open-source, self-hostable PaaS alternative to Vercel, Heroku & Netlify that lets you easily deploy static sites, databases, full-stack applications and 280+ one-click services on your own servers.",
|
|
"pros": [
|
|
"Polished, beautiful dashboard that rivals Vercel and Netlify",
|
|
"Deploy anything \u2014 Docker, static sites, databases, services",
|
|
"Automatic SSL, backups, and monitoring included"
|
|
],
|
|
"cons": [
|
|
"One-man project (mostly)"
|
|
],
|
|
"stars": 50421,
|
|
"last_commit": "2026-02-09T16:01:12Z",
|
|
"language": "PHP",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"DevOps",
|
|
"PaaS",
|
|
"Self-Hosted"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coolify.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/coolify"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "sap",
|
|
"name": "SAP S/4HANA",
|
|
"category": "ERP",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Enterprise)",
|
|
"avg_monthly_cost": 100,
|
|
"website": "https://www.sap.com",
|
|
"description": "The world leader in enterprise resource planning software.",
|
|
"alternatives": [
|
|
"odoo",
|
|
"erpnext"
|
|
],
|
|
"logo_url": "/logos/sap.svg",
|
|
"pros": [
|
|
"Enterprise ERP market leader",
|
|
"Handles massive organizational complexity",
|
|
"Deep industry-specific solutions",
|
|
"Strong compliance and audit trails"
|
|
],
|
|
"cons": [
|
|
"Extremely expensive to implement",
|
|
"Implementation takes months to years",
|
|
"Requires specialized consultants",
|
|
"Overkill for SMBs"
|
|
]
|
|
},
|
|
{
|
|
"slug": "odoo",
|
|
"name": "Odoo",
|
|
"category": "ERP",
|
|
"is_open_source": true,
|
|
"github_repo": "odoo/odoo",
|
|
"stars": 48919,
|
|
"website": "https://www.odoo.com",
|
|
"description": "A suite of open source business apps: CRM, eCommerce, accounting, manufacturing, warehouse, and more.",
|
|
"pros": [
|
|
"All-in-one suite covering CRM, HR, inventory, and accounting",
|
|
"Modular app marketplace with 30,000+ extensions",
|
|
"Dual licensing \u2014 Community (free) and Enterprise"
|
|
],
|
|
"cons": [
|
|
"Can be complex to customize",
|
|
"Enterprise features are paid"
|
|
],
|
|
"last_commit": "2026-02-09T16:18:46Z",
|
|
"language": "Python",
|
|
"license": "LGPL-3.0",
|
|
"logo_url": "/logos/odoo.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/odoo"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "erpnext",
|
|
"name": "ERPNext",
|
|
"category": "ERP",
|
|
"is_open_source": true,
|
|
"github_repo": "frappe/erpnext",
|
|
"website": "https://erpnext.com",
|
|
"description": "A free and open-source integrated Enterprise Resource Planning (ERP) software.",
|
|
"pros": [
|
|
"Fully open source",
|
|
"No licensing fees"
|
|
],
|
|
"cons": [
|
|
"Steep learning curve"
|
|
],
|
|
"stars": 31635,
|
|
"last_commit": "2026-02-09T15:52:29Z",
|
|
"language": "Python",
|
|
"license": "GNU General Public License v3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=erpnext.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/erpnext"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "autocad",
|
|
"name": "AutoCAD",
|
|
"category": "CAD",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Subscription)",
|
|
"website": "https://www.autodesk.com/products/autocad",
|
|
"description": "Professional computer-aided design (CAD) and drafting software.",
|
|
"alternatives": [
|
|
"librecad",
|
|
"freecad"
|
|
],
|
|
"logo_url": "/logos/autocad.svg",
|
|
"avg_monthly_cost": 75,
|
|
"pros": [
|
|
"Industry standard for CAD/engineering",
|
|
"Precise 2D and 3D modeling",
|
|
"Extensive library of tools and templates",
|
|
"Strong file format compatibility"
|
|
],
|
|
"cons": [
|
|
"Expensive subscription ($1,975/yr)",
|
|
"Steep learning curve",
|
|
"Resource-intensive \u2014 needs workstation hardware"
|
|
]
|
|
},
|
|
{
|
|
"slug": "librecad",
|
|
"name": "LibreCAD",
|
|
"category": "CAD",
|
|
"is_open_source": true,
|
|
"github_repo": "LibreCAD/LibreCAD",
|
|
"stars": 6500,
|
|
"website": "https://librecad.org",
|
|
"description": "A mature, feature-rich 2D CAD application with a loyal user community.",
|
|
"pros": [
|
|
"Purpose-built lightweight 2D CAD application",
|
|
"Native DXF support for industry-standard file exchange",
|
|
"Cross-platform with minimal system requirements"
|
|
],
|
|
"cons": [
|
|
"2D only"
|
|
],
|
|
"last_commit": "2026-02-05T10:00:00Z",
|
|
"language": "C++",
|
|
"license": "GPLv2",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=librecad.org",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "freecad",
|
|
"name": "FreeCAD",
|
|
"category": "CAD",
|
|
"is_open_source": true,
|
|
"github_repo": "FreeCAD/FreeCAD",
|
|
"stars": 21000,
|
|
"website": "https://www.freecad.org",
|
|
"description": "A general-purpose parametric 3D CAD modeler and a BIM software application.",
|
|
"pros": [
|
|
"Full parametric 3D modeling with constraint-based sketcher",
|
|
"Extensible 3D capabilities for mechanical engineering, architecture, and BIM",
|
|
"Python scripting and macro system for automation"
|
|
],
|
|
"cons": [
|
|
"UI learning curve"
|
|
],
|
|
"last_commit": "2026-02-08T14:00:00Z",
|
|
"language": "C++",
|
|
"license": "LGPLv2+",
|
|
"logo_url": "/logos/freecad.svg",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "zapier",
|
|
"name": "Zapier",
|
|
"category": "Automation",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Task-based)",
|
|
"website": "https://zapier.com",
|
|
"description": "The pioneer in workflow automation for everyone.",
|
|
"alternatives": [
|
|
"n8n",
|
|
"activepieces"
|
|
],
|
|
"logo_url": "/logos/zapier.svg",
|
|
"avg_monthly_cost": 20,
|
|
"pros": [
|
|
"Connect 6,000+ apps without code",
|
|
"Easy visual workflow builder",
|
|
"Reliable trigger-based automation",
|
|
"Good for non-technical users"
|
|
],
|
|
"cons": [
|
|
"Gets expensive fast (per-task pricing)",
|
|
"Limited logic and branching on lower tiers",
|
|
"5-minute polling delay on some triggers"
|
|
]
|
|
},
|
|
{
|
|
"slug": "n8n",
|
|
"name": "n8n",
|
|
"category": "Automation",
|
|
"is_open_source": true,
|
|
"github_repo": "n8n-io/n8n",
|
|
"stars": 49000,
|
|
"website": "https://n8n.io",
|
|
"description": "Fair-code workflow automation tool. Easily automate tasks across different services.",
|
|
"pros": [
|
|
"Self-hosted workflow automation with 400+ integrations",
|
|
"Visual node-based editor for complex multi-step workflows",
|
|
"JavaScript/Python code nodes for custom logic"
|
|
],
|
|
"cons": [
|
|
"Requires hosting knowledge"
|
|
],
|
|
"last_commit": "2026-02-09T15:00:00Z",
|
|
"language": "TypeScript",
|
|
"license": "Sustainable Use License",
|
|
"logo_url": "/logos/n8n.svg",
|
|
"deployment": {
|
|
"image": "n8nio/n8n",
|
|
"port": 5678,
|
|
"env": [
|
|
{
|
|
"key": "N8N_BASIC_AUTH_ACTIVE",
|
|
"value": "true"
|
|
},
|
|
{
|
|
"key": "N8N_BASIC_AUTH_USER",
|
|
"value": "admin"
|
|
},
|
|
{
|
|
"key": "N8N_BASIC_AUTH_PASSWORD",
|
|
"value": "password"
|
|
}
|
|
],
|
|
"volumes": [
|
|
"./n8n_data:/home/node/.n8n"
|
|
],
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/n8n"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "activepieces",
|
|
"name": "Activepieces",
|
|
"category": "Automation",
|
|
"is_open_source": true,
|
|
"github_repo": "activepieces/activepieces",
|
|
"stars": 11000,
|
|
"website": "https://www.activepieces.com",
|
|
"description": "Open source alternative to Zapier. Automate your work with 200+ apps.",
|
|
"pros": [
|
|
"Beginner-friendly UI with a low learning curve",
|
|
"Open-source and self-hostable with Docker",
|
|
"Growing library of community-built connectors"
|
|
],
|
|
"cons": [
|
|
"Smaller connector library than Zapier"
|
|
],
|
|
"last_commit": "2026-02-09T16:00:00Z",
|
|
"language": "TypeScript",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=activepieces.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/activepieces"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "tableau",
|
|
"name": "Tableau",
|
|
"category": "Analytics",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Seat-based)",
|
|
"avg_monthly_cost": 70,
|
|
"website": "https://www.tableau.com",
|
|
"description": "Powerful data visualization and business intelligence platform.",
|
|
"alternatives": [
|
|
"metabase",
|
|
"superset"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tableau.com",
|
|
"pros": [
|
|
"Best-in-class data visualization",
|
|
"Drag-and-drop dashboard creation",
|
|
"Handles massive datasets well",
|
|
"Strong community and learning resources"
|
|
],
|
|
"cons": [
|
|
"Expensive licensing ($70+/user/mo)",
|
|
"Requires a data warehouse setup",
|
|
"Desktop app feels dated"
|
|
]
|
|
},
|
|
{
|
|
"slug": "metabase",
|
|
"name": "Metabase",
|
|
"category": "Analytics",
|
|
"is_open_source": true,
|
|
"github_repo": "metabase/metabase",
|
|
"stars": 38000,
|
|
"website": "https://www.metabase.com",
|
|
"description": "The simplest, fastest way to get business intelligence and analytics throughout your company.",
|
|
"pros": [
|
|
"Extremely user friendly",
|
|
"Easy query builder"
|
|
],
|
|
"cons": [
|
|
"Advanced visualizations can be limited"
|
|
],
|
|
"last_commit": "2026-02-09T14:30:00Z",
|
|
"language": "Clojure",
|
|
"license": "AGPLv3",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=metabase.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/metabase"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "superset",
|
|
"name": "Apache Superset",
|
|
"category": "Analytics",
|
|
"is_open_source": true,
|
|
"github_repo": "apache/superset",
|
|
"stars": 59000,
|
|
"website": "https://superset.apache.org",
|
|
"description": "Enterprise-ready business intelligence web application.",
|
|
"pros": [
|
|
"Scaling to petabytes",
|
|
"Huge variety of charts"
|
|
],
|
|
"cons": [
|
|
"Complex configuration"
|
|
],
|
|
"last_commit": "2026-02-09T12:00:00Z",
|
|
"language": "Python",
|
|
"license": "Apache 2.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=superset.apache.org",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/superset"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "auth0",
|
|
"name": "Auth0",
|
|
"category": "Security",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (MAU-based)",
|
|
"website": "https://auth0.com",
|
|
"description": "The leading authentication and authorization platform.",
|
|
"alternatives": [
|
|
"keycloak",
|
|
"authentik"
|
|
],
|
|
"logo_url": "/logos/auth0.svg",
|
|
"avg_monthly_cost": 23,
|
|
"pros": [
|
|
"Feature-rich authentication platform",
|
|
"Social login, MFA, and SSO out of the box",
|
|
"Extensive SDK support across languages",
|
|
"Rules and hooks for custom auth logic"
|
|
],
|
|
"cons": [
|
|
"Pricing jumps sharply after free tier",
|
|
"Can be complex to configure properly",
|
|
"Owned by Okta \u2014 consolidation concerns"
|
|
]
|
|
},
|
|
{
|
|
"slug": "keycloak",
|
|
"name": "Keycloak",
|
|
"category": "Security",
|
|
"is_open_source": true,
|
|
"github_repo": "keycloak/keycloak",
|
|
"stars": 23000,
|
|
"website": "https://www.keycloak.org",
|
|
"description": "Open source identity and access management for modern applications and services.",
|
|
"pros": [
|
|
"Enterprise-standard identity provider supporting SAML and OIDC",
|
|
"Federated identity with social login and LDAP integration",
|
|
"Battle-tested by Red Hat in production environments"
|
|
],
|
|
"cons": [
|
|
"UI can be clunky",
|
|
"Heavy resource usage"
|
|
],
|
|
"last_commit": "2026-02-09T16:30:00Z",
|
|
"language": "Java",
|
|
"license": "Apache 2.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=keycloak.org",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/keycloak"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "okta",
|
|
"name": "Okta",
|
|
"category": "Security",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (User-based)",
|
|
"website": "https://okta.com",
|
|
"description": "The World's Identity Company, providing enterprise-grade IAM.",
|
|
"alternatives": [
|
|
"authentik",
|
|
"keycloak"
|
|
],
|
|
"logo_url": "/logos/okta.svg",
|
|
"avg_monthly_cost": 6,
|
|
"pros": [
|
|
"Enterprise SSO and identity management leader",
|
|
"Strong security and compliance certifications",
|
|
"Universal directory for user management",
|
|
"Extensive pre-built integrations"
|
|
],
|
|
"cons": [
|
|
"Very expensive for small teams",
|
|
"Admin interface has a learning curve",
|
|
"Overkill for simple auth needs"
|
|
]
|
|
},
|
|
{
|
|
"slug": "authentik",
|
|
"name": "Authentik",
|
|
"category": "Security",
|
|
"is_open_source": true,
|
|
"github_repo": "goauthentik/authentik",
|
|
"stars": 15000,
|
|
"website": "https://goauthentik.io",
|
|
"description": "The overall-best open-source identity provider, focused on flexibility and versatility.",
|
|
"pros": [
|
|
"Modern, intuitive admin interface with drag-and-drop flows",
|
|
"Easy customization of login pages and branding",
|
|
"Supports SAML, OAuth2, LDAP proxy, and SCIM"
|
|
],
|
|
"cons": [
|
|
"Smaller community than Keycloak"
|
|
],
|
|
"last_commit": "2026-02-09T17:00:00Z",
|
|
"language": "Python",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=goauthentik.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/authentik"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "s3",
|
|
"name": "Amazon S3",
|
|
"category": "Cloud Infrastructure",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Usage-based)",
|
|
"website": "https://aws.amazon.com/s3",
|
|
"description": "Object storage built to retrieve any amount of data from anywhere.",
|
|
"alternatives": [
|
|
"garage",
|
|
"seaweedfs",
|
|
"ceph",
|
|
"rustfs"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=aws.amazon.com",
|
|
"avg_monthly_cost": 23,
|
|
"pros": [
|
|
"99.999999999% durability (11 nines)",
|
|
"Scales to virtually unlimited storage",
|
|
"Pay only for what you use",
|
|
"Industry standard \u2014 everything integrates with it"
|
|
],
|
|
"cons": [
|
|
"Egress costs can surprise you",
|
|
"Complex IAM/bucket policy configuration",
|
|
"Vendor lock-in to AWS ecosystem"
|
|
]
|
|
},
|
|
{
|
|
"slug": "garage",
|
|
"name": "Garage",
|
|
"category": "Cloud Infrastructure",
|
|
"is_open_source": true,
|
|
"github_repo": "deuxfleurs-org/garage",
|
|
"stars": 3500,
|
|
"website": "https://garagehq.deuxfleurs.fr/",
|
|
"description": "An open-source distributed object storage service tailored for self-hosting.",
|
|
"pros": [
|
|
"True open-source (AGPLv3)",
|
|
"Lightweight and runs anywhere",
|
|
"Built-in replication and cluster management"
|
|
],
|
|
"cons": [
|
|
"Lacks some enterprise features of MinIO"
|
|
],
|
|
"last_commit": "2024-03-01T00:00:00Z",
|
|
"language": "Rust",
|
|
"license": "AGPLv3",
|
|
"logo_url": "/logos/garage.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/garage"
|
|
}
|
|
},
|
|
{
|
|
"slug": "zendesk",
|
|
"name": "Zendesk",
|
|
"category": "Support",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Agent-based)",
|
|
"avg_monthly_cost": 19,
|
|
"website": "https://www.zendesk.com",
|
|
"description": "The leader in customer service and engagement software.",
|
|
"alternatives": [
|
|
"zammad"
|
|
],
|
|
"logo_url": "/logos/zendesk.svg",
|
|
"pros": [
|
|
"Comprehensive customer support platform",
|
|
"Omnichannel support (email, chat, phone)",
|
|
"Powerful ticket management and routing",
|
|
"Large marketplace of integrations"
|
|
],
|
|
"cons": [
|
|
"Expensive per-agent pricing",
|
|
"UI can feel bloated and slow",
|
|
"Basic plans lack important features"
|
|
]
|
|
},
|
|
{
|
|
"slug": "zammad",
|
|
"name": "Zammad",
|
|
"category": "Support",
|
|
"is_open_source": true,
|
|
"github_repo": "zammad/zammad",
|
|
"stars": 5000,
|
|
"website": "https://zammad.org",
|
|
"description": "A web-based, open source helpdesk/customer support system with many features.",
|
|
"pros": [
|
|
"Omnichannel helpdesk with email, phone, chat, and social media",
|
|
"Full-text search with Elasticsearch integration",
|
|
"Customizable ticket workflows and SLA management"
|
|
],
|
|
"cons": [
|
|
"Ruby hosting can be tricky"
|
|
],
|
|
"last_commit": "2026-02-09T11:00:00Z",
|
|
"language": "Ruby",
|
|
"license": "AGPLv3",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=zammad.org",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/zammad"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "workday",
|
|
"name": "Workday",
|
|
"category": "HR",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Enterprise)",
|
|
"avg_monthly_cost": 45,
|
|
"website": "https://www.workday.com",
|
|
"description": "Enterprise management cloud for finance and human resources.",
|
|
"alternatives": [
|
|
"orangehrm"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=workday.com",
|
|
"pros": [
|
|
"Leading cloud HR and finance platform",
|
|
"Strong workforce analytics",
|
|
"Regular feature updates included",
|
|
"Built for enterprise compliance"
|
|
],
|
|
"cons": [
|
|
"Extremely expensive to implement",
|
|
"Long implementation timelines",
|
|
"Complex for smaller organizations"
|
|
]
|
|
},
|
|
{
|
|
"slug": "orangehrm",
|
|
"name": "OrangeHRM",
|
|
"category": "HR",
|
|
"is_open_source": true,
|
|
"github_repo": "orangehrm/orangehrm",
|
|
"stars": 1200,
|
|
"website": "https://www.orangehrm.com",
|
|
"description": "The world's most popular open source human resource management software.",
|
|
"pros": [
|
|
"Comprehensive HR suite covering recruitment, leave, and performance",
|
|
"Highly customizable with module-based architecture",
|
|
"Employee self-service portal for time-off and expenses"
|
|
],
|
|
"cons": [
|
|
"UI feels a bit dated",
|
|
"Enterprise features are paid"
|
|
],
|
|
"last_commit": "2026-02-09T10:00:00Z",
|
|
"language": "PHP",
|
|
"license": "GPLv2",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=orangehrm.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/orangehrm"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "m365",
|
|
"name": "Microsoft 365",
|
|
"category": "Productivity",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Subscription)",
|
|
"website": "https://www.office.com",
|
|
"description": "The world's most popular office suite and cloud collaboration platform.",
|
|
"alternatives": [
|
|
"onlyoffice"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=office.com",
|
|
"avg_monthly_cost": 12,
|
|
"pros": [
|
|
"Full productivity suite (Word, Excel, Teams)",
|
|
"Deep enterprise integration",
|
|
"1TB OneDrive storage included",
|
|
"Regular AI feature updates (Copilot)"
|
|
],
|
|
"cons": [
|
|
"Subscription fatigue \u2014 perpetual payments",
|
|
"Teams can be resource-heavy",
|
|
"Complex licensing tiers"
|
|
]
|
|
},
|
|
{
|
|
"slug": "onlyoffice",
|
|
"name": "ONLYOFFICE",
|
|
"category": "Productivity",
|
|
"is_open_source": true,
|
|
"github_repo": "ONLYOFFICE/DocumentServer",
|
|
"stars": 11000,
|
|
"website": "https://www.onlyoffice.com",
|
|
"description": "Powerful online document editors for text, spreadsheets, and presentations. Highly compatible with MS Office.",
|
|
"pros": [
|
|
"Full-featured collaborative editing for docs, sheets, and slides",
|
|
"Drop-in MS Office compatibility with high-fidelity rendering",
|
|
"Self-hosted integration with Nextcloud, Seafile, and more"
|
|
],
|
|
"cons": [
|
|
"Self-hosting can be complex"
|
|
],
|
|
"last_commit": "2026-02-09T15:30:00Z",
|
|
"language": "JavaScript",
|
|
"license": "AGPLv3",
|
|
"logo_url": "/logos/onlyoffice.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/onlyoffice"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "shopify",
|
|
"name": "Shopify",
|
|
"category": "E-commerce",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Subscription)",
|
|
"website": "https://www.shopify.com",
|
|
"description": "Commercial platform that allows anyone to set up an online store.",
|
|
"alternatives": [
|
|
"medusa"
|
|
],
|
|
"logo_url": "/logos/shopify.svg",
|
|
"avg_monthly_cost": 39,
|
|
"pros": [
|
|
"Easiest way to start selling online",
|
|
"Beautiful themes and fast checkout",
|
|
"Apps for almost any e-commerce need",
|
|
"Handles payments, shipping, and taxes"
|
|
],
|
|
"cons": [
|
|
"Transaction fees unless using Shopify Payments",
|
|
"Monthly costs add up with apps",
|
|
"Limited customization vs self-hosted solutions"
|
|
]
|
|
},
|
|
{
|
|
"slug": "medusa",
|
|
"name": "Medusa.js",
|
|
"category": "E-commerce",
|
|
"is_open_source": true,
|
|
"github_repo": "medusajs/medusa",
|
|
"stars": 24000,
|
|
"website": "https://medusajs.com",
|
|
"description": "The open-source alternative to Shopify. Building blocks for digital commerce.",
|
|
"pros": [
|
|
"Headless commerce with extreme flexibility for custom storefronts",
|
|
"Plugin architecture for payments, fulfillment, and CMS",
|
|
"Multi-region and multi-currency support built in"
|
|
],
|
|
"cons": [
|
|
"Requires developer knowledge"
|
|
],
|
|
"last_commit": "2026-02-09T16:45:00Z",
|
|
"language": "TypeScript",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=medusajs.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/medusa"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "docusign",
|
|
"name": "DocuSign",
|
|
"category": "Legal",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Envelope-based)",
|
|
"website": "https://www.docusign.com",
|
|
"description": "The world's #1 way to sign electronically on practically any device, from almost anywhere, at any time.",
|
|
"alternatives": [
|
|
"documenso"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=docusign.com",
|
|
"avg_monthly_cost": 25,
|
|
"pros": [
|
|
"Industry standard for e-signatures",
|
|
"Legally binding in most countries",
|
|
"Workflow automation for document routing",
|
|
"Strong mobile experience"
|
|
],
|
|
"cons": [
|
|
"Expensive for occasional use",
|
|
"UI feels dated compared to competitors",
|
|
"Limited free tier"
|
|
]
|
|
},
|
|
{
|
|
"slug": "documenso",
|
|
"name": "Documenso",
|
|
"category": "Legal",
|
|
"is_open_source": true,
|
|
"github_repo": "documenso/documenso",
|
|
"stars": 8000,
|
|
"website": "https://documenso.com",
|
|
"description": "The open-source DocuSign alternative. We aim to be the world's most trusted document signing platform.",
|
|
"pros": [
|
|
"Self-hosted digital signatures with full audit trail",
|
|
"Developer-friendly API and webhook integration",
|
|
"Beautiful, modern signing experience"
|
|
],
|
|
"cons": [
|
|
"Newer ecosystem"
|
|
],
|
|
"last_commit": "2026-02-10T09:00:00Z",
|
|
"language": "TypeScript",
|
|
"license": "AGPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=documenso.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/documenso"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mailchimp",
|
|
"name": "Mailchimp",
|
|
"category": "Marketing",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Contact-based)",
|
|
"website": "https://mailchimp.com",
|
|
"description": "All-in-one marketing platform that helps you manage and talk to your clients, customers, and other interested parties.",
|
|
"alternatives": [
|
|
"listmonk",
|
|
"mautic"
|
|
],
|
|
"logo_url": "/logos/mailchimp.svg",
|
|
"avg_monthly_cost": 13,
|
|
"pros": [
|
|
"Beginner-friendly email marketing",
|
|
"Good free tier for small lists",
|
|
"Built-in landing page builder",
|
|
"Detailed campaign analytics"
|
|
],
|
|
"cons": [
|
|
"Pricing increases steeply with list size",
|
|
"Owned by Intuit (less indie-friendly)",
|
|
"Template editor is limiting"
|
|
]
|
|
},
|
|
{
|
|
"slug": "listmonk",
|
|
"name": "Listmonk",
|
|
"category": "Marketing",
|
|
"is_open_source": true,
|
|
"github_repo": "knadh/listmonk",
|
|
"stars": 19000,
|
|
"website": "https://listmonk.app",
|
|
"description": "High performance, self-hosted newsletter and mailing list manager with a modern dashboard.",
|
|
"pros": [
|
|
"Handles millions of subscribers with blazing fast performance",
|
|
"Templating engine with rich media and personalization",
|
|
"Manages bounces, unsubscribes, and analytics automatically"
|
|
],
|
|
"cons": [
|
|
"No built-in sending (needs SMTP/SES)"
|
|
],
|
|
"last_commit": "2026-02-05T12:00:00Z",
|
|
"language": "Go",
|
|
"license": "AGPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=listmonk.app",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/listmonk"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mautic",
|
|
"name": "Mautic",
|
|
"category": "Marketing",
|
|
"is_open_source": true,
|
|
"github_repo": "mautic/mautic",
|
|
"stars": 7000,
|
|
"website": "https://www.mautic.org",
|
|
"description": "World's largest open source marketing automation project.",
|
|
"pros": [
|
|
"Full marketing automation with CRM-grade contact management",
|
|
"Visual campaign builder with multi-channel triggers",
|
|
"Email, SMS, and social media campaign support"
|
|
],
|
|
"cons": [
|
|
"Complex setup and maintenance"
|
|
],
|
|
"last_commit": "2026-02-09T18:00:00Z",
|
|
"language": "PHP",
|
|
"license": "GPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mautic.org",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/mautic"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "statuspage",
|
|
"name": "Statuspage",
|
|
"category": "Monitoring",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Atlassian)",
|
|
"website": "https://www.atlassian.com/software/statuspage",
|
|
"description": "The best way to communicate status and downtime to your customers.",
|
|
"alternatives": [
|
|
"uptime-kuma"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=atlassian.com",
|
|
"avg_monthly_cost": 29,
|
|
"pros": [
|
|
"Clean, professional status pages",
|
|
"Integrated incident management",
|
|
"Email/SMS subscriber notifications",
|
|
"Atlassian ecosystem integration"
|
|
],
|
|
"cons": [
|
|
"Expensive for what it does ($29+/mo)",
|
|
"Limited customization options",
|
|
"Overkill if you just need a simple status page"
|
|
]
|
|
},
|
|
{
|
|
"slug": "uptime-kuma",
|
|
"name": "Uptime Kuma",
|
|
"category": "Monitoring",
|
|
"is_open_source": true,
|
|
"github_repo": "louislam/uptime-kuma",
|
|
"stars": 55000,
|
|
"website": "https://uptime.kuma.pet",
|
|
"description": "A fancy self-hosted monitoring tool.",
|
|
"pros": [
|
|
"Beautiful, real-time monitoring dashboard",
|
|
"Multi-protocol support: HTTP, TCP, DNS, Docker, and more",
|
|
"Notification integrations with 90+ services including Slack, Discord, and Telegram"
|
|
],
|
|
"cons": [
|
|
"Self-hosted only (no official managed hosting)"
|
|
],
|
|
"last_commit": "2026-02-10T08:00:00Z",
|
|
"language": "JavaScript",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=uptime.kuma.pet",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/uptime-kuma"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "datadog",
|
|
"name": "Datadog",
|
|
"category": "Monitoring",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Usage-based)",
|
|
"website": "https://www.datadoghq.com",
|
|
"description": "Modern monitoring and security that gives you full visibility into your applications and infrastructure.",
|
|
"alternatives": [
|
|
"signoz"
|
|
],
|
|
"logo_url": "/logos/datadog.svg",
|
|
"avg_monthly_cost": 23,
|
|
"pros": [
|
|
"Comprehensive observability platform",
|
|
"APM, logs, metrics in one place",
|
|
"Excellent dashboards and alerting",
|
|
"Supports 750+ integrations"
|
|
],
|
|
"cons": [
|
|
"Notoriously expensive at scale",
|
|
"Complex pricing model (per host, per GB)",
|
|
"Can become a significant budget item"
|
|
]
|
|
},
|
|
{
|
|
"slug": "signoz",
|
|
"name": "SigNoz",
|
|
"category": "Monitoring",
|
|
"is_open_source": true,
|
|
"github_repo": "signoz/signoz",
|
|
"stars": 18000,
|
|
"website": "https://signoz.io",
|
|
"description": "Open source observability platform. SigNoz helps developers monitor applications and troubleshoot problems.",
|
|
"pros": [
|
|
"Unified metrics, traces, and logs in a single platform",
|
|
"OpenTelemetry native \u2014 no proprietary agents required",
|
|
"ClickHouse-powered for fast queries at scale"
|
|
],
|
|
"cons": [
|
|
"High resource usage (ClickHouse)"
|
|
],
|
|
"last_commit": "2026-02-09T20:00:00Z",
|
|
"language": "Go",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=signoz.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/signoz"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "typeform",
|
|
"name": "Typeform",
|
|
"category": "Productivity",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Response-based)",
|
|
"website": "https://www.typeform.com",
|
|
"description": "Build beautiful, interactive forms, surveys, quizzes, and more.",
|
|
"alternatives": [
|
|
"tally"
|
|
],
|
|
"logo_url": "/logos/typeform.svg",
|
|
"avg_monthly_cost": 25,
|
|
"pros": [
|
|
"Beautiful, conversational form experience",
|
|
"High completion rates vs traditional forms",
|
|
"Logic jumps and conditional flows",
|
|
"Great integrations (Zapier, webhooks)"
|
|
],
|
|
"cons": [
|
|
"Expensive for the response limits",
|
|
"Limited free tier (10 responses/mo)",
|
|
"Not ideal for complex multi-page forms"
|
|
]
|
|
},
|
|
{
|
|
"slug": "tally",
|
|
"name": "Tally",
|
|
"category": "Productivity",
|
|
"is_open_source": false,
|
|
"is_free_tier_generous": true,
|
|
"pricing_model": "Free/Paid",
|
|
"website": "https://tally.so",
|
|
"description": "The simplest way to create forms. Tally is a new type of form builder that works like a doc.",
|
|
"pros": [
|
|
"Notion-like form building experience with no-code simplicity",
|
|
"Unlimited forms and responses on the free tier",
|
|
"Conditional logic, hidden fields, and payment collection"
|
|
],
|
|
"cons": [
|
|
"Not open source, despite its open-source-friendly community"
|
|
],
|
|
"tags": [
|
|
"Forms",
|
|
"Surveys",
|
|
"No-code"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tally.so"
|
|
},
|
|
{
|
|
"slug": "confluence",
|
|
"name": "Confluence",
|
|
"category": "Productivity",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Atlassian)",
|
|
"website": "https://www.atlassian.com/software/confluence",
|
|
"description": "Your remote-friendly team workspace where knowledge and collaboration meet.",
|
|
"alternatives": [
|
|
"outline"
|
|
],
|
|
"logo_url": "/logos/confluence.svg",
|
|
"avg_monthly_cost": 10,
|
|
"pros": [
|
|
"Deep Jira integration for dev teams",
|
|
"Structured knowledge base with spaces",
|
|
"Templates for common documentation",
|
|
"Permissions and access control"
|
|
],
|
|
"cons": [
|
|
"Slow and bloated interface",
|
|
"Search is frustratingly poor",
|
|
"Editing experience lags behind Notion"
|
|
]
|
|
},
|
|
{
|
|
"slug": "outline",
|
|
"name": "Outline",
|
|
"category": "Productivity",
|
|
"is_open_source": true,
|
|
"github_repo": "outline/outline",
|
|
"stars": 24000,
|
|
"website": "https://www.getoutline.com",
|
|
"description": "Fast, collaborative, knowledge base for your team built using React and Markdown.",
|
|
"pros": [
|
|
"Sub-second search across all documents",
|
|
"Beautifully designed editor with Markdown shortcuts",
|
|
"Integrates with Slack, Figma, and 20+ tools out of the box"
|
|
],
|
|
"cons": [
|
|
"Hard to self-host (complex storage requirements)"
|
|
],
|
|
"last_commit": "2026-02-10T12:00:00Z",
|
|
"language": "TypeScript",
|
|
"license": "Other",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=getoutline.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/outline"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "hootsuite",
|
|
"name": "Hootsuite",
|
|
"category": "Marketing",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Seat-based)",
|
|
"website": "https://www.hootsuite.com",
|
|
"description": "Social media marketing and management dashboard.",
|
|
"alternatives": [
|
|
"mixpost"
|
|
],
|
|
"logo_url": "/logos/hootsuite.svg",
|
|
"avg_monthly_cost": 49,
|
|
"pros": [
|
|
"Manage multiple social accounts in one place",
|
|
"Post scheduling across platforms",
|
|
"Team collaboration and approval workflows",
|
|
"Analytics and reporting dashboard"
|
|
],
|
|
"cons": [
|
|
"Expensive plans ($99+/mo)",
|
|
"UI feels cluttered and dated",
|
|
"Free plan was eliminated"
|
|
]
|
|
},
|
|
{
|
|
"slug": "mixpost",
|
|
"name": "Mixpost",
|
|
"category": "Marketing",
|
|
"is_open_source": true,
|
|
"github_repo": "inovector/mixpost",
|
|
"stars": 3000,
|
|
"website": "https://mixpost.app",
|
|
"description": "Self-hosted social media management software.",
|
|
"pros": [
|
|
"Own your data",
|
|
"No monthly subscription"
|
|
],
|
|
"cons": [
|
|
"Newer, fewer social connectors"
|
|
],
|
|
"last_commit": "2026-02-01T15:00:00Z",
|
|
"language": "PHP",
|
|
"license": "Other",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=mixpost.app",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/mixpost"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "codespaces",
|
|
"name": "GitHub Codespaces",
|
|
"category": "DevOps",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Usage-based)",
|
|
"website": "https://github.com/features/codespaces",
|
|
"description": "Fast, cloud-hosted developer environments.",
|
|
"alternatives": [
|
|
"coder"
|
|
],
|
|
"logo_url": "/logos/codespaces.svg",
|
|
"avg_monthly_cost": 15,
|
|
"pros": [
|
|
"Full VS Code in the browser",
|
|
"Pre-configured dev environments",
|
|
"Instant onboarding for new contributors",
|
|
"Deep GitHub integration"
|
|
],
|
|
"cons": [
|
|
"Usage-based pricing adds up",
|
|
"Requires stable internet connection",
|
|
"Limited GPU/compute options"
|
|
]
|
|
},
|
|
{
|
|
"slug": "coder",
|
|
"name": "Coder",
|
|
"category": "DevOps",
|
|
"is_open_source": true,
|
|
"github_repo": "coder/coder",
|
|
"stars": 20000,
|
|
"website": "https://coder.com",
|
|
"description": "Provision software development environments as code on your infrastructure.",
|
|
"pros": [
|
|
"Run dev environments on any infrastructure \u2014 cloud, on-prem, or hybrid",
|
|
"Self-hosted remote development with VS Code and JetBrains support",
|
|
"Ephemeral workspaces with Terraform-based provisioning"
|
|
],
|
|
"cons": [
|
|
"Requires K8s or Terraform knowledge"
|
|
],
|
|
"last_commit": "2026-02-09T22:00:00Z",
|
|
"language": "Go",
|
|
"license": "AGPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=coder.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/coder"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "quickbooks",
|
|
"name": "QuickBooks",
|
|
"category": "Financial",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Monthly Subscription)",
|
|
"website": "https://quickbooks.intuit.com",
|
|
"description": "Smart, simple online accounting software for small businesses.",
|
|
"alternatives": [
|
|
"akaunting",
|
|
"erpnext"
|
|
],
|
|
"logo_url": "/logos/quickbooks.svg",
|
|
"avg_monthly_cost": 25,
|
|
"pros": [
|
|
"Industry standard for small business accounting",
|
|
"Easy invoicing and expense tracking",
|
|
"Bank feed integration",
|
|
"Tax preparation features"
|
|
],
|
|
"cons": [
|
|
"Subscription pricing keeps increasing",
|
|
"Performance issues with large files",
|
|
"Limited multi-currency support"
|
|
]
|
|
},
|
|
{
|
|
"slug": "akaunting",
|
|
"name": "Akaunting",
|
|
"category": "Financial",
|
|
"is_open_source": true,
|
|
"github_repo": "akaunting/akaunting",
|
|
"stars": 12000,
|
|
"website": "https://akaunting.com",
|
|
"description": "Free and open source online accounting software for small businesses and freelancers.",
|
|
"pros": [
|
|
"Modular app store",
|
|
"Multilingual and multicurrency"
|
|
],
|
|
"cons": [
|
|
"Some essential apps are paid"
|
|
],
|
|
"last_commit": "2026-02-08T14:00:00Z",
|
|
"language": "PHP",
|
|
"license": "GPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=akaunting.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/akaunting"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "premiere",
|
|
"name": "Adobe Premiere Pro",
|
|
"category": "Creative",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Creative Cloud)",
|
|
"website": "https://www.adobe.com/products/premiere.html",
|
|
"description": "Industry-leading video editing software for film, TV, and the web.",
|
|
"alternatives": [
|
|
"kdenlive"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=adobe.com",
|
|
"avg_monthly_cost": 35,
|
|
"pros": [
|
|
"Professional-grade video editing",
|
|
"Excellent integration with After Effects",
|
|
"Industry standard in film and media",
|
|
"AI-powered features (scene detection, auto-reframe)"
|
|
],
|
|
"cons": [
|
|
"Subscription-only ($22.99/mo)",
|
|
"Resource-intensive \u2014 needs powerful hardware",
|
|
"Steep learning curve"
|
|
]
|
|
},
|
|
{
|
|
"slug": "kdenlive",
|
|
"name": "Kdenlive",
|
|
"category": "Creative",
|
|
"is_open_source": true,
|
|
"github_repo": "KDE/kdenlive",
|
|
"stars": 3500,
|
|
"website": "https://kdenlive.org",
|
|
"description": "Open source video editing software based on the MLT Framework and KDE.",
|
|
"pros": [
|
|
"Truly free forever",
|
|
"Powerful multi-track editing"
|
|
],
|
|
"cons": [
|
|
"UI can be intimidating for beginners"
|
|
],
|
|
"last_commit": "2026-02-10T11:00:00Z",
|
|
"language": "C++",
|
|
"license": "GPL-3.0",
|
|
"logo_url": "/logos/kdenlive.svg",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "dashlane",
|
|
"name": "Dashlane",
|
|
"category": "Security",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Subscription)",
|
|
"website": "https://www.dashlane.com",
|
|
"description": "Cloud-based password manager and digital wallet.",
|
|
"alternatives": [
|
|
"vaultwarden",
|
|
"bitwarden"
|
|
],
|
|
"logo_url": "/logos/dashlane.svg",
|
|
"avg_monthly_cost": 8,
|
|
"pros": [
|
|
"Clean, intuitive interface",
|
|
"Built-in VPN on premium plans",
|
|
"Dark web monitoring alerts",
|
|
"Secure sharing for teams"
|
|
],
|
|
"cons": [
|
|
"More expensive than competitors",
|
|
"Free tier limited to 25 passwords",
|
|
"Desktop app was discontinued"
|
|
]
|
|
},
|
|
{
|
|
"slug": "vaultwarden",
|
|
"name": "Vaultwarden",
|
|
"category": "Security",
|
|
"is_open_source": true,
|
|
"github_repo": "dani-garcia/vaultwarden",
|
|
"stars": 32000,
|
|
"website": "https://github.com/dani-garcia/vaultwarden",
|
|
"description": "Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs.",
|
|
"pros": [
|
|
"Full Bitwarden API compatibility in a lightweight Rust binary",
|
|
"Runs on 50MB of RAM \u2014 perfect for Raspberry Pi or small VPS",
|
|
"Supports organizations, attachments, and Bitwarden Send"
|
|
],
|
|
"cons": [
|
|
"Third-party implementation (not security audited officially)"
|
|
],
|
|
"last_commit": "2026-02-09T10:00:00Z",
|
|
"language": "Rust",
|
|
"license": "AGPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=bitwarden.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/vaultwarden"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "pipedrive",
|
|
"name": "Pipedrive",
|
|
"category": "CRM",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Seat-based)",
|
|
"website": "https://www.pipedrive.com",
|
|
"description": "Sales CRM & pipeline management software that helps you get more organized.",
|
|
"alternatives": [
|
|
"twenty"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=pipedrive.com",
|
|
"avg_monthly_cost": 15,
|
|
"pros": [
|
|
"Simple, visual sales pipeline",
|
|
"Easy to set up and use",
|
|
"Good automation for follow-ups",
|
|
"Affordable entry-level pricing"
|
|
],
|
|
"cons": [
|
|
"Limited features vs Salesforce",
|
|
"Reporting could be more powerful",
|
|
"No free tier"
|
|
]
|
|
},
|
|
{
|
|
"slug": "twenty",
|
|
"name": "Twenty",
|
|
"category": "CRM",
|
|
"is_open_source": true,
|
|
"github_repo": "twentyhq/twenty",
|
|
"stars": 15000,
|
|
"website": "https://twenty.com",
|
|
"description": "A modern open-source CRM alternative to Salesforce and Pipedrive.",
|
|
"pros": [
|
|
"Clean, Notion-like interface for CRM workflows",
|
|
"Deeply customizable data models and views",
|
|
"GraphQL API for flexible integrations"
|
|
],
|
|
"cons": [
|
|
"Still in early development"
|
|
],
|
|
"last_commit": "2026-02-10T14:00:00Z",
|
|
"language": "TypeScript",
|
|
"license": "AGPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=twenty.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/twenty"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "sentry",
|
|
"name": "Sentry",
|
|
"category": "Monitoring",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Usage-based)",
|
|
"website": "https://sentry.io",
|
|
"description": "Developer-first error tracking and performance monitoring.",
|
|
"alternatives": [
|
|
"glitchtip"
|
|
],
|
|
"logo_url": "/logos/sentry.svg",
|
|
"avg_monthly_cost": 26,
|
|
"pros": [
|
|
"Best-in-class error tracking",
|
|
"Stack traces with source maps",
|
|
"Performance monitoring built in",
|
|
"Supports 100+ platforms and languages"
|
|
],
|
|
"cons": [
|
|
"Can be noisy without proper filtering",
|
|
"Pricing based on error volume",
|
|
"Self-hosting is complex"
|
|
]
|
|
},
|
|
{
|
|
"slug": "glitchtip",
|
|
"name": "GlitchTip",
|
|
"category": "Monitoring",
|
|
"is_open_source": true,
|
|
"github_repo": "glitchtip/glitchtip",
|
|
"stars": 3000,
|
|
"website": "https://glitchtip.com",
|
|
"description": "Open source error tracking that's compatible with Sentry SDKs.",
|
|
"pros": [
|
|
"Sentry-compatible error tracking that simplifies self-hosting",
|
|
"Lightweight alternative requiring minimal server resources",
|
|
"Performance monitoring with transaction tracking"
|
|
],
|
|
"cons": [
|
|
"Less polished UI than Sentry"
|
|
],
|
|
"last_commit": "2026-02-05T09:00:00Z",
|
|
"language": "Python",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=glitchtip.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/glitchtip"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "calendly",
|
|
"name": "Calendly",
|
|
"category": "Productivity",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Seat-based)",
|
|
"website": "https://calendly.com",
|
|
"description": "The modern scheduling platform that makes 'finding time' a breeze.",
|
|
"alternatives": [
|
|
"calcom",
|
|
"tymeslot"
|
|
],
|
|
"logo_url": "/logos/calendly.svg",
|
|
"avg_monthly_cost": 10,
|
|
"pros": [
|
|
"Frictionless scheduling experience",
|
|
"Integrates with Google/Outlook calendars",
|
|
"Team scheduling and round-robin",
|
|
"Customizable booking pages"
|
|
],
|
|
"cons": [
|
|
"Free plan limited to one event type",
|
|
"Premium features locked behind $10+/mo",
|
|
"Branding on free tier"
|
|
]
|
|
},
|
|
{
|
|
"slug": "calcom",
|
|
"name": "Cal.com",
|
|
"category": "Productivity",
|
|
"is_open_source": true,
|
|
"github_repo": "calcom/cal.com",
|
|
"stars": 30000,
|
|
"website": "https://cal.com",
|
|
"description": "The open-source Calendly alternative. Take control of your scheduling.",
|
|
"pros": [
|
|
"Self-hosted scheduling \u2014 no data leaves your server",
|
|
"Deeply extensible with a plugin architecture and API",
|
|
"Round-robin, collective, and managed event types"
|
|
],
|
|
"cons": [
|
|
"Can be overkill for simple use cases"
|
|
],
|
|
"last_commit": "2026-02-10T07:00:00Z",
|
|
"language": "TypeScript",
|
|
"license": "AGPL-3.0",
|
|
"logo_url": "/logos/calcom.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/calcom"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "intercom",
|
|
"name": "Intercom",
|
|
"category": "Support",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Seat-based)",
|
|
"website": "https://www.intercom.com",
|
|
"description": "The business messenger that builds real-time connections.",
|
|
"alternatives": [
|
|
"chaskiq"
|
|
],
|
|
"logo_url": "/logos/intercom.svg",
|
|
"avg_monthly_cost": 39,
|
|
"pros": [
|
|
"Best-in-class live chat and messaging",
|
|
"AI chatbot (Fin) handles common questions",
|
|
"Product tours and onboarding flows",
|
|
"Unified inbox for support"
|
|
],
|
|
"cons": [
|
|
"Very expensive ($74+/mo starting)",
|
|
"Pricing model is complex and confusing",
|
|
"Can be overkill for small teams"
|
|
]
|
|
},
|
|
{
|
|
"slug": "chaskiq",
|
|
"name": "Chaskiq",
|
|
"category": "Support",
|
|
"is_open_source": true,
|
|
"github_repo": "chaskiq/chaskiq",
|
|
"stars": 4000,
|
|
"website": "https://chaskiq.io",
|
|
"description": "Open source conversational marketing platform alternative to Intercom and Drift.",
|
|
"pros": [
|
|
"Self-hosted customer messaging that replaces Intercom",
|
|
"Bot automation with visual workflow builder",
|
|
"Multi-channel support including web chat, email, and WhatsApp"
|
|
],
|
|
"cons": [
|
|
"Smaller community than Chatwoot"
|
|
],
|
|
"last_commit": "2026-01-28T12:00:00Z",
|
|
"language": "Ruby",
|
|
"license": "GPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=chaskiq.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/chaskiq"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mailgun",
|
|
"name": "Mailgun",
|
|
"category": "Marketing",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Usage-based)",
|
|
"website": "https://www.mailgun.com",
|
|
"description": "Electronic mail delivery service for developers.",
|
|
"alternatives": [
|
|
"postal"
|
|
],
|
|
"logo_url": "/logos/mailgun.svg",
|
|
"avg_monthly_cost": 15,
|
|
"pros": [
|
|
"Reliable transactional email delivery",
|
|
"Powerful email API and SMTP relay",
|
|
"Detailed delivery analytics",
|
|
"Good documentation"
|
|
],
|
|
"cons": [
|
|
"No visual email builder",
|
|
"Pricing increased significantly",
|
|
"Support quality has declined"
|
|
]
|
|
},
|
|
{
|
|
"slug": "postal",
|
|
"name": "Postal",
|
|
"category": "Marketing",
|
|
"is_open_source": true,
|
|
"github_repo": "postalserver/postal",
|
|
"stars": 15000,
|
|
"website": "https://postalserver.io",
|
|
"description": "A fully featured open source mail delivery platform for incoming & outgoing e-mail.",
|
|
"pros": [
|
|
"High-performance mail delivery server built for throughput",
|
|
"Detailed delivery tracking with click and open analytics",
|
|
"IP pool management and DKIM/SPF configuration"
|
|
],
|
|
"cons": [
|
|
"Extremely complex to manage delivery (IP warm-up)"
|
|
],
|
|
"last_commit": "2026-02-09T13:00:00Z",
|
|
"language": "Ruby",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=postalserver.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/postal"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "segment",
|
|
"name": "Segment",
|
|
"category": "Marketing",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Usage-based)",
|
|
"website": "https://segment.com",
|
|
"description": "The leading customer data platform (CDP).",
|
|
"alternatives": [
|
|
"jitsu"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=segment.com",
|
|
"avg_monthly_cost": 120,
|
|
"pros": [
|
|
"Single API for all analytics tools",
|
|
"Customer data platform (CDP) capabilities",
|
|
"200+ destination integrations",
|
|
"Clean data pipeline management"
|
|
],
|
|
"cons": [
|
|
"Extremely expensive ($120+/mo to start)",
|
|
"Complex to set up properly",
|
|
"Overkill for simple tracking needs"
|
|
]
|
|
},
|
|
{
|
|
"slug": "jitsu",
|
|
"name": "Jitsu",
|
|
"category": "Marketing",
|
|
"is_open_source": true,
|
|
"github_repo": "jitsucom/jitsu",
|
|
"stars": 5000,
|
|
"website": "https://jitsu.com",
|
|
"description": "High-performance data collection platform and open-source Segment alternative.",
|
|
"pros": [
|
|
"Unlimited data volume",
|
|
"Real-time data streaming"
|
|
],
|
|
"cons": [
|
|
"Fewer destinations than Segment"
|
|
],
|
|
"last_commit": "2026-02-10T16:00:00Z",
|
|
"language": "TypeScript",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jitsu.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/jitsu"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "dokku",
|
|
"name": "Dokku",
|
|
"category": "DevOps",
|
|
"is_open_source": true,
|
|
"github_repo": "dokku/dokku",
|
|
"website": "https://dokku.com",
|
|
"description": "A docker-powered PaaS that helps you build and manage the lifecycle of applications.",
|
|
"pros": [
|
|
"Rock-solid stability \u2014 battle-tested since 2013",
|
|
"Heroku-compatible buildpacks and Procfile workflow",
|
|
"Zero-downtime deploys with simple git push"
|
|
],
|
|
"cons": [
|
|
"CLI driven"
|
|
],
|
|
"stars": 31874,
|
|
"last_commit": "2026-02-09T15:40:31Z",
|
|
"language": "Shell",
|
|
"license": "MIT License",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=dokku.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/dokku"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "chatgpt",
|
|
"name": "ChatGPT / OpenAI",
|
|
"category": "AI Models",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid/Freemium",
|
|
"website": "https://openai.com",
|
|
"description": "The leading commercial AI assistant and API platform (GPT-4o, o1).",
|
|
"alternatives": [
|
|
"llama",
|
|
"deepseek",
|
|
"mistral"
|
|
],
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Chat"
|
|
],
|
|
"hosting_type": "cloud",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openai.com",
|
|
"avg_monthly_cost": 20,
|
|
"pros": [
|
|
"Most capable general-purpose AI assistant",
|
|
"Excellent at writing, coding, and reasoning",
|
|
"Plugin ecosystem and GPT store",
|
|
"Supports image, voice, and file inputs"
|
|
],
|
|
"cons": [
|
|
"$20/mo for GPT-4 access",
|
|
"Can hallucinate confidently",
|
|
"No self-hosting option",
|
|
"Data privacy concerns for sensitive info"
|
|
]
|
|
},
|
|
{
|
|
"slug": "llama",
|
|
"name": "Meta Llama 3.1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "meta-llama/llama3",
|
|
"website": "https://llama.meta.com",
|
|
"description": "Meta's flagship open-weight model with 128K context. Available in 8B, 70B, and 405B parameter sizes.",
|
|
"pros": [
|
|
"Massive 128K token context window for long documents",
|
|
"Strong multilingual support across 8+ languages",
|
|
"SOTA 405B variant competing with GPT-4 at a fraction of the cost"
|
|
],
|
|
"cons": [
|
|
"405B requires massive hardware",
|
|
"Llama Community License"
|
|
],
|
|
"stars": 65000,
|
|
"language": "Python",
|
|
"license": "Llama 3.1 Community License",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"128K Context"
|
|
],
|
|
"hardware_req": "8GB VRAM (8B), 40GB+ VRAM (70B), 800GB+ VRAM (405B)",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 405,
|
|
"is_multimodal": false
|
|
},
|
|
"logo_url": "/logos/meta.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/llama"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "deepseek",
|
|
"name": "DeepSeek-V3 / R1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "deepseek-ai/DeepSeek-V3",
|
|
"website": "https://deepseek.com",
|
|
"description": "Powerful open-source models including V3 (671B) and R1 (Reasoning). Rivals GPT-4o and o1.",
|
|
"pros": [
|
|
"State-of-the-art reasoning (R1)",
|
|
"Extremely cost efficient",
|
|
"MIT License (V3/R1)"
|
|
],
|
|
"cons": [
|
|
"Full model requires huge VRAM",
|
|
"Newer ecosystem"
|
|
],
|
|
"stars": 110000,
|
|
"language": "Python",
|
|
"license": "MIT License",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Reasoning"
|
|
],
|
|
"alternatives": [
|
|
"llama",
|
|
"mistral",
|
|
"qwen",
|
|
"deepseek-v3-1"
|
|
],
|
|
"hardware_req": "8GB VRAM (Distilled), 160GB+ VRAM (Full)",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 160,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 671,
|
|
"parameters_active_b": 37,
|
|
"is_multimodal": false
|
|
},
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/deepseek"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mistral",
|
|
"name": "Mistral Large 2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "mistralai/mistral-inference",
|
|
"website": "https://mistral.ai",
|
|
"description": "Flagship 123B model from Mistral AI. Optimized for multilingual, reasoning, and coding tasks.",
|
|
"pros": [
|
|
"State-of-the-art performance per parameter on benchmarks",
|
|
"128K context window with function-calling support",
|
|
"Efficient Mixture-of-Experts architecture for fast inference"
|
|
],
|
|
"cons": [
|
|
"Mistral Research License",
|
|
"Requires high VRAM (80GB+)"
|
|
],
|
|
"stars": 20000,
|
|
"language": "Python",
|
|
"license": "Mistral Research License",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"EU"
|
|
],
|
|
"hardware_req": "80GB+ VRAM (FP16), 40GB+ (8-bit)",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 80,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 123,
|
|
"is_multimodal": false
|
|
},
|
|
"logo_url": "/logos/mistral.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/mistral"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "gemma",
|
|
"name": "Google Gemma 2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "google/gemma-2",
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Google's open-weight models (9B, 27B) with class-leading performance and efficient architecture.",
|
|
"pros": [
|
|
"Distilled for performance",
|
|
"Excellent 27B variant",
|
|
"Google AI ecosystem"
|
|
],
|
|
"cons": [
|
|
"8K context window",
|
|
"Gemma Terms of Use"
|
|
],
|
|
"stars": 20000,
|
|
"language": "Python",
|
|
"license": "Gemma License",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Google"
|
|
],
|
|
"hardware_req": "8GB VRAM (9B), 24GB+ VRAM (27B)",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 18,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 27,
|
|
"is_multimodal": false
|
|
},
|
|
"logo_url": "/logos/gemma.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/gemma"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "qwen",
|
|
"name": "Qwen 2.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "QwenLM/Qwen2.5",
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Comprehensive LLM series from Alibaba Cloud, excelling in coding, math, and multilingual support.",
|
|
"pros": [
|
|
"128K context window",
|
|
"Top-tier coding ability",
|
|
"Apache 2.0 (mostly)"
|
|
],
|
|
"cons": [
|
|
"72B requires significant VRAM"
|
|
],
|
|
"stars": 50000,
|
|
"language": "Python",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Coding"
|
|
],
|
|
"hardware_req": "8GB VRAM (7B), 40GB+ VRAM (32B), 140GB+ VRAM (72B)",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 40,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 72,
|
|
"is_multimodal": false
|
|
},
|
|
"logo_url": "/logos/qwen.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/qwen"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "midjourney",
|
|
"name": "Midjourney",
|
|
"category": "AI Image Generation",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Subscription)",
|
|
"website": "https://midjourney.com",
|
|
"description": "Leading AI image generation tool, known for artistic and photorealistic outputs.",
|
|
"alternatives": [
|
|
"stable-diffusion",
|
|
"flux"
|
|
],
|
|
"tags": [
|
|
"AI",
|
|
"Image",
|
|
"Art"
|
|
],
|
|
"hosting_type": "cloud",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=midjourney.com",
|
|
"avg_monthly_cost": 10,
|
|
"pros": [
|
|
"Best-in-class AI image generation quality",
|
|
"Stunning artistic and photorealistic outputs",
|
|
"Active community for inspiration",
|
|
"V6 handles text in images well"
|
|
],
|
|
"cons": [
|
|
"Discord-only interface (no standalone app)",
|
|
"No free tier ($10/mo minimum)",
|
|
"Limited control over exact outputs",
|
|
"No API for automation"
|
|
]
|
|
},
|
|
{
|
|
"slug": "stable-diffusion",
|
|
"name": "Stable Diffusion 3.5",
|
|
"category": "AI Image Generation",
|
|
"is_open_source": true,
|
|
"github_repo": "Stability-AI/sd3.5",
|
|
"website": "https://stability.ai",
|
|
"description": "The latest open-weights image generation model from Stability AI, offering superior prompt adherence.",
|
|
"pros": [
|
|
"Run image generation entirely on your own GPU",
|
|
"Extensive community with thousands of fine-tuned models",
|
|
"ControlNet, inpainting, and img2img for precise creative control"
|
|
],
|
|
"cons": [
|
|
"Stability Community License",
|
|
"Requires 8GB+ VRAM"
|
|
],
|
|
"stars": 10000,
|
|
"language": "Python",
|
|
"license": "Stability Community License",
|
|
"tags": [
|
|
"AI",
|
|
"Image",
|
|
"Prompt Adherence"
|
|
],
|
|
"hardware_req": "8GB VRAM (Medium), 16GB+ VRAM (Large)",
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "/logos/stability.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/stable-diffusion"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mochi-1",
|
|
"name": "Mochi-1",
|
|
"category": "AI Video Generation",
|
|
"is_open_source": true,
|
|
"github_repo": "genmoai/mochi",
|
|
"website": "https://www.genmo.ai",
|
|
"description": "High-fidelity open-weights video generation model from Genmo, rivaling closed-source alternatives.",
|
|
"pros": [
|
|
"Realistic motion",
|
|
"Adobe-like quality",
|
|
"Apache 2.0 license"
|
|
],
|
|
"cons": [
|
|
"Extreme hardware requirements",
|
|
"Memory intensive"
|
|
],
|
|
"stars": 5000,
|
|
"language": "Python",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"Video",
|
|
"Motion"
|
|
],
|
|
"hardware_req": "24GB VRAM (Minimal), 80GB VRAM (Recommended)",
|
|
"hosting_type": "both",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=genmo.ai",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "hunyuan-video",
|
|
"name": "HunyuanVideo 1.5",
|
|
"category": "AI Video Generation",
|
|
"is_open_source": true,
|
|
"github_repo": "Tencent/HunyuanVideo",
|
|
"website": "https://github.com/Tencent/HunyuanVideo",
|
|
"description": "Tencent's state-of-the-art open-source video generation model with 13B parameters.",
|
|
"pros": [
|
|
"Native 720p output",
|
|
"Long sequences support",
|
|
"Stable and clean motion"
|
|
],
|
|
"cons": [
|
|
"High compute cost"
|
|
],
|
|
"stars": 8000,
|
|
"language": "Python",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"Video",
|
|
"HD"
|
|
],
|
|
"hardware_req": "14GB VRAM (v1.5/distilled), 45GB+ VRAM (Base)",
|
|
"hosting_type": "both",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tencent.com",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "flux",
|
|
"name": "FLUX",
|
|
"category": "AI Image Generation",
|
|
"is_open_source": true,
|
|
"github_repo": "black-forest-labs/flux",
|
|
"website": "https://blackforestlabs.ai",
|
|
"description": "Next-gen open image generation model from Black Forest Labs. State-of-the-art quality rivaling Midjourney.",
|
|
"pros": [
|
|
"Outstanding image quality",
|
|
"Open weights available",
|
|
"Rapid community adoption"
|
|
],
|
|
"cons": [
|
|
"High VRAM requirement",
|
|
"Newer (less tooling)"
|
|
],
|
|
"stars": 20000,
|
|
"language": "Python",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"Image",
|
|
"New"
|
|
],
|
|
"hardware_req": "12GB+ VRAM (Schnell), 24GB+ (Dev)",
|
|
"hosting_type": "both",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=blackforestlabs.ai",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "github-copilot",
|
|
"name": "GitHub Copilot",
|
|
"category": "AI Coding",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid (Subscription)",
|
|
"website": "https://github.com/features/copilot",
|
|
"description": "AI pair programmer by GitHub/OpenAI. Integrates into VS Code and JetBrains.",
|
|
"alternatives": [
|
|
"continue-dev",
|
|
"tabby"
|
|
],
|
|
"tags": [
|
|
"AI",
|
|
"Coding",
|
|
"IDE"
|
|
],
|
|
"hosting_type": "cloud",
|
|
"logo_url": "/logos/github-copilot.svg",
|
|
"avg_monthly_cost": 10,
|
|
"pros": [
|
|
"Best AI code completion in the market",
|
|
"Deep IDE integration (VS Code, JetBrains)",
|
|
"Understands project context",
|
|
"Copilot Chat for code explanations"
|
|
],
|
|
"cons": [
|
|
"$10/mo per user",
|
|
"Can suggest insecure or outdated patterns",
|
|
"Privacy concerns with code telemetry",
|
|
"Dependent on GitHub/Microsoft"
|
|
]
|
|
},
|
|
{
|
|
"slug": "continue-dev",
|
|
"name": "Continue",
|
|
"category": "AI Coding",
|
|
"is_open_source": true,
|
|
"github_repo": "continuedev/continue",
|
|
"website": "https://continue.dev",
|
|
"description": "Open-source AI code assistant for VS Code and JetBrains. Use any model (local or API).",
|
|
"pros": [
|
|
"Highly customizable AI coding assistant \u2014 bring your own model",
|
|
"Works with VS Code and JetBrains natively",
|
|
"Context-aware with codebase indexing and retrieval"
|
|
],
|
|
"cons": [
|
|
"Requires model setup"
|
|
],
|
|
"stars": 25000,
|
|
"language": "TypeScript",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"Coding",
|
|
"IDE",
|
|
"Self-Hosted"
|
|
],
|
|
"hardware_req": "Depends on chosen model",
|
|
"hosting_type": "both",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=continue.dev",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "tabby",
|
|
"name": "TabbyML",
|
|
"category": "AI Coding",
|
|
"is_open_source": true,
|
|
"github_repo": "TabbyML/tabby",
|
|
"website": "https://tabby.tabbyml.com",
|
|
"description": "Self-hosted AI coding assistant. An open-source, self-hosted alternative to GitHub Copilot.",
|
|
"pros": [
|
|
"Enterprise-ready self-hosted code completion",
|
|
"Supports multiple model backends including local GGUF",
|
|
"IDE extensions for VS Code, Vim, and IntelliJ"
|
|
],
|
|
"cons": [
|
|
"Needs GPU for best results"
|
|
],
|
|
"stars": 25000,
|
|
"language": "Rust",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"Coding",
|
|
"Self-Hosted"
|
|
],
|
|
"hardware_req": "8GB+ VRAM recommended",
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=tabby.tabbyml.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/tabby"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "ollama",
|
|
"name": "Ollama",
|
|
"category": "AI Runners",
|
|
"is_open_source": true,
|
|
"github_repo": "ollama/ollama",
|
|
"website": "https://ollama.com",
|
|
"description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models locally.",
|
|
"pros": [
|
|
"Run any open model locally with a single command",
|
|
"OpenAI-compatible API for drop-in integration",
|
|
"Automatic model management with quantization support"
|
|
],
|
|
"cons": [
|
|
"Command line focused (needs UI)"
|
|
],
|
|
"stars": 60000,
|
|
"language": "Go",
|
|
"license": "MIT License",
|
|
"tags": [
|
|
"AI",
|
|
"Local",
|
|
"Runner"
|
|
],
|
|
"hardware_req": "8GB+ RAM",
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "/logos/ollama.svg",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/ollama"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "open-webui",
|
|
"name": "Open WebUI",
|
|
"category": "AI Interfaces",
|
|
"is_open_source": true,
|
|
"github_repo": "open-webui/open-webui",
|
|
"website": "https://openwebui.com",
|
|
"description": "User-friendly WebUI for LLMs (Formerly Ollama WebUI). Supports Ollama and OpenAI-compatible APIs.",
|
|
"pros": [
|
|
"ChatGPT-like UI",
|
|
"Multi-model chat",
|
|
"RAG support"
|
|
],
|
|
"cons": [
|
|
"Requires backend (like Ollama)"
|
|
],
|
|
"stars": 15000,
|
|
"language": "Svelte",
|
|
"license": "MIT License",
|
|
"tags": [
|
|
"AI",
|
|
"UI",
|
|
"Chat"
|
|
],
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=openwebui.com",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "jan",
|
|
"name": "Jan",
|
|
"category": "AI Interfaces",
|
|
"is_open_source": true,
|
|
"github_repo": "janhq/jan",
|
|
"website": "https://jan.ai",
|
|
"description": "Jan is an open source alternative to ChatGPT that runs 100% offline on your computer.",
|
|
"pros": [
|
|
"Runs offline",
|
|
"Native app (no Docker)",
|
|
"Local model manager"
|
|
],
|
|
"cons": [
|
|
"Heavy resource usage"
|
|
],
|
|
"stars": 18000,
|
|
"language": "TypeScript",
|
|
"license": "AGPL-3.0",
|
|
"tags": [
|
|
"AI",
|
|
"Desktop",
|
|
"Offline"
|
|
],
|
|
"hardware_req": "Apple Silicon or NVIDIA GPU",
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=jan.ai",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "lm-studio",
|
|
"name": "LM Studio",
|
|
"category": "AI Runners",
|
|
"is_open_source": false,
|
|
"pricing_model": "Free (Proprietary)",
|
|
"website": "https://lmstudio.ai",
|
|
"description": "Discover, download, and run local LLMs. Easy GUI for GGUF models.",
|
|
"alternatives": [
|
|
"ollama",
|
|
"gpt4all"
|
|
],
|
|
"tags": [
|
|
"AI",
|
|
"Desktop",
|
|
"GUI"
|
|
],
|
|
"hardware_req": "Apple Silicon or NVIDIA/AMD GPU",
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=lmstudio.ai",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"pros": [
|
|
"Run LLMs locally with a clean GUI",
|
|
"No cloud dependency \u2014 fully offline",
|
|
"Supports GGUF and other quantized formats",
|
|
"Built-in model discovery and download"
|
|
],
|
|
"cons": [
|
|
"Requires decent hardware (8GB+ RAM)",
|
|
"Closed source despite local-first approach",
|
|
"Limited compared to CLI tools like Ollama"
|
|
]
|
|
},
|
|
{
|
|
"slug": "gpt4all",
|
|
"name": "GPT4All",
|
|
"category": "AI Runners",
|
|
"is_open_source": true,
|
|
"github_repo": "nomic-ai/gpt4all",
|
|
"website": "https://gpt4all.io",
|
|
"description": "Run open-source LLMs locally on your CPU and GPU. No internet required.",
|
|
"pros": [
|
|
"One-click desktop installer \u2014 no terminal needed",
|
|
"Built-in RAG for chatting with your local documents",
|
|
"Runs on CPU \u2014 no GPU required for basic models"
|
|
],
|
|
"cons": [
|
|
"Slower on CPU"
|
|
],
|
|
"stars": 65000,
|
|
"language": "C++",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"Desktop",
|
|
"CPU"
|
|
],
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=gpt4all.io",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/gpt4all"
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "localai",
|
|
"name": "LocalAI",
|
|
"category": "AI Runners",
|
|
"is_open_source": true,
|
|
"github_repo": "mudler/LocalAI",
|
|
"website": "https://localai.io",
|
|
"description": "The specific build of LocalAI, the free, Open Source OpenAI alternative. Drop-in replacement for OpenAI API.",
|
|
"pros": [
|
|
"OpenAI API compatible",
|
|
"Runs on consumer hardware",
|
|
"No GPU required"
|
|
],
|
|
"cons": [
|
|
"Configuration heavy"
|
|
],
|
|
"stars": 20000,
|
|
"language": "Go",
|
|
"license": "MIT License",
|
|
"tags": [
|
|
"AI",
|
|
"API",
|
|
"Backend"
|
|
],
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=localai.io",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "flowise",
|
|
"name": "Flowise",
|
|
"category": "AI Tools",
|
|
"is_open_source": true,
|
|
"github_repo": "FlowiseAI/Flowise",
|
|
"website": "https://flowiseai.com",
|
|
"description": "Drag & drop UI to build your customized LLM flow using LangChainJS.",
|
|
"pros": [
|
|
"Low-code",
|
|
"Visual builder",
|
|
"Rich integrations"
|
|
],
|
|
"cons": [
|
|
"Node.js dependency"
|
|
],
|
|
"stars": 28000,
|
|
"language": "TypeScript",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"Low-Code",
|
|
"LangChain"
|
|
],
|
|
"hosting_type": "self-hosted",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=flowiseai.com",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "llama-4",
|
|
"name": "Meta Llama 4",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "meta-llama/llama4",
|
|
"website": "https://llama.meta.com",
|
|
"description": "The latest generation of Llama. 'Maverick' architecture with 256K context. The new standard for open weights.",
|
|
"pros": [
|
|
"Next-gen Maverick architecture \u2014 faster and smarter than Llama 3",
|
|
"256K context window \u2014 double that of most competitors",
|
|
"Native multimodal support for images, video, and text"
|
|
],
|
|
"cons": [
|
|
"High VRAM for top tiers"
|
|
],
|
|
"stars": 45000,
|
|
"language": "Python",
|
|
"license": "Llama Community License",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"2026",
|
|
"SOTA"
|
|
],
|
|
"hardware_req": "12GB VRAM (Medium), 48GB+ VRAM (Large)",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 12,
|
|
"context_window_tokens": 256000,
|
|
"parameters_total_b": 65,
|
|
"is_multimodal": true
|
|
},
|
|
"logo_url": "/logos/meta.svg",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "gemma-3",
|
|
"name": "Google Gemma 3",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "google/gemma-3",
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Gemma 3 (27B) delivers GPT-5 class performance on a single GPU. Optimized for reasoning and agents.",
|
|
"pros": [
|
|
"Incredible 27B performance",
|
|
"Agent-centric design",
|
|
"JAX/PyTorch native"
|
|
],
|
|
"cons": [
|
|
"limited to 27B size currently"
|
|
],
|
|
"stars": 15000,
|
|
"language": "Python",
|
|
"license": "Gemma License",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Google",
|
|
"2026"
|
|
],
|
|
"hardware_req": "24GB VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 24,
|
|
"context_window_tokens": 1000000,
|
|
"parameters_total_b": 27,
|
|
"is_multimodal": true
|
|
},
|
|
"logo_url": "/logos/gemma.svg",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "qwen-3",
|
|
"name": "Qwen 3 (235B)",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "QwenLM/Qwen3",
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Massive 235B param model. The absolute king of coding and mathematics benchmarks in 2026.",
|
|
"pros": [
|
|
"Unmatched coding performance",
|
|
"Excellent math/reasoning",
|
|
"MoE efficiency"
|
|
],
|
|
"cons": [
|
|
"Requires multi-GPU setup"
|
|
],
|
|
"stars": 35000,
|
|
"language": "Python",
|
|
"license": "Apache License 2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Coding",
|
|
"MoE"
|
|
],
|
|
"hardware_req": "140GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 140,
|
|
"context_window_tokens": 1000000,
|
|
"parameters_total_b": 235,
|
|
"is_multimodal": false
|
|
},
|
|
"logo_url": "/logos/qwen.svg",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "deepseek-v3-1",
|
|
"name": "DeepSeek V3.1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"github_repo": "deepseek-ai/DeepSeek-V3.1",
|
|
"website": "https://deepseek.com",
|
|
"description": "Refined V3 architecture with improved instruction following and reduced hallucination rates.",
|
|
"pros": [
|
|
"API pricing 10-50x cheaper than GPT-4 equivalents",
|
|
"Open weights with full model access \u2014 no API lock-in",
|
|
"Top-tier reasoning that rivals closed-source frontier models"
|
|
],
|
|
"cons": [
|
|
"Complex serving stack"
|
|
],
|
|
"stars": 120000,
|
|
"language": "Python",
|
|
"license": "MIT License",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Reasoning"
|
|
],
|
|
"alternatives": [
|
|
"deepseek",
|
|
"llama",
|
|
"mistral",
|
|
"qwen"
|
|
],
|
|
"hardware_req": "80GB VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 80,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 685,
|
|
"is_multimodal": false
|
|
},
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=deepseek.com",
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "llama-3-1-8b",
|
|
"name": "Llama 3.1 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "The latest 8B parameter model from Meta, optimized for efficiency and edge devices.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3-1-70b",
|
|
"name": "Llama 3.1 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "A powerful 70B model by Meta, rivaling closed-source top-tier models.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "49GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3-1-405b",
|
|
"name": "Llama 3.1 405B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "Meta's massive 405B frontier-class open weights model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "284GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 284,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 405,
|
|
"parameters_active_b": 405,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3-8b",
|
|
"name": "Llama 3 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "Meta's highly capable 8B model, a standard for local LLM inference.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3-70b",
|
|
"name": "Llama 3 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "Meta's previous generation 70B heavy-hitter.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "49GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-2-7b",
|
|
"name": "Llama 2 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "The classic 7B model that started the open-weight revolution.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-2-13b",
|
|
"name": "Llama 2 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "A balanced 13B model from the Llama 2 series.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-2-70b",
|
|
"name": "Llama 2 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "The largest Llama 2 model, widely used for fine-tuning.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "49GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "code-llama-7b",
|
|
"name": "Code Llama 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "Specialized coding model based on Llama 2.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 100000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "code-llama-13b",
|
|
"name": "Code Llama 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "Mid-sized specialized coding model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 100000,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "code-llama-34b",
|
|
"name": "Code Llama 34B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "Large coding model with excellent performance.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 24,
|
|
"context_window_tokens": 100000,
|
|
"parameters_total_b": 34,
|
|
"parameters_active_b": 34,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "code-llama-70b",
|
|
"name": "Code Llama 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llama.meta.com",
|
|
"description": "The most powerful Code Llama variant.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Open Weights",
|
|
"AI",
|
|
"LLM",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "49GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 100000,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "mistral-7b-v0-3",
|
|
"name": "Mistral 7B v0.3",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "Updated 7B model from Mistral AI with extended vocabulary and function calling.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "mistral-nemo-12b",
|
|
"name": "Mistral Nemo 12B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "A native 12B model built in collaboration with NVIDIA, fitting in 24GB VRAM.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "8GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 12,
|
|
"parameters_active_b": 12,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "mixtral-8x7b",
|
|
"name": "Mixtral 8x7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "The first high-performance open sparse Mixture-of-Experts (MoE) model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "33GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 33,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 47,
|
|
"parameters_active_b": 47,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "mixtral-8x22b",
|
|
"name": "Mixtral 8x22B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "A massive MoE model setting new standards for open weights.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "99GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 99,
|
|
"context_window_tokens": 65000,
|
|
"parameters_total_b": 141,
|
|
"parameters_active_b": 141,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "codestral-22b",
|
|
"name": "Codestral 22B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "Mistral's first dedicated code model, proficient in 80+ languages.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "15GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 15,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 22,
|
|
"parameters_active_b": 22,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mathstral-7b",
|
|
"name": "Mathstral 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "Specialized model for math and reasoning tasks.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "ministral-3b",
|
|
"name": "Ministral 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "Mistral's efficient edge model for mobile and low-latency use cases.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "ministral-8b",
|
|
"name": "Ministral 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://mistral.ai",
|
|
"description": "A powerful edge model bridging the gap between small and medium LLMs.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Europe",
|
|
"Mistral AI"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-0-5b",
|
|
"name": "Qwen 2.5 0.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Tiny but capable model for extreme edge analytics.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 0.5,
|
|
"parameters_active_b": 0.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-1-5b",
|
|
"name": "Qwen 2.5 1.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Small footprint model punching above its weight.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 1.5,
|
|
"parameters_active_b": 1.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-3b",
|
|
"name": "Qwen 2.5 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Balanced 3B model, great for mobile inference.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-7b",
|
|
"name": "Qwen 2.5 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "The 7B workhorse of the Qwen 2.5 family, beating Llama 3.1 8B in many benchmarks.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-14b",
|
|
"name": "Qwen 2.5 14B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "A sweet-spot size for dual-GPU or high VRAM consumer cards.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "10GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-32b",
|
|
"name": "Qwen 2.5 32B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Ideally sized for 24GB VRAM cards like the RTX 3090/4090.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "22GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-72b",
|
|
"name": "Qwen 2.5 72B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Top-tier open weights model, consistently ranking high on leaderboards.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "50GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 50,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 72,
|
|
"parameters_active_b": 72,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-coder-1-5b",
|
|
"name": "Qwen 2.5 Coder 1.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Tiny coding assistant.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 1.5,
|
|
"parameters_active_b": 1.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-coder-7b",
|
|
"name": "Qwen 2.5 Coder 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "State-of-the-art 7B coding model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-5-coder-32b",
|
|
"name": "Qwen 2.5 Coder 32B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Powerful coding model fitting in consumer hardware.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "22GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-vl-7b",
|
|
"name": "Qwen 2 VL 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Vision-Language model capable of understanding images and video.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-2-vl-72b",
|
|
"name": "Qwen 2 VL 72B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://qwenlm.github.io",
|
|
"description": "Massive Vision-Language model for complex visual reasoning.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Alibaba Cloud",
|
|
"Qwen",
|
|
"LLM",
|
|
"AI",
|
|
"Alibaba"
|
|
],
|
|
"hardware_req": "50GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 50,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 72,
|
|
"parameters_active_b": 72,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "gemma-2-2b",
|
|
"name": "Gemma 2 2B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Efficient 2B model by Google, distilled for high performance.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Gemma",
|
|
"Google",
|
|
"LLM",
|
|
"Google DeepMind",
|
|
"AI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "gemma-2-9b",
|
|
"name": "Gemma 2 9B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Google's powerful 9B open model, outperforming larger predecessors.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Gemma",
|
|
"Google",
|
|
"LLM",
|
|
"Google DeepMind",
|
|
"AI"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 9,
|
|
"parameters_active_b": 9,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "gemma-2-27b",
|
|
"name": "Gemma 2 27B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Large-scale open model from Google designed for complex reasoning.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Gemma",
|
|
"Google",
|
|
"LLM",
|
|
"Google DeepMind",
|
|
"AI"
|
|
],
|
|
"hardware_req": "19GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 19,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 27,
|
|
"parameters_active_b": 27,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "codegemma-2b",
|
|
"name": "CodeGemma 2B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Fast, lightweight code completion model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Gemma",
|
|
"Google",
|
|
"LLM",
|
|
"Google DeepMind",
|
|
"AI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "codegemma-7b",
|
|
"name": "CodeGemma 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Instruction-tuned coding model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Gemma",
|
|
"Google",
|
|
"LLM",
|
|
"Google DeepMind",
|
|
"AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "recurrentgemma-2b",
|
|
"name": "RecurrentGemma 2B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Gemma architecture with recurrent neural network efficiency.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Gemma",
|
|
"Google",
|
|
"LLM",
|
|
"Google DeepMind",
|
|
"AI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "palette-2b",
|
|
"name": "Palette 2B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.google.dev/gemma",
|
|
"description": "Specialized vision-language model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Gemma",
|
|
"Google",
|
|
"LLM",
|
|
"Google DeepMind",
|
|
"AI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "phi-3-5-mini",
|
|
"name": "Phi 3.5 Mini",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://azure.microsoft.com/en-us/products/phi",
|
|
"description": "Latest lightweight powerhouse from Microsoft, beating many larger models.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Phi",
|
|
"AI",
|
|
"LLM",
|
|
"Microsoft"
|
|
],
|
|
"hardware_req": "3GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 3.8,
|
|
"parameters_active_b": 3.8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3-5-moe",
|
|
"name": "Phi 3.5 MoE",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://azure.microsoft.com/en-us/products/phi",
|
|
"description": "Mixture-of-Experts model combining 16x3.8B experts.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Phi",
|
|
"AI",
|
|
"LLM",
|
|
"Microsoft"
|
|
],
|
|
"hardware_req": "29GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 29,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 42,
|
|
"parameters_active_b": 6.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3-5-vision",
|
|
"name": "Phi 3.5 Vision",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://azure.microsoft.com/en-us/products/phi",
|
|
"description": "Multimodal version of Phi 3.5 capable of image analysis.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Phi",
|
|
"AI",
|
|
"LLM",
|
|
"Microsoft"
|
|
],
|
|
"hardware_req": "3GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 4.2,
|
|
"parameters_active_b": 4.2,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3-mini",
|
|
"name": "Phi 3 Mini",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://azure.microsoft.com/en-us/products/phi",
|
|
"description": "Highly capable 3.8B model trained on textbook data.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Phi",
|
|
"AI",
|
|
"LLM",
|
|
"Microsoft"
|
|
],
|
|
"hardware_req": "3GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 3.8,
|
|
"parameters_active_b": 3.8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3-medium",
|
|
"name": "Phi 3 Medium",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://azure.microsoft.com/en-us/products/phi",
|
|
"description": "14B parameter version of the Phi-3 family.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Phi",
|
|
"AI",
|
|
"LLM",
|
|
"Microsoft"
|
|
],
|
|
"hardware_req": "10GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "orca-2-13b",
|
|
"name": "Orca 2 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://azure.microsoft.com/en-us/products/phi",
|
|
"description": "Microsoft's research model exploring reasoning capabilities.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Phi",
|
|
"AI",
|
|
"LLM",
|
|
"Microsoft"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "yi-1-5-6b",
|
|
"name": "Yi 1.5 6B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://01.ai",
|
|
"description": "Strong 6B model from 01.AI.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"01.AI",
|
|
"Yi"
|
|
],
|
|
"hardware_req": "4GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 6,
|
|
"parameters_active_b": 6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "yi-1-5-9b",
|
|
"name": "Yi 1.5 9B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://01.ai",
|
|
"description": "9B parameter model optimized for coding and math.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"01.AI",
|
|
"Yi"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 9,
|
|
"parameters_active_b": 9,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "yi-1-5-34b",
|
|
"name": "Yi 1.5 34B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://01.ai",
|
|
"description": "Highly rated 34B model, popular in the community.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"01.AI",
|
|
"Yi"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 24,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 34,
|
|
"parameters_active_b": 34,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "yi-large",
|
|
"name": "Yi Large",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://01.ai",
|
|
"description": "Proprietary-class open weights model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"01.AI",
|
|
"Yi"
|
|
],
|
|
"hardware_req": "70GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 70,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 100,
|
|
"parameters_active_b": 100,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "command-r",
|
|
"name": "Command R",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://cohere.com",
|
|
"description": "Optimized for RAG (Retrieval Augmented Generation) and tool use.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Cohere For AI",
|
|
"Cohere",
|
|
"LLM",
|
|
"RAG",
|
|
"AI"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 24,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 35,
|
|
"parameters_active_b": 35,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "command-r-plus",
|
|
"name": "Command R+",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://cohere.com",
|
|
"description": "Massive RAG-optimized model with advanced reasoning.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Cohere For AI",
|
|
"Cohere",
|
|
"LLM",
|
|
"RAG",
|
|
"AI"
|
|
],
|
|
"hardware_req": "73GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 73,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 104,
|
|
"parameters_active_b": 104,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "dolphin-2-9-llama-3-8b",
|
|
"name": "Dolphin 2.9 Llama 3 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://erichartford.com",
|
|
"description": "Uncensored fine-tune of Llama 3 8B.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Roleplay",
|
|
"Uncensored",
|
|
"LLM",
|
|
"Cognitive Computations",
|
|
"AI"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "dolphin-2-9-2-qwen-2-72b",
|
|
"name": "Dolphin 2.9.2 Qwen 2 72B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://erichartford.com",
|
|
"description": "Powerful uncensored chat model based on Qwen 2.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Roleplay",
|
|
"Uncensored",
|
|
"LLM",
|
|
"Cognitive Computations",
|
|
"AI"
|
|
],
|
|
"hardware_req": "50GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 50,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 72,
|
|
"parameters_active_b": 72,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "dolphin-mixtral-8x7b",
|
|
"name": "Dolphin Mixtral 8x7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://erichartford.com",
|
|
"description": "One of the most popular uncensored MoE models.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Roleplay",
|
|
"Uncensored",
|
|
"LLM",
|
|
"Cognitive Computations",
|
|
"AI"
|
|
],
|
|
"hardware_req": "33GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 33,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 47,
|
|
"parameters_active_b": 12.9,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "hermes-3-llama-3-1-8b",
|
|
"name": "Hermes 3 Llama 3.1 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://nousresearch.com",
|
|
"description": "Unlock the full potential of Llama 3.1 with advanced agentic capabilities.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Fine-tune",
|
|
"AI",
|
|
"LLM",
|
|
"Nous Research"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "hermes-3-llama-3-1-70b",
|
|
"name": "Hermes 3 Llama 3.1 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://nousresearch.com",
|
|
"description": "70B version of the Hermes 3 agentic fine-tune.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Fine-tune",
|
|
"AI",
|
|
"LLM",
|
|
"Nous Research"
|
|
],
|
|
"hardware_req": "49GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "nous-hermes-2-mixtral-8x7b",
|
|
"name": "Nous Hermes 2 Mixtral 8x7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://nousresearch.com",
|
|
"description": "High-quality instruction tuned Mixtral.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Fine-tune",
|
|
"AI",
|
|
"LLM",
|
|
"Nous Research"
|
|
],
|
|
"hardware_req": "33GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 33,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 47,
|
|
"parameters_active_b": 12.9,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "solar-10-7b",
|
|
"name": "Solar 10.7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://upstage.ai",
|
|
"description": "Innovative 10.7B model created using depth up-scaling of Llama 2.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Upstage",
|
|
"Solar",
|
|
"LLM",
|
|
"Depth Upscaling",
|
|
"AI"
|
|
],
|
|
"hardware_req": "7GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 7,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 10.7,
|
|
"parameters_active_b": 10.7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "solar-pro",
|
|
"name": "Solar Pro",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://upstage.ai",
|
|
"description": "Advanced scale-up of the Solar architecture.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Upstage",
|
|
"Solar",
|
|
"LLM",
|
|
"Depth Upscaling",
|
|
"AI"
|
|
],
|
|
"hardware_req": "15GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 15,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 22,
|
|
"parameters_active_b": 22,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "deepseek-coder-v2-16b",
|
|
"name": "DeepSeek Coder V2 16B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://deepseek.com",
|
|
"description": "Powerful coding-specific MoE model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Coding",
|
|
"AI",
|
|
"LLM",
|
|
"DeepSeek"
|
|
],
|
|
"hardware_req": "11GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 11,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 16,
|
|
"parameters_active_b": 2.4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "deepseek-coder-v2-236b",
|
|
"name": "DeepSeek Coder V2 236B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://deepseek.com",
|
|
"description": "Massive coding model rivaling GPT-4 across benchmarks.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Coding",
|
|
"AI",
|
|
"LLM",
|
|
"DeepSeek"
|
|
],
|
|
"hardware_req": "165GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 165,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 236,
|
|
"parameters_active_b": 21,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "deepseek-llm-7b",
|
|
"name": "DeepSeek LLM 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://deepseek.com",
|
|
"description": "General purpose 7B chat model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Coding",
|
|
"AI",
|
|
"LLM",
|
|
"DeepSeek"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "deepseek-llm-67b",
|
|
"name": "DeepSeek LLM 67B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://deepseek.com",
|
|
"description": "Large general purpose model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Coding",
|
|
"AI",
|
|
"LLM",
|
|
"DeepSeek"
|
|
],
|
|
"hardware_req": "47GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 47,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 67,
|
|
"parameters_active_b": 67,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "stable-lm-2-1-6b",
|
|
"name": "Stable LM 2 1.6B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://stability.ai",
|
|
"description": "Very small, efficient model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Stability AI",
|
|
"AI",
|
|
"LLM"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.6,
|
|
"parameters_active_b": 1.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "stable-lm-2-12b",
|
|
"name": "Stable LM 2 12B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://stability.ai",
|
|
"description": "Balanced 12B model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Stability AI",
|
|
"AI",
|
|
"LLM"
|
|
],
|
|
"hardware_req": "8GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 12,
|
|
"parameters_active_b": 12,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "stable-code-3b",
|
|
"name": "Stable Code 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://stability.ai",
|
|
"description": "Specialized 3B coding model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Stability AI",
|
|
"AI",
|
|
"LLM"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 16384,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "starling-lm-7b-alpha",
|
|
"name": "Starling LM 7B Alpha",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://nexusflow.ai",
|
|
"description": "RLHF fine-tune known for high quality responses.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Nexusflow",
|
|
"RLHF"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "starling-lm-7b-beta",
|
|
"name": "Starling LM 7B Beta",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://nexusflow.ai",
|
|
"description": "Improved beta version of the Starling RLHF model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Nexusflow",
|
|
"RLHF"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "openchat-3-5",
|
|
"name": "OpenChat 3.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://openchat.team",
|
|
"description": "Fine-tuned Mistral 7B using C-RLFT strategy.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"C-RLFT",
|
|
"AI",
|
|
"LLM",
|
|
"OpenChat"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "openchat-3-6",
|
|
"name": "OpenChat 3.6",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://openchat.team",
|
|
"description": "Updated version based on Llama 3 8B.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"C-RLFT",
|
|
"AI",
|
|
"LLM",
|
|
"OpenChat"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "tinyllama-1-1b",
|
|
"name": "TinyLlama 1.1B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/jzhang38/TinyLlama",
|
|
"description": "The most popular ~1B model, trained on 3T tokens.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Small",
|
|
"AI",
|
|
"LLM",
|
|
"TinyLlama"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 1.1,
|
|
"parameters_active_b": 1.1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "falcon-2-11b",
|
|
"name": "Falcon 2 11B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://falconllm.tii.ae",
|
|
"description": "TII's efficient 11B model with strong reasoning capabilities.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Falcon",
|
|
"LLM",
|
|
"AI",
|
|
"TII"
|
|
],
|
|
"hardware_req": "8GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 11,
|
|
"parameters_active_b": 11,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/falcon.svg"
|
|
},
|
|
{
|
|
"slug": "falcon-180b",
|
|
"name": "Falcon 180B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://falconllm.tii.ae",
|
|
"description": "Massive open model, one of the largest available.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Falcon",
|
|
"LLM",
|
|
"AI",
|
|
"TII"
|
|
],
|
|
"hardware_req": "126GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 126,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 180,
|
|
"parameters_active_b": 180,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/falcon.svg"
|
|
},
|
|
{
|
|
"slug": "falcon-40b",
|
|
"name": "Falcon 40B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://falconllm.tii.ae",
|
|
"description": "The original high-performance open model from TII.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Falcon",
|
|
"LLM",
|
|
"AI",
|
|
"TII"
|
|
],
|
|
"hardware_req": "28GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 28,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 40,
|
|
"parameters_active_b": 40,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/falcon.svg"
|
|
},
|
|
{
|
|
"slug": "falcon-7b",
|
|
"name": "Falcon 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://falconllm.tii.ae",
|
|
"description": "Smaller variant of the Falcon family.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Falcon",
|
|
"LLM",
|
|
"AI",
|
|
"TII"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/falcon.svg"
|
|
},
|
|
{
|
|
"slug": "glm-4-9b",
|
|
"name": "GLM 4 9B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/THUDM/GLM-4",
|
|
"description": "Powerful multilingual model from Zhipu AI.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"GLM",
|
|
"Zhipu AI",
|
|
"LLM",
|
|
"AI"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 128000,
|
|
"parameters_total_b": 9,
|
|
"parameters_active_b": 9,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "chatglm3-6b",
|
|
"name": "ChatGLM3 6B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/THUDM/GLM-4",
|
|
"description": "Optimized Chinese-English conversational model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"GLM",
|
|
"Zhipu AI",
|
|
"LLM",
|
|
"AI"
|
|
],
|
|
"hardware_req": "4GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 6,
|
|
"parameters_active_b": 6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "granite-3-0-8b-instruct",
|
|
"name": "Granite 3.0 8B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.ibm.com/granite",
|
|
"description": "IBM's enterprise-grade open model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"IBM",
|
|
"LLM",
|
|
"AI",
|
|
"Enterprise"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "granite-3-0-2b-instruct",
|
|
"name": "Granite 3.0 2B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.ibm.com/granite",
|
|
"description": "Efficient enterprise model for lower resource environments.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"IBM",
|
|
"LLM",
|
|
"AI",
|
|
"Enterprise"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "granite-code-3b",
|
|
"name": "Granite Code 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.ibm.com/granite",
|
|
"description": "IBM specialized code model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"IBM",
|
|
"LLM",
|
|
"AI",
|
|
"Enterprise"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "granite-code-8b",
|
|
"name": "Granite Code 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.ibm.com/granite",
|
|
"description": "Larger coding model from IBM.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"IBM",
|
|
"LLM",
|
|
"AI",
|
|
"Enterprise"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "flux-1-schnell",
|
|
"name": "Flux.1 Schnell",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://blackforestlabs.ai",
|
|
"description": "Fastest state-of-the-art open image generation model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Black Forest Labs",
|
|
"Image Generation",
|
|
"AI",
|
|
"Diffusion"
|
|
],
|
|
"hardware_req": "8GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 77,
|
|
"parameters_total_b": 12,
|
|
"parameters_active_b": 12,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/flux.svg"
|
|
},
|
|
{
|
|
"slug": "flux-1-dev",
|
|
"name": "Flux.1 Dev",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://blackforestlabs.ai",
|
|
"description": "Developer version of the powerful Flux image model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Black Forest Labs",
|
|
"Image Generation",
|
|
"AI",
|
|
"Diffusion"
|
|
],
|
|
"hardware_req": "8GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 77,
|
|
"parameters_total_b": 12,
|
|
"parameters_active_b": 12,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/flux.svg"
|
|
},
|
|
{
|
|
"slug": "sdxl-1-0",
|
|
"name": "SDXL 1.0",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://stability.ai",
|
|
"description": "The benchmark for open source image generation.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Image Generation",
|
|
"AI",
|
|
"Diffusion",
|
|
"Stability AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 77,
|
|
"parameters_total_b": 6.6,
|
|
"parameters_active_b": 6.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "sd-3-medium",
|
|
"name": "SD 3 Medium",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://stability.ai",
|
|
"description": "Stability AI's latest medium-sized image model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Image Generation",
|
|
"AI",
|
|
"Diffusion",
|
|
"Stability AI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 77,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "stable-cascade",
|
|
"name": "Stable Cascade",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://stability.ai",
|
|
"description": "Efficient cascade architecture for high detail images.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Image Generation",
|
|
"AI",
|
|
"Diffusion",
|
|
"Stability AI"
|
|
],
|
|
"hardware_req": "3GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 77,
|
|
"parameters_total_b": 3.6,
|
|
"parameters_active_b": 3.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "internlm-2-5-7b",
|
|
"name": "InternLM 2.5 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://internlm.intern-ai.org.cn",
|
|
"description": "High performance 7B model with strong reasoning.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Shanghai AI Lab",
|
|
"LLM",
|
|
"AI",
|
|
"InternLM"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "internlm-2-5-20b",
|
|
"name": "InternLM 2.5 20B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://internlm.intern-ai.org.cn",
|
|
"description": "Balanced 20B model filling the gap between 7B and 70B.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Shanghai AI Lab",
|
|
"LLM",
|
|
"AI",
|
|
"InternLM"
|
|
],
|
|
"hardware_req": "14GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 14,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 20,
|
|
"parameters_active_b": 20,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "baichuan-2-7b",
|
|
"name": "Baichuan 2 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.baichuan-ai.com",
|
|
"description": "Top tier Chinese-English bilingual model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Baichuan",
|
|
"LLM",
|
|
"AI",
|
|
"Baichuan Inc."
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "baichuan-2-13b",
|
|
"name": "Baichuan 2 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.baichuan-ai.com",
|
|
"description": "Larger variant of the popular Baichuan series.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Baichuan",
|
|
"LLM",
|
|
"AI",
|
|
"Baichuan Inc."
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "minicpm-2-4b",
|
|
"name": "MiniCPM 2.4B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/OpenBMB/MiniCPM",
|
|
"description": "High efficiency edge model optimization.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"OpenBMB",
|
|
"Mobile",
|
|
"LLM",
|
|
"Edge",
|
|
"AI"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2.4,
|
|
"parameters_active_b": 2.4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "minicpm-v-2-6",
|
|
"name": "MiniCPM V 2.6",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/OpenBMB/MiniCPM",
|
|
"description": "Powerful multimodal model for mobile devices.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"OpenBMB",
|
|
"Mobile",
|
|
"LLM",
|
|
"Edge",
|
|
"AI"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "exaone-3-0-7-8b",
|
|
"name": "Exaone 3.0 7.8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.lgresearch.ai",
|
|
"description": "LG's competitive open model entry.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LG",
|
|
"LG AI Research",
|
|
"LLM",
|
|
"AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7.8,
|
|
"parameters_active_b": 7.8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "jamba-v0-1",
|
|
"name": "Jamba v0.1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.ai21.com/jamba",
|
|
"description": "First production-grade Mamba-Transformer hybrid model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Hybrid",
|
|
"LLM",
|
|
"AI",
|
|
"Mamba",
|
|
"AI21 Labs"
|
|
],
|
|
"hardware_req": "36GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 36,
|
|
"context_window_tokens": 256000,
|
|
"parameters_total_b": 52,
|
|
"parameters_active_b": 12,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "grok-1",
|
|
"name": "Grok 1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://x.ai",
|
|
"description": "Massive 314B parameter open weights model from xAI.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Grok",
|
|
"LLM",
|
|
"AI",
|
|
"xAI"
|
|
],
|
|
"hardware_req": "220GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 220,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 314,
|
|
"parameters_active_b": 86,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/grok.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-vl-7b",
|
|
"name": "DeepSeek VL 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://deepseek.com",
|
|
"description": "Vision language model from DeepSeek.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"DeepSeek",
|
|
"Vision",
|
|
"AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "deepseek-vl-1-3b",
|
|
"name": "DeepSeek VL 1.3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://deepseek.com",
|
|
"description": "Small vision language model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"DeepSeek",
|
|
"Vision",
|
|
"AI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.3,
|
|
"parameters_active_b": 1.3,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "whisper-large-v3",
|
|
"name": "Whisper Large v3",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/openai/whisper",
|
|
"description": "State-of-the-art automatic speech recognition model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"ASR",
|
|
"Audio",
|
|
"OpenAI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 1.5,
|
|
"parameters_active_b": 1.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "whisper-medium",
|
|
"name": "Whisper Medium",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/openai/whisper",
|
|
"description": "Balanced speech recognition model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"ASR",
|
|
"Audio",
|
|
"OpenAI"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 0.7,
|
|
"parameters_active_b": 0.7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "seamless-m4t-large",
|
|
"name": "Seamless M4T Large",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://ai.meta.com/research/seamless-communication/",
|
|
"description": "Massive multilingual translation and transcription model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"Meta",
|
|
"Audio",
|
|
"Translation"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 2.3,
|
|
"parameters_active_b": 2.3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "starcoder-2-15b",
|
|
"name": "StarCoder 2 15B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/bigcode",
|
|
"description": "The successor to the original StarCoder, trained on The Stack v2.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"ServiceNow",
|
|
"BigCode",
|
|
"AI",
|
|
"Coding"
|
|
],
|
|
"hardware_req": "10GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 16384,
|
|
"parameters_total_b": 15,
|
|
"parameters_active_b": 15,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "starcoder-2-7b",
|
|
"name": "StarCoder 2 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/bigcode",
|
|
"description": "Mid-sized coding model from the BigCode project.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"ServiceNow",
|
|
"BigCode",
|
|
"AI",
|
|
"Coding"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 16384,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "starcoder-2-3b",
|
|
"name": "StarCoder 2 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/bigcode",
|
|
"description": "Efficient coding assistant.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"ServiceNow",
|
|
"BigCode",
|
|
"AI",
|
|
"Coding"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 16384,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "llava-1-6-34b",
|
|
"name": "LLaVA 1.6 34B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llava-vl.github.io",
|
|
"description": "High performance large multimodal model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multimodal",
|
|
"Vision",
|
|
"LLaVA Team",
|
|
"AI"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 24,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 34,
|
|
"parameters_active_b": 34,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "llava-1-6-13b",
|
|
"name": "LLaVA 1.6 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llava-vl.github.io",
|
|
"description": "Improved visual reasoning capabilities.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multimodal",
|
|
"Vision",
|
|
"LLaVA Team",
|
|
"AI"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "llava-1-6-7b",
|
|
"name": "LLaVA 1.6 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llava-vl.github.io",
|
|
"description": "Efficient multimodal model based on Vicuna.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multimodal",
|
|
"Vision",
|
|
"LLaVA Team",
|
|
"AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "bakllava",
|
|
"name": "BakLLaVA",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://llava-vl.github.io",
|
|
"description": "Mistral-based LLaVA variant.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multimodal",
|
|
"Vision",
|
|
"LLaVA Team",
|
|
"AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "bloom-176b",
|
|
"name": "BLOOM 176B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://bigscience.huggingface.co",
|
|
"description": "The world's largest open multilingual language model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multilingual",
|
|
"Open Science",
|
|
"BigScience",
|
|
"AI"
|
|
],
|
|
"hardware_req": "123GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 123,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 176,
|
|
"parameters_active_b": 176,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "bloomz-176b",
|
|
"name": "BLOOMZ 176B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://bigscience.huggingface.co",
|
|
"description": "Instruction tuned version of BLOOM.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multilingual",
|
|
"Open Science",
|
|
"BigScience",
|
|
"AI"
|
|
],
|
|
"hardware_req": "123GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 123,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 176,
|
|
"parameters_active_b": 176,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "bloom-7b",
|
|
"name": "BLOOM 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://bigscience.huggingface.co",
|
|
"description": "Smaller variant of the BLOOM family.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multilingual",
|
|
"Open Science",
|
|
"BigScience",
|
|
"AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "pythia-12b",
|
|
"name": "Pythia 12B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/EleutherAI/pythia",
|
|
"description": "Designed to interpret and analyze LLM training dynamics.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Research",
|
|
"EleutherAI",
|
|
"AI"
|
|
],
|
|
"hardware_req": "8GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 12,
|
|
"parameters_active_b": 12,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "pythia-6-9b",
|
|
"name": "Pythia 6.9B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/EleutherAI/pythia",
|
|
"description": "Standard research model size.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Research",
|
|
"EleutherAI",
|
|
"AI"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 6.9,
|
|
"parameters_active_b": 6.9,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "opt-175b",
|
|
"name": "OPT 175B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/facebookresearch/metaseq",
|
|
"description": "Meta's Open Pre-trained Transformer, matching GPT-3 performance.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Legacy",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "122GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 122,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 175,
|
|
"parameters_active_b": 175,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "opt-66b",
|
|
"name": "OPT 66B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/facebookresearch/metaseq",
|
|
"description": "Large scale OPT model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Legacy",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "46GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 46,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 66,
|
|
"parameters_active_b": 66,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "opt-30b",
|
|
"name": "OPT 30B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/facebookresearch/metaseq",
|
|
"description": "Mid-range OPT model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Legacy",
|
|
"Meta"
|
|
],
|
|
"hardware_req": "21GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "h2o-danube-2-1-8b",
|
|
"name": "H2O Danube 2 1.8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://h2o.ai",
|
|
"description": "Highly efficient mobile-class model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"H2O",
|
|
"AI",
|
|
"H2O.ai"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 1.8,
|
|
"parameters_active_b": 1.8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "fuyu-8b",
|
|
"name": "Fuyu 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://adept.ai",
|
|
"description": "Simple architecture multimodal model for digital agents.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Multimodal",
|
|
"Adept",
|
|
"Agent",
|
|
"AI"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "nexusraven-v2-13b",
|
|
"name": "NexusRaven V2 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://nexusflow.ai",
|
|
"description": "Specialized in function calling and tool use.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Nexusflow",
|
|
"Raven"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "xverse-65b",
|
|
"name": "Xverse 65B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/xverse-ai",
|
|
"description": "Large multilingual model trained from scratch.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Shenzhen Yuanxiang",
|
|
"Multilingual"
|
|
],
|
|
"hardware_req": "46GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 46,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 65,
|
|
"parameters_active_b": 65,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "xverse-13b",
|
|
"name": "Xverse 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/xverse-ai",
|
|
"description": "Efficient multilingual model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Shenzhen Yuanxiang",
|
|
"Multilingual"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "aquila2-34b",
|
|
"name": "Aquila2 34B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/FlagAI-Open/FlagAI",
|
|
"description": "Strong performance on reasoning benchmarks.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"BAAI",
|
|
"AI"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 24,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 34,
|
|
"parameters_active_b": 34,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "telechat-12b",
|
|
"name": "TeleChat 12B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/Tele-AI/Telechat",
|
|
"description": "Telecommunications oriented LLM.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Telecom",
|
|
"China Telecom",
|
|
"AI"
|
|
],
|
|
"hardware_req": "8GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 8,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 12,
|
|
"parameters_active_b": 12,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "orion-14b",
|
|
"name": "Orion 14B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/OrionStarAI/Orion",
|
|
"description": "Chat and conversational model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Orion",
|
|
"AI",
|
|
"OrionStar"
|
|
],
|
|
"hardware_req": "10GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "seallm-7b-v2-5",
|
|
"name": "SeaLLM 7B v2.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/SeaLLMs",
|
|
"description": "State-of-the-art multilingual LLM for Southeast Asian languages.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Southeast Asia",
|
|
"AI",
|
|
"Alibaba (sea-lion)"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "openbiollm-8b",
|
|
"name": "OpenBioLLM 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/saama",
|
|
"description": "Advanced medical LLM outperforming GPT-4 on biomedical benchmarks.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Saama AI",
|
|
"AI",
|
|
"LLM",
|
|
"Medical",
|
|
"Biology"
|
|
],
|
|
"hardware_req": "6GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "openbiollm-70b",
|
|
"name": "OpenBioLLM 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/saama",
|
|
"description": "Massive scale biomedical research model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Saama AI",
|
|
"AI",
|
|
"LLM",
|
|
"Medical",
|
|
"Biology"
|
|
],
|
|
"hardware_req": "49GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "meditron-70b",
|
|
"name": "Meditron 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/epfl-llm",
|
|
"description": "Open-access LLM adapted to the medical domain.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"EPFL",
|
|
"AI",
|
|
"Medical"
|
|
],
|
|
"hardware_req": "49GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "meditron-7b",
|
|
"name": "Meditron 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/epfl-llm",
|
|
"description": "Efficient medical assistant model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"EPFL",
|
|
"AI",
|
|
"Medical"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "gorilla-openfunctions-v2",
|
|
"name": "Gorilla OpenFunctions v2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://gorilla.cs.berkeley.edu",
|
|
"description": "The best open source model for API function calling.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"Agents",
|
|
"Berkeley",
|
|
"API"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "wizardlm-2-8x22b",
|
|
"name": "WizardLM 2 8x22B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/nlpxucan/WizardLM",
|
|
"description": "Top-tier reasoning model from Microsoft using Evol-Instruct.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Microsoft",
|
|
"AI",
|
|
"Evol-Instruct"
|
|
],
|
|
"hardware_req": "99GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 99,
|
|
"context_window_tokens": 65536,
|
|
"parameters_total_b": 141,
|
|
"parameters_active_b": 39,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "wizardlm-2-7b",
|
|
"name": "WizardLM 2 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/nlpxucan/WizardLM",
|
|
"description": "Fastest and most capable 7B model for complex instructions.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Microsoft",
|
|
"AI",
|
|
"Evol-Instruct"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 32000,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "vicuna-13b-v1-5",
|
|
"name": "Vicuna 13B v1.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://lmsys.org",
|
|
"description": "The classic open chat model based on Llama 2.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"LMSYS",
|
|
"Chatbot"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 16384,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "vicuna-7b-v1-5",
|
|
"name": "Vicuna 7B v1.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://lmsys.org",
|
|
"description": "Highly efficient chat model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"LMSYS",
|
|
"Chatbot"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 16384,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "zephyr-7b-beta",
|
|
"name": "Zephyr 7B Beta",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/HuggingFaceH4",
|
|
"description": "Pioneered DPO (Direct Preference Optimization) for better alignment without RLHF.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Hugging Face H4",
|
|
"DPO"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 8192,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "zephyr-141b-a39b",
|
|
"name": "Zephyr 141B A39B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/HuggingFaceH4",
|
|
"description": "Experimental DPO fine-tune of Mixtral 8x22B.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"Hugging Face H4",
|
|
"DPO"
|
|
],
|
|
"hardware_req": "99GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 99,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 141,
|
|
"parameters_active_b": 39,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "openelm-3b",
|
|
"name": "OpenELM 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/apple/corenet",
|
|
"description": "Apple's efficiently layered open model for devices.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"On-Device",
|
|
"Apple"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "openelm-1-1b",
|
|
"name": "OpenELM 1.1B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/apple/corenet",
|
|
"description": "Tiny Apple model for extreme edge cases.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"On-Device",
|
|
"Apple"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 1.1,
|
|
"parameters_active_b": 1.1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mamba-2-8b",
|
|
"name": "Mamba 2.8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/state-spaces/mamba",
|
|
"description": "Linear-time sequence modeling with state space architecture.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Cartesia",
|
|
"AI",
|
|
"SSM",
|
|
"LLM",
|
|
"Non-Transformer"
|
|
],
|
|
"hardware_req": "2GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 2.8,
|
|
"parameters_active_b": 2.8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "mamba-1-4b",
|
|
"name": "Mamba 1.4B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/state-spaces/mamba",
|
|
"description": "Efficient non-transformer architecture.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Cartesia",
|
|
"AI",
|
|
"SSM",
|
|
"LLM",
|
|
"Non-Transformer"
|
|
],
|
|
"hardware_req": "1GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 1.4,
|
|
"parameters_active_b": 1.4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "rwkv-6-14b",
|
|
"name": "RWKV 6 14B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.rwkv.com",
|
|
"description": "RNN with Transformer-level performance and infinite context potential.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"RNN",
|
|
"BlinkDL"
|
|
],
|
|
"hardware_req": "10GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "rwkv-6-7b",
|
|
"name": "RWKV 6 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.rwkv.com",
|
|
"description": "Efficient RNN language model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"AI",
|
|
"RNN",
|
|
"BlinkDL"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "cerebras-gpt-13b",
|
|
"name": "Cerebras GPT 13B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://www.cerebras.net",
|
|
"description": "Trained on the massive CS-2 wafer-scale engine.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"LLM",
|
|
"Cerebras",
|
|
"AI",
|
|
"Wafer-Scale"
|
|
],
|
|
"hardware_req": "9GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 2048,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361"
|
|
},
|
|
{
|
|
"slug": "qwen-audio-chat",
|
|
"name": "Qwen-Audio-Chat",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://github.com/QwenLM/Qwen-Audio",
|
|
"description": "Universal audio understanding model.",
|
|
"pros": [
|
|
"Open Source",
|
|
"High Performance",
|
|
"Run Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU inference",
|
|
"Management complexity"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "Open Weights",
|
|
"tags": [
|
|
"Audio",
|
|
"AI",
|
|
"Multimodal",
|
|
"Alibaba Cloud"
|
|
],
|
|
"hardware_req": "5GB+ VRAM",
|
|
"hosting_type": "both",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 0,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-7b-instruct",
|
|
"name": "Qwen2.5 7B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-7B-Instruct. 1073 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1073,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-7B",
|
|
"base_model:finetune:Qwen/Qwen2.5-7B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-7b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-0.6b",
|
|
"name": "Qwen3 0.6B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-0.6B",
|
|
"description": "Open source model Qwen/Qwen3-0.6B. 1083 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1083,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-0.6B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-0.6B-Base",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0.6,
|
|
"parameters_active_b": 0.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-0.6b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "gpt2",
|
|
"name": "Gpt2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/openai-community/gpt2",
|
|
"description": "Open source model openai-community/gpt2. 3114 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3114,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"tflite",
|
|
"rust",
|
|
"onnx",
|
|
"safetensors",
|
|
"gpt2",
|
|
"exbert",
|
|
"en",
|
|
"doi:10.57967/hf/0039",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt2"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-1.5b-instruct",
|
|
"name": "Qwen2.5 1.5B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-1.5B-Instruct. 617 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 617,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-1.5B",
|
|
"base_model:finetune:Qwen/Qwen2.5-1.5B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.5,
|
|
"parameters_active_b": 1.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-1.5b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-3b-instruct",
|
|
"name": "Qwen2.5 3B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-3B-Instruct. 404 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 404,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-3B",
|
|
"base_model:finetune:Qwen/Qwen2.5-3B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-3b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3.1-8b-instruct",
|
|
"name": "Llama 3.1 8B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct",
|
|
"description": "Open source model meta-llama/Llama-3.1-8B-Instruct. 5467 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 5467,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"base_model:meta-llama/Llama-3.1-8B",
|
|
"base_model:finetune:meta-llama/Llama-3.1-8B",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.1-8b-instruct",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "gpt-oss-20b",
|
|
"name": "Gpt Oss 20B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/openai/gpt-oss-20b",
|
|
"description": "Open source model openai/gpt-oss-20b. 4378 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 4378,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"gpt_oss",
|
|
"vllm",
|
|
"conversational",
|
|
"arxiv:2508.10925",
|
|
"endpoints_compatible",
|
|
"8-bit",
|
|
"mxfp4",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 14,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 20,
|
|
"parameters_active_b": 3.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt-oss-20b"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-0.5b-instruct",
|
|
"name": "Qwen2.5 0.5B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-0.5B-Instruct. 463 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 463,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-0.5B",
|
|
"base_model:finetune:Qwen/Qwen2.5-0.5B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0.5,
|
|
"parameters_active_b": 0.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-0.5b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-4b",
|
|
"name": "Qwen3 4B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-4B",
|
|
"description": "Open source model Qwen/Qwen3-4B. 552 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 552,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-4B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-4B-Base",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-4b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-8b",
|
|
"name": "Qwen3 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-8B",
|
|
"description": "Open source model Qwen/Qwen3-8B. 940 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 940,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-8B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-8B-Base",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-8b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-32b-instruct",
|
|
"name": "Qwen2.5 32B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-32B-Instruct. 328 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 328,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-32B",
|
|
"base_model:finetune:Qwen/Qwen2.5-32B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-32b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "opt-125m",
|
|
"name": "Opt 125M",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/facebook/opt-125m",
|
|
"description": "Open source model facebook/opt-125m. 233 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 233,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"opt",
|
|
"en",
|
|
"arxiv:2205.01068",
|
|
"arxiv:2005.14165",
|
|
"text-generation-inference",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "opt-125m"
|
|
},
|
|
{
|
|
"slug": "qwen3-1.7b",
|
|
"name": "Qwen3 1.7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-1.7B",
|
|
"description": "Open source model Qwen/Qwen3-1.7B. 422 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 422,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-1.7B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-1.7B-Base",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.7,
|
|
"parameters_active_b": 1.7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-1.7b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "tiny-qwen2forcausallm-2.5",
|
|
"name": "Tiny Qwen2Forcausallm 2.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
|
|
"description": "Open source model trl-internal-testing/tiny-Qwen2ForCausalLM-2.5. 3 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"trl",
|
|
"conversational",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "tiny-qwen2forcausallm-2.5",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "dolphin-2.9.1-yi-1.5-34b",
|
|
"name": "Dolphin 2.9.1 Yi 1.5 34B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/dphn/dolphin-2.9.1-yi-1.5-34b",
|
|
"description": "Open source model dphn/dolphin-2.9.1-yi-1.5-34b. 54 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 54,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"generated_from_trainer",
|
|
"axolotl",
|
|
"conversational",
|
|
"dataset:cognitivecomputations/Dolphin-2.9",
|
|
"dataset:teknium/OpenHermes-2.5",
|
|
"dataset:m-a-p/CodeFeedback-Filtered-Instruction",
|
|
"dataset:cognitivecomputations/dolphin-coder",
|
|
"dataset:cognitivecomputations/samantha-data",
|
|
"dataset:microsoft/orca-math-word-problems-200k",
|
|
"dataset:Locutusque/function-calling-chatml",
|
|
"dataset:internlm/Agent-FLAN",
|
|
"base_model:01-ai/Yi-1.5-34B",
|
|
"base_model:finetune:01-ai/Yi-1.5-34B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 24,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 34,
|
|
"parameters_active_b": 34,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "dolphin-2.9.1-yi-1.5-34b"
|
|
},
|
|
{
|
|
"slug": "qwen3-embedding-0.6b",
|
|
"name": "Qwen3 Embedding 0.6B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Embedding-0.6B",
|
|
"description": "Open source model Qwen/Qwen3-Embedding-0.6B. 879 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 879,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"sentence-transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"transformers",
|
|
"sentence-similarity",
|
|
"feature-extraction",
|
|
"text-embeddings-inference",
|
|
"arxiv:2506.05176",
|
|
"base_model:Qwen/Qwen3-0.6B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-0.6B-Base",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0.6,
|
|
"parameters_active_b": 0.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-embedding-0.6b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "gpt-oss-120b",
|
|
"name": "Gpt Oss 120B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/openai/gpt-oss-120b",
|
|
"description": "Open source model openai/gpt-oss-120b. 4503 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 4503,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"gpt_oss",
|
|
"vllm",
|
|
"conversational",
|
|
"arxiv:2508.10925",
|
|
"endpoints_compatible",
|
|
"8-bit",
|
|
"mxfp4",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 84,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 120,
|
|
"parameters_active_b": 5.1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt-oss-120b"
|
|
},
|
|
{
|
|
"slug": "qwen3-4b-instruct-2507",
|
|
"name": "Qwen3 4B Instruct 2507",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507",
|
|
"description": "Open source model Qwen/Qwen3-4B-Instruct-2507. 730 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 730,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-4b-instruct-2507",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "moondream2",
|
|
"name": "Moondream2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/vikhyatk/moondream2",
|
|
"description": "Open source model vikhyatk/moondream2. 1373 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1373,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"moondream1",
|
|
"image-text-to-text",
|
|
"custom_code",
|
|
"doi:10.57967/hf/6762",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "moondream2"
|
|
},
|
|
{
|
|
"slug": "llama-3.2-1b-instruct",
|
|
"name": "Llama 3.2 1B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct",
|
|
"description": "Open source model meta-llama/Llama-3.2-1B-Instruct. 1292 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1292,
|
|
"language": "Python",
|
|
"license": "llama3.2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"arxiv:2405.16406",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.2-1b-instruct",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2-1.5b-instruct",
|
|
"name": "Qwen2 1.5B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2-1.5B-Instruct. 158 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 158,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.5,
|
|
"parameters_active_b": 1.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2-1.5b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-0.5b-instruct",
|
|
"name": "Qwen2.5 Coder 0.5B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-0.5B-Instruct. 64 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 64,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-0.5B",
|
|
"base_model:finetune:Qwen/Qwen2.5-Coder-0.5B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0.5,
|
|
"parameters_active_b": 0.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-0.5b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "kimi-k2.5",
|
|
"name": "Kimi K2.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/mlx-community/Kimi-K2.5",
|
|
"description": "Open source model mlx-community/Kimi-K2.5. 28 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 28,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"mlx",
|
|
"safetensors",
|
|
"kimi_k25",
|
|
"conversational",
|
|
"custom_code",
|
|
"base_model:moonshotai/Kimi-K2.5",
|
|
"base_model:quantized:moonshotai/Kimi-K2.5",
|
|
"4-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "kimi-k2.5"
|
|
},
|
|
{
|
|
"slug": "mistral-7b-instruct-v0.2",
|
|
"name": "Mistral 7B Instruct V0.2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
|
|
"description": "Open source model mistralai/Mistral-7B-Instruct-v0.2. 3075 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3075,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"mistral",
|
|
"finetuned",
|
|
"mistral-common",
|
|
"conversational",
|
|
"arxiv:2310.06825",
|
|
"text-generation-inference",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "mistral-7b-instruct-v0.2",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-30b-a3b-instruct-2507",
|
|
"name": "Qwen3 30B A3B Instruct 2507",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507",
|
|
"description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507. 766 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 766,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2402.17463",
|
|
"arxiv:2407.02490",
|
|
"arxiv:2501.15383",
|
|
"arxiv:2404.06654",
|
|
"arxiv:2505.09388",
|
|
"eval-results",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-30b-a3b-instruct-2507",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "llm-jp-3-3.7b-instruct",
|
|
"name": "Llm Jp 3 3.7B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct",
|
|
"description": "Open source model llm-jp/llm-jp-3-3.7b-instruct. 13 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 13,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"conversational",
|
|
"en",
|
|
"ja",
|
|
"text-generation-inference",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3.7,
|
|
"parameters_active_b": 3.7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llm-jp-3-3.7b-instruct"
|
|
},
|
|
{
|
|
"slug": "llama-3.2-3b-instruct",
|
|
"name": "Llama 3.2 3B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct",
|
|
"description": "Open source model meta-llama/Llama-3.2-3B-Instruct. 1986 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1986,
|
|
"language": "Python",
|
|
"license": "llama3.2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"arxiv:2405.16406",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.2-3b-instruct",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "distilgpt2",
|
|
"name": "Distilgpt2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/distilbert/distilgpt2",
|
|
"description": "Open source model distilbert/distilgpt2. 609 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 609,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"tflite",
|
|
"rust",
|
|
"coreml",
|
|
"safetensors",
|
|
"gpt2",
|
|
"exbert",
|
|
"en",
|
|
"dataset:openwebtext",
|
|
"arxiv:1910.01108",
|
|
"arxiv:2201.08542",
|
|
"arxiv:2203.12574",
|
|
"arxiv:1910.09700",
|
|
"arxiv:1503.02531",
|
|
"model-index",
|
|
"co2_eq_emissions",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "distilgpt2"
|
|
},
|
|
{
|
|
"slug": "qwen3-embedding-8b",
|
|
"name": "Qwen3 Embedding 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Embedding-8B",
|
|
"description": "Open source model Qwen/Qwen3-Embedding-8B. 584 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 584,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"sentence-transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"transformers",
|
|
"sentence-similarity",
|
|
"feature-extraction",
|
|
"text-embeddings-inference",
|
|
"arxiv:2506.05176",
|
|
"base_model:Qwen/Qwen3-8B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-8B-Base",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-embedding-8b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "meta-llama-3-8b",
|
|
"name": "Meta Llama 3 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B",
|
|
"description": "Open source model meta-llama/Meta-Llama-3-8B. 6458 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 6458,
|
|
"language": "Python",
|
|
"license": "llama3",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"en",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "meta-llama-3-8b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "tinyllama-1.1b-chat-v1.0",
|
|
"name": "Tinyllama 1.1B Chat V1.0",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
|
|
"description": "Open source model TinyLlama/TinyLlama-1.1B-Chat-v1.0. 1526 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1526,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"conversational",
|
|
"en",
|
|
"dataset:cerebras/SlimPajama-627B",
|
|
"dataset:bigcode/starcoderdata",
|
|
"dataset:HuggingFaceH4/ultrachat_200k",
|
|
"dataset:HuggingFaceH4/ultrafeedback_binarized",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "tinyllama-1.1b-chat-v1.0"
|
|
},
|
|
{
|
|
"slug": "glm-4.7-flash",
|
|
"name": "Glm 4.7 Flash",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/zai-org/GLM-4.7-Flash",
|
|
"description": "Open source model zai-org/GLM-4.7-Flash. 1538 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1538,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"glm4_moe_lite",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"arxiv:2508.06471",
|
|
"eval-results",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-4.7-flash"
|
|
},
|
|
{
|
|
"slug": "llama-3.2-1b",
|
|
"name": "Llama 3.2 1B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.2-1B",
|
|
"description": "Open source model meta-llama/Llama-3.2-1B. 2295 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2295,
|
|
"language": "Python",
|
|
"license": "llama3.2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"arxiv:2405.16406",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.2-1b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-32b",
|
|
"name": "Qwen3 32B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-32B",
|
|
"description": "Open source model Qwen/Qwen3-32B. 656 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 656,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-32b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3.2-1b-instruct-fp8-dynamic",
|
|
"name": "Llama 3.2 1B Instruct Fp8 Dynamic",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic",
|
|
"description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8-dynamic. 3 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3,
|
|
"language": "Python",
|
|
"license": "llama3.2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"safetensors",
|
|
"llama",
|
|
"fp8",
|
|
"vllm",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"base_model:meta-llama/Llama-3.2-1B-Instruct",
|
|
"base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.2-1b-instruct-fp8-dynamic",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-1.5b-instruct",
|
|
"name": "Qwen2.5 Coder 1.5B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-1.5B-Instruct. 106 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 106,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-1.5B",
|
|
"base_model:finetune:Qwen/Qwen2.5-Coder-1.5B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 5,
|
|
"parameters_active_b": 5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-1.5b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "meta-llama-3-8b-instruct",
|
|
"name": "Meta Llama 3 8B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct",
|
|
"description": "Open source model meta-llama/Meta-Llama-3-8B-Instruct. 4380 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 4380,
|
|
"language": "Python",
|
|
"license": "llama3",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"conversational",
|
|
"en",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "meta-llama-3-8b-instruct",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "gemma-3-1b-it",
|
|
"name": "Gemma 3 1B It",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/google/gemma-3-1b-it",
|
|
"description": "Open source model google/gemma-3-1b-it. 842 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 842,
|
|
"language": "Python",
|
|
"license": "gemma",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"gemma3_text",
|
|
"conversational",
|
|
"arxiv:1905.07830",
|
|
"arxiv:1905.10044",
|
|
"arxiv:1911.11641",
|
|
"arxiv:1904.09728",
|
|
"arxiv:1705.03551",
|
|
"arxiv:1911.01547",
|
|
"arxiv:1907.10641",
|
|
"arxiv:1903.00161",
|
|
"arxiv:2009.03300",
|
|
"arxiv:2304.06364",
|
|
"arxiv:2103.03874",
|
|
"arxiv:2110.14168",
|
|
"arxiv:2311.12022",
|
|
"arxiv:2108.07732",
|
|
"arxiv:2107.03374",
|
|
"arxiv:2210.03057",
|
|
"arxiv:2106.03193",
|
|
"arxiv:1910.11856",
|
|
"arxiv:2502.12404",
|
|
"arxiv:2502.21228",
|
|
"arxiv:2404.16816",
|
|
"arxiv:2104.12756",
|
|
"arxiv:2311.16502",
|
|
"arxiv:2203.10244",
|
|
"arxiv:2404.12390",
|
|
"arxiv:1810.12440",
|
|
"arxiv:1908.02660",
|
|
"arxiv:2312.11805",
|
|
"base_model:google/gemma-3-1b-pt",
|
|
"base_model:finetune:google/gemma-3-1b-pt",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gemma-3-1b-it",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "phi-2",
|
|
"name": "Phi 2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/microsoft/phi-2",
|
|
"description": "Open source model microsoft/phi-2. 3425 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3425,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"phi",
|
|
"nlp",
|
|
"code",
|
|
"en",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "phi-2",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-7b-instruct",
|
|
"name": "Qwen2.5 Coder 7B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct. 646 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 646,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-7B",
|
|
"base_model:finetune:Qwen/Qwen2.5-Coder-7B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-7b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-7b",
|
|
"name": "Qwen2.5 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-7B",
|
|
"description": "Open source model Qwen/Qwen2.5-7B. 264 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 264,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-7b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-distill-qwen-1.5b",
|
|
"name": "Deepseek R1 Distill Qwen 1.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B. 1446 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1446,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"arxiv:2501.12948",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 5,
|
|
"parameters_active_b": 5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-distill-qwen-1.5b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-v3",
|
|
"name": "Deepseek V3",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-V3",
|
|
"description": "Open source model deepseek-ai/DeepSeek-V3. 4024 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 4024,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"deepseek_v3",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2412.19437",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-v3"
|
|
},
|
|
{
|
|
"slug": "gpt2-large",
|
|
"name": "Gpt2 Large",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/openai-community/gpt2-large",
|
|
"description": "Open source model openai-community/gpt2-large. 344 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 344,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"rust",
|
|
"onnx",
|
|
"safetensors",
|
|
"gpt2",
|
|
"en",
|
|
"arxiv:1910.09700",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt2-large"
|
|
},
|
|
{
|
|
"slug": "glm-4.7-flash-mlx-8bit",
|
|
"name": "Glm 4.7 Flash Mlx 8Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-8bit",
|
|
"description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-8bit. 9 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 9,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"glm4_moe_lite",
|
|
"mlx",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"base_model:zai-org/GLM-4.7-Flash",
|
|
"base_model:quantized:zai-org/GLM-4.7-Flash",
|
|
"endpoints_compatible",
|
|
"8-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-4.7-flash-mlx-8bit"
|
|
},
|
|
{
|
|
"slug": "glm-4.7-flash-mlx-6bit",
|
|
"name": "Glm 4.7 Flash Mlx 6Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmstudio-community/GLM-4.7-Flash-MLX-6bit",
|
|
"description": "Open source model lmstudio-community/GLM-4.7-Flash-MLX-6bit. 7 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 7,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"glm4_moe_lite",
|
|
"mlx",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"base_model:zai-org/GLM-4.7-Flash",
|
|
"base_model:quantized:zai-org/GLM-4.7-Flash",
|
|
"endpoints_compatible",
|
|
"6-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 6,
|
|
"parameters_active_b": 6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-4.7-flash-mlx-6bit"
|
|
},
|
|
{
|
|
"slug": "qwen3-0.6b-fp8",
|
|
"name": "Qwen3 0.6B Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-0.6B-FP8",
|
|
"description": "Open source model Qwen/Qwen3-0.6B-FP8. 56 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 56,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-0.6B",
|
|
"base_model:quantized:Qwen/Qwen3-0.6B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 6,
|
|
"parameters_active_b": 6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-0.6b-fp8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3.1-8b",
|
|
"name": "Llama 3.1 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.1-8B",
|
|
"description": "Open source model meta-llama/Llama-3.1-8B. 2065 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2065,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.1-8b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "pythia-160m",
|
|
"name": "Pythia 160M",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/EleutherAI/pythia-160m",
|
|
"description": "Open source model EleutherAI/pythia-160m. 38 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 38,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"gpt_neox",
|
|
"causal-lm",
|
|
"pythia",
|
|
"en",
|
|
"dataset:EleutherAI/pile",
|
|
"arxiv:2304.01373",
|
|
"arxiv:2101.00027",
|
|
"arxiv:2201.07311",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "pythia-160m"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-distill-qwen-32b",
|
|
"name": "Deepseek R1 Distill Qwen 32B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B. 1517 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1517,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"arxiv:2501.12948",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-distill-qwen-32b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "hunyuanocr",
|
|
"name": "Hunyuanocr",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/tencent/HunyuanOCR",
|
|
"description": "Open source model tencent/HunyuanOCR. 553 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 553,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"hunyuan_vl",
|
|
"ocr",
|
|
"hunyuan",
|
|
"vision-language",
|
|
"image-to-text",
|
|
"1B",
|
|
"end-to-end",
|
|
"image-text-to-text",
|
|
"conversational",
|
|
"multilingual",
|
|
"arxiv:2511.19575",
|
|
"base_model:tencent/HunyuanOCR",
|
|
"base_model:finetune:tencent/HunyuanOCR",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "hunyuanocr"
|
|
},
|
|
{
|
|
"slug": "qwen3-30b-a3b",
|
|
"name": "Qwen3 30B A3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-30B-A3B",
|
|
"description": "Open source model Qwen/Qwen3-30B-A3B. 855 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 855,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-30B-A3B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-30B-A3B-Base",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-30b-a3b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-0.5b",
|
|
"name": "Qwen2.5 0.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-0.5B",
|
|
"description": "Open source model Qwen/Qwen2.5-0.5B. 372 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 372,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 5,
|
|
"parameters_active_b": 5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-0.5b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-32b-instruct-awq",
|
|
"name": "Qwen2.5 32B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-32B-Instruct-AWQ",
|
|
"description": "Open source model Qwen/Qwen2.5-32B-Instruct-AWQ. 94 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 94,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-32B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-32B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-32b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "nvidia-nemotron-3-nano-30b-a3b-fp8",
|
|
"name": "Nvidia Nemotron 3 Nano 30B A3B Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8",
|
|
"description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8. 284 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 284,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"nemotron_h",
|
|
"feature-extraction",
|
|
"nvidia",
|
|
"pytorch",
|
|
"conversational",
|
|
"custom_code",
|
|
"en",
|
|
"es",
|
|
"fr",
|
|
"de",
|
|
"ja",
|
|
"it",
|
|
"dataset:nvidia/Nemotron-Pretraining-Code-v1",
|
|
"dataset:nvidia/Nemotron-CC-v2",
|
|
"dataset:nvidia/Nemotron-Pretraining-SFT-v1",
|
|
"dataset:nvidia/Nemotron-CC-Math-v1",
|
|
"dataset:nvidia/Nemotron-Pretraining-Code-v2",
|
|
"dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
|
|
"dataset:nvidia/Nemotron-CC-v2.1",
|
|
"dataset:nvidia/Nemotron-CC-Code-v1",
|
|
"dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
|
|
"dataset:nvidia/Nemotron-Competitive-Programming-v1",
|
|
"dataset:nvidia/Nemotron-Math-v2",
|
|
"dataset:nvidia/Nemotron-Agentic-v1",
|
|
"dataset:nvidia/Nemotron-Math-Proofs-v1",
|
|
"dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
|
|
"dataset:nvidia/Nemotron-Science-v1",
|
|
"dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
|
|
"arxiv:2512.20848",
|
|
"arxiv:2512.20856",
|
|
"base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
|
|
"base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
|
|
"eval-results",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "nvidia-nemotron-3-nano-30b-a3b-fp8"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-14b-instruct",
|
|
"name": "Qwen2.5 14B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-14B-Instruct. 312 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 312,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-14B",
|
|
"base_model:finetune:Qwen/Qwen2.5-14B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-14b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "nvidia-nemotron-3-nano-30b-a3b-bf16",
|
|
"name": "Nvidia Nemotron 3 Nano 30B A3B Bf16",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
|
|
"description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16. 634 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 634,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"nemotron_h",
|
|
"feature-extraction",
|
|
"nvidia",
|
|
"pytorch",
|
|
"conversational",
|
|
"custom_code",
|
|
"en",
|
|
"es",
|
|
"fr",
|
|
"de",
|
|
"ja",
|
|
"it",
|
|
"dataset:nvidia/Nemotron-Pretraining-Code-v1",
|
|
"dataset:nvidia/Nemotron-CC-v2",
|
|
"dataset:nvidia/Nemotron-Pretraining-SFT-v1",
|
|
"dataset:nvidia/Nemotron-CC-Math-v1",
|
|
"dataset:nvidia/Nemotron-Pretraining-Code-v2",
|
|
"dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
|
|
"dataset:nvidia/Nemotron-CC-v2.1",
|
|
"dataset:nvidia/Nemotron-CC-Code-v1",
|
|
"dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
|
|
"dataset:nvidia/Nemotron-Competitive-Programming-v1",
|
|
"dataset:nvidia/Nemotron-Math-v2",
|
|
"dataset:nvidia/Nemotron-Agentic-v1",
|
|
"dataset:nvidia/Nemotron-Math-Proofs-v1",
|
|
"dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
|
|
"dataset:nvidia/Nemotron-Science-v1",
|
|
"dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
|
|
"arxiv:2512.20848",
|
|
"arxiv:2512.20856",
|
|
"eval-results",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "nvidia-nemotron-3-nano-30b-a3b-bf16"
|
|
},
|
|
{
|
|
"slug": "openelm-1_1b-instruct",
|
|
"name": "Openelm 1_1B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/apple/OpenELM-1_1B-Instruct",
|
|
"description": "Open source model apple/OpenELM-1_1B-Instruct. 72 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 72,
|
|
"language": "Python",
|
|
"license": "apple-amlr",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"openelm",
|
|
"custom_code",
|
|
"arxiv:2404.14619",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "openelm-1_1b-instruct"
|
|
},
|
|
{
|
|
"slug": "tiny-random-llamaforcausallm",
|
|
"name": "Tiny Random Llamaforcausallm",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/hmellor/tiny-random-LlamaForCausalLM",
|
|
"description": "Open source model hmellor/tiny-random-LlamaForCausalLM. 0 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 0,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"conversational",
|
|
"arxiv:1910.09700",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "tiny-random-llamaforcausallm",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-next-80b-a3b-instruct",
|
|
"name": "Qwen3 Next 80B A3B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct",
|
|
"description": "Open source model Qwen/Qwen3-Next-80B-A3B-Instruct. 937 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 937,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_next",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2404.06654",
|
|
"arxiv:2505.09388",
|
|
"arxiv:2501.15383",
|
|
"eval-results",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 56,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 80,
|
|
"parameters_active_b": 80,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-next-80b-a3b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "h2ovl-mississippi-800m",
|
|
"name": "H2Ovl Mississippi 800M",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/h2oai/h2ovl-mississippi-800m",
|
|
"description": "Open source model h2oai/h2ovl-mississippi-800m. 39 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 39,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"h2ovl_chat",
|
|
"feature-extraction",
|
|
"gpt",
|
|
"llm",
|
|
"multimodal large language model",
|
|
"ocr",
|
|
"conversational",
|
|
"custom_code",
|
|
"en",
|
|
"arxiv:2410.13611",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "h2ovl-mississippi-800m"
|
|
},
|
|
{
|
|
"slug": "bloomz-560m",
|
|
"name": "Bloomz 560M",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/bigscience/bloomz-560m",
|
|
"description": "Open source model bigscience/bloomz-560m. 137 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 137,
|
|
"language": "Python",
|
|
"license": "bigscience-bloom-rail-1.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tensorboard",
|
|
"safetensors",
|
|
"bloom",
|
|
"ak",
|
|
"ar",
|
|
"as",
|
|
"bm",
|
|
"bn",
|
|
"ca",
|
|
"code",
|
|
"en",
|
|
"es",
|
|
"eu",
|
|
"fon",
|
|
"fr",
|
|
"gu",
|
|
"hi",
|
|
"id",
|
|
"ig",
|
|
"ki",
|
|
"kn",
|
|
"lg",
|
|
"ln",
|
|
"ml",
|
|
"mr",
|
|
"ne",
|
|
"nso",
|
|
"ny",
|
|
"or",
|
|
"pa",
|
|
"pt",
|
|
"rn",
|
|
"rw",
|
|
"sn",
|
|
"st",
|
|
"sw",
|
|
"ta",
|
|
"te",
|
|
"tn",
|
|
"ts",
|
|
"tum",
|
|
"tw",
|
|
"ur",
|
|
"vi",
|
|
"wo",
|
|
"xh",
|
|
"yo",
|
|
"zh",
|
|
"zu",
|
|
"dataset:bigscience/xP3",
|
|
"arxiv:2211.01786",
|
|
"model-index",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "bloomz-560m"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-1.5b-quantized.w8a8",
|
|
"name": "Qwen2.5 1.5B Quantized.W8A8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/RedHatAI/Qwen2.5-1.5B-quantized.w8a8",
|
|
"description": "Open source model RedHatAI/Qwen2.5-1.5B-quantized.w8a8. 2 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"neuralmagic",
|
|
"llmcompressor",
|
|
"conversational",
|
|
"en",
|
|
"base_model:Qwen/Qwen2.5-1.5B",
|
|
"base_model:quantized:Qwen/Qwen2.5-1.5B",
|
|
"8-bit",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 5,
|
|
"parameters_active_b": 5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-1.5b-quantized.w8a8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "h2ovl-mississippi-2b",
|
|
"name": "H2Ovl Mississippi 2B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/h2oai/h2ovl-mississippi-2b",
|
|
"description": "Open source model h2oai/h2ovl-mississippi-2b. 40 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 40,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"h2ovl_chat",
|
|
"feature-extraction",
|
|
"gpt",
|
|
"llm",
|
|
"multimodal large language model",
|
|
"ocr",
|
|
"conversational",
|
|
"custom_code",
|
|
"en",
|
|
"arxiv:2410.13611",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "h2ovl-mississippi-2b"
|
|
},
|
|
{
|
|
"slug": "llava-v1.5-7b",
|
|
"name": "Llava V1.5 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/liuhaotian/llava-v1.5-7b",
|
|
"description": "Open source model liuhaotian/llava-v1.5-7b. 537 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 537,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"llava",
|
|
"image-text-to-text",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llava-v1.5-7b"
|
|
},
|
|
{
|
|
"slug": "t5-3b",
|
|
"name": "T5 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/google-t5/t5-3b",
|
|
"description": "Open source model google-t5/t5-3b. 51 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 51,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"safetensors",
|
|
"t5",
|
|
"summarization",
|
|
"translation",
|
|
"en",
|
|
"fr",
|
|
"ro",
|
|
"de",
|
|
"multilingual",
|
|
"dataset:c4",
|
|
"arxiv:1805.12471",
|
|
"arxiv:1708.00055",
|
|
"arxiv:1704.05426",
|
|
"arxiv:1606.05250",
|
|
"arxiv:1808.09121",
|
|
"arxiv:1810.12885",
|
|
"arxiv:1905.10044",
|
|
"arxiv:1910.09700",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "t5-3b"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-14b-instruct-awq",
|
|
"name": "Qwen2.5 14B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-AWQ",
|
|
"description": "Open source model Qwen/Qwen2.5-14B-Instruct-AWQ. 27 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 27,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-14B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-14B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-14b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3.2-3b",
|
|
"name": "Llama 3.2 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.2-3B",
|
|
"description": "Open source model meta-llama/Llama-3.2-3B. 697 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 697,
|
|
"language": "Python",
|
|
"license": "llama3.2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"arxiv:2405.16406",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.2-3b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3-mini-4k-instruct-gptq-4bit",
|
|
"name": "Phi 3 Mini 4K Instruct Gptq 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/kaitchup/Phi-3-mini-4k-instruct-gptq-4bit",
|
|
"description": "Open source model kaitchup/Phi-3-mini-4k-instruct-gptq-4bit. 2 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"phi3",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:1910.09700",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"gptq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "phi-3-mini-4k-instruct-gptq-4bit",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-72b-instruct-awq",
|
|
"name": "Qwen2.5 72B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct-AWQ",
|
|
"description": "Open source model Qwen/Qwen2.5-72B-Instruct-AWQ. 74 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 74,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-72B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-72B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 50,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 72,
|
|
"parameters_active_b": 72,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-72b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "smollm2-135m",
|
|
"name": "Smollm2 135M",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M",
|
|
"description": "Open source model HuggingFaceTB/SmolLM2-135M. 166 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 166,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"en",
|
|
"arxiv:2502.02737",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "smollm2-135m"
|
|
},
|
|
{
|
|
"slug": "llama-3.3-70b-instruct",
|
|
"name": "Llama 3.3 70B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct",
|
|
"description": "Open source model meta-llama/Llama-3.3-70B-Instruct. 2658 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2658,
|
|
"language": "Python",
|
|
"license": "llama3.3",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"conversational",
|
|
"en",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"de",
|
|
"arxiv:2204.05149",
|
|
"base_model:meta-llama/Llama-3.1-70B",
|
|
"base_model:finetune:meta-llama/Llama-3.1-70B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.3-70b-instruct",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-30b-a3b-instruct-2507-fp8",
|
|
"name": "Qwen3 30B A3B Instruct 2507 Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
|
|
"description": "Open source model Qwen/Qwen3-30B-A3B-Instruct-2507-FP8. 112 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 112,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-30B-A3B-Instruct-2507",
|
|
"base_model:quantized:Qwen/Qwen3-30B-A3B-Instruct-2507",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-30b-a3b-instruct-2507-fp8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-32b-instruct",
|
|
"name": "Qwen2.5 Coder 32B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct. 1995 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1995,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-32B",
|
|
"base_model:finetune:Qwen/Qwen2.5-Coder-32B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-32b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-235b-a22b-instruct-2507-fp8",
|
|
"name": "Qwen3 235B A22B Instruct 2507 Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
|
|
"description": "Open source model Qwen/Qwen3-235B-A22B-Instruct-2507-FP8. 145 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 145,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-235B-A22B-Instruct-2507",
|
|
"base_model:quantized:Qwen/Qwen3-235B-A22B-Instruct-2507",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 164,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 235,
|
|
"parameters_active_b": 235,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-235b-a22b-instruct-2507-fp8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-distill-qwen-7b",
|
|
"name": "Deepseek R1 Distill Qwen 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-7B. 787 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 787,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"arxiv:2501.12948",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-distill-qwen-7b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3-mini-4k-instruct",
|
|
"name": "Phi 3 Mini 4K Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct",
|
|
"description": "Open source model microsoft/Phi-3-mini-4k-instruct. 1386 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1386,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"phi3",
|
|
"nlp",
|
|
"code",
|
|
"conversational",
|
|
"custom_code",
|
|
"en",
|
|
"fr",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "phi-3-mini-4k-instruct",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-14b",
|
|
"name": "Qwen3 14B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-14B",
|
|
"description": "Open source model Qwen/Qwen3-14B. 366 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 366,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-14B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-14B-Base",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-14b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-1.5b",
|
|
"name": "Qwen2.5 Coder 1.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-1.5B. 81 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 81,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"codeqwen",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-1.5B",
|
|
"base_model:finetune:Qwen/Qwen2.5-1.5B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 5,
|
|
"parameters_active_b": 5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-1.5b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3.1-70b-instruct",
|
|
"name": "Llama 3.1 70B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct",
|
|
"description": "Open source model meta-llama/Llama-3.1-70B-Instruct. 890 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 890,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"base_model:meta-llama/Llama-3.1-70B",
|
|
"base_model:finetune:meta-llama/Llama-3.1-70B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.1-70b-instruct",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "hunyuanimage-3.0",
|
|
"name": "Hunyuanimage 3.0",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/tencent/HunyuanImage-3.0",
|
|
"description": "Open source model tencent/HunyuanImage-3.0. 640 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 640,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"hunyuan_image_3_moe",
|
|
"text-to-image",
|
|
"custom_code",
|
|
"arxiv:2509.23951",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "hunyuanimage-3.0"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-7b-instruct-awq",
|
|
"name": "Qwen2.5 Coder 7B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-AWQ",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-AWQ. 19 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 19,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-7B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-7b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-coder-30b-a3b-instruct",
|
|
"name": "Qwen3 Coder 30B A3B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct",
|
|
"description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct. 945 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 945,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-coder-30b-a3b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-0528",
|
|
"name": "Deepseek R1 0528",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-0528",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1-0528. 2400 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2400,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"deepseek_v3",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2501.12948",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-0528"
|
|
},
|
|
{
|
|
"slug": "tiny-random-llama-3",
|
|
"name": "Tiny Random Llama 3",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/llamafactory/tiny-random-Llama-3",
|
|
"description": "Open source model llamafactory/tiny-random-Llama-3. 3 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"conversational",
|
|
"text-generation-inference",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "tiny-random-llama-3",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-32b-instruct-awq",
|
|
"name": "Qwen2.5 Coder 32B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct-AWQ",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-32B-Instruct-AWQ. 33 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 33,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-32b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "mistral-7b-instruct-v0.1",
|
|
"name": "Mistral 7B Instruct V0.1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1",
|
|
"description": "Open source model mistralai/Mistral-7B-Instruct-v0.1. 1826 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1826,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"mistral",
|
|
"finetuned",
|
|
"mistral-common",
|
|
"conversational",
|
|
"arxiv:2310.06825",
|
|
"base_model:mistralai/Mistral-7B-v0.1",
|
|
"base_model:finetune:mistralai/Mistral-7B-v0.1",
|
|
"text-generation-inference",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "mistral-7b-instruct-v0.1",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "gpt-oss-20b-mxfp4-q8",
|
|
"name": "Gpt Oss 20B Mxfp4 Q8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/mlx-community/gpt-oss-20b-MXFP4-Q8",
|
|
"description": "Open source model mlx-community/gpt-oss-20b-MXFP4-Q8. 31 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 31,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"mlx",
|
|
"safetensors",
|
|
"gpt_oss",
|
|
"vllm",
|
|
"conversational",
|
|
"base_model:openai/gpt-oss-20b",
|
|
"base_model:quantized:openai/gpt-oss-20b",
|
|
"4-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 14,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 20,
|
|
"parameters_active_b": 20,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt-oss-20b-mxfp4-q8"
|
|
},
|
|
{
|
|
"slug": "qwen3-embedding-4b",
|
|
"name": "Qwen3 Embedding 4B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Embedding-4B",
|
|
"description": "Open source model Qwen/Qwen3-Embedding-4B. 224 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 224,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"sentence-transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"transformers",
|
|
"sentence-similarity",
|
|
"feature-extraction",
|
|
"text-embeddings-inference",
|
|
"arxiv:2506.05176",
|
|
"base_model:Qwen/Qwen3-4B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-4B-Base",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-embedding-4b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-1.5b-instruct-awq",
|
|
"name": "Qwen2.5 1.5B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-AWQ",
|
|
"description": "Open source model Qwen/Qwen2.5-1.5B-Instruct-AWQ. 6 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 6,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-1.5B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-1.5B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 5,
|
|
"parameters_active_b": 5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-1.5b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "meta-llama-3.1-8b-instruct-fp8",
|
|
"name": "Meta Llama 3.1 8B Instruct Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8",
|
|
"description": "Open source model RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8. 44 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 44,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"fp8",
|
|
"vllm",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"base_model:meta-llama/Llama-3.1-8B-Instruct",
|
|
"base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "meta-llama-3.1-8b-instruct-fp8",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "phi-4",
|
|
"name": "Phi 4",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/microsoft/phi-4",
|
|
"description": "Open source model microsoft/phi-4. 2220 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2220,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"phi3",
|
|
"phi",
|
|
"nlp",
|
|
"math",
|
|
"code",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2412.08905",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "phi-4",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1",
|
|
"name": "Deepseek R1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1. 13011 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 13011,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"deepseek_v3",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2501.12948",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1"
|
|
},
|
|
{
|
|
"slug": "llama-3.2-1b-instruct-fp8",
|
|
"name": "Llama 3.2 1B Instruct Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/RedHatAI/Llama-3.2-1B-Instruct-FP8",
|
|
"description": "Open source model RedHatAI/Llama-3.2-1B-Instruct-FP8. 3 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3,
|
|
"language": "Python",
|
|
"license": "llama3.2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"safetensors",
|
|
"llama",
|
|
"llama-3",
|
|
"neuralmagic",
|
|
"llmcompressor",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"base_model:meta-llama/Llama-3.2-1B-Instruct",
|
|
"base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.2-1b-instruct-fp8",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3.1-405b",
|
|
"name": "Llama 3.1 405B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-3.1-405B",
|
|
"description": "Open source model meta-llama/Llama-3.1-405B. 961 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 961,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama-3",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"arxiv:2204.05149",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 284,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 405,
|
|
"parameters_active_b": 405,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.1-405b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-4b-thinking-2507",
|
|
"name": "Qwen3 4B Thinking 2507",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507",
|
|
"description": "Open source model Qwen/Qwen3-4B-Thinking-2507. 548 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 548,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-4b-thinking-2507",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "gpt2-medium",
|
|
"name": "Gpt2 Medium",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/openai-community/gpt2-medium",
|
|
"description": "Open source model openai-community/gpt2-medium. 193 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 193,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"rust",
|
|
"onnx",
|
|
"safetensors",
|
|
"gpt2",
|
|
"en",
|
|
"arxiv:1910.09700",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt2-medium"
|
|
},
|
|
{
|
|
"slug": "tiny-gpt2",
|
|
"name": "Tiny Gpt2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/sshleifer/tiny-gpt2",
|
|
"description": "Open source model sshleifer/tiny-gpt2. 34 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 34,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"gpt2",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "tiny-gpt2"
|
|
},
|
|
{
|
|
"slug": "hermes-3-llama-3.1-8b",
|
|
"name": "Hermes 3 Llama 3.1 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B",
|
|
"description": "Open source model NousResearch/Hermes-3-Llama-3.1-8B. 385 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 385,
|
|
"language": "Python",
|
|
"license": "llama3",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"Llama-3",
|
|
"instruct",
|
|
"finetune",
|
|
"chatml",
|
|
"gpt4",
|
|
"synthetic data",
|
|
"distillation",
|
|
"function calling",
|
|
"json mode",
|
|
"axolotl",
|
|
"roleplaying",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2408.11857",
|
|
"base_model:meta-llama/Llama-3.1-8B",
|
|
"base_model:finetune:meta-llama/Llama-3.1-8B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "hermes-3-llama-3.1-8b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3.5-vision-instruct",
|
|
"name": "Phi 3.5 Vision Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/microsoft/Phi-3.5-vision-instruct",
|
|
"description": "Open source model microsoft/Phi-3.5-vision-instruct. 726 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 726,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"phi3_v",
|
|
"nlp",
|
|
"code",
|
|
"vision",
|
|
"image-text-to-text",
|
|
"conversational",
|
|
"custom_code",
|
|
"multilingual",
|
|
"arxiv:2404.14219",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "phi-3.5-vision-instruct",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "minimax-m2",
|
|
"name": "Minimax M2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/MiniMaxAI/MiniMax-M2",
|
|
"description": "Open source model MiniMaxAI/MiniMax-M2. 1485 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1485,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"minimax_m2",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2504.07164",
|
|
"arxiv:2509.06501",
|
|
"arxiv:2509.13160",
|
|
"eval-results",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "minimax-m2"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-distill-llama-8b",
|
|
"name": "Deepseek R1 Distill Llama 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-8B. 843 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 843,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"conversational",
|
|
"arxiv:2501.12948",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-distill-llama-8b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-14b-awq",
|
|
"name": "Qwen3 14B Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-14B-AWQ",
|
|
"description": "Open source model Qwen/Qwen3-14B-AWQ. 57 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 57,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-14B",
|
|
"base_model:quantized:Qwen/Qwen3-14B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-14b-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-235b-a22b",
|
|
"name": "Qwen3 235B A22B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-235B-A22B",
|
|
"description": "Open source model Qwen/Qwen3-235B-A22B. 1075 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1075,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 164,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 235,
|
|
"parameters_active_b": 235,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-235b-a22b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "meta-llama-3.1-8b-instruct-awq-int4",
|
|
"name": "Meta Llama 3.1 8B Instruct Awq Int4",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
|
|
"description": "Open source model hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4. 87 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 87,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"llama-3.1",
|
|
"meta",
|
|
"autoawq",
|
|
"conversational",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "meta-llama-3.1-8b-instruct-awq-int4",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "lfm2.5-1.2b-instruct-mlx-8bit",
|
|
"name": "Lfm2.5 1.2B Instruct Mlx 8Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit",
|
|
"description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-8bit. 1 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"lfm2",
|
|
"liquid",
|
|
"lfm2.5",
|
|
"edge",
|
|
"mlx",
|
|
"conversational",
|
|
"en",
|
|
"ar",
|
|
"zh",
|
|
"fr",
|
|
"de",
|
|
"ja",
|
|
"ko",
|
|
"es",
|
|
"base_model:LiquidAI/LFM2.5-1.2B-Instruct",
|
|
"base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
|
|
"endpoints_compatible",
|
|
"8-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "lfm2.5-1.2b-instruct-mlx-8bit"
|
|
},
|
|
{
|
|
"slug": "glm-4.7-flash-gguf",
|
|
"name": "Glm 4.7 Flash Gguf",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF",
|
|
"description": "Open source model unsloth/GLM-4.7-Flash-GGUF. 482 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 482,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"gguf",
|
|
"unsloth",
|
|
"en",
|
|
"zh",
|
|
"arxiv:2508.06471",
|
|
"base_model:zai-org/GLM-4.7-Flash",
|
|
"base_model:quantized:zai-org/GLM-4.7-Flash",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us",
|
|
"imatrix",
|
|
"conversational"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-4.7-flash-gguf"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-distill-qwen-14b",
|
|
"name": "Deepseek R1 Distill Qwen 14B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Qwen-14B. 603 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 603,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"arxiv:2501.12948",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-distill-qwen-14b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "lfm2.5-1.2b-instruct-mlx-6bit",
|
|
"name": "Lfm2.5 1.2B Instruct Mlx 6Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit",
|
|
"description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-6bit. 4 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 4,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"lfm2",
|
|
"liquid",
|
|
"lfm2.5",
|
|
"edge",
|
|
"mlx",
|
|
"conversational",
|
|
"en",
|
|
"ar",
|
|
"zh",
|
|
"fr",
|
|
"de",
|
|
"ja",
|
|
"ko",
|
|
"es",
|
|
"base_model:LiquidAI/LFM2.5-1.2B-Instruct",
|
|
"base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
|
|
"endpoints_compatible",
|
|
"6-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "lfm2.5-1.2b-instruct-mlx-6bit"
|
|
},
|
|
{
|
|
"slug": "lfm2.5-1.2b-instruct-mlx-4bit",
|
|
"name": "Lfm2.5 1.2B Instruct Mlx 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit",
|
|
"description": "Open source model lmstudio-community/LFM2.5-1.2B-Instruct-MLX-4bit. 1 like on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"lfm2",
|
|
"liquid",
|
|
"lfm2.5",
|
|
"edge",
|
|
"mlx",
|
|
"conversational",
|
|
"en",
|
|
"ar",
|
|
"zh",
|
|
"fr",
|
|
"de",
|
|
"ja",
|
|
"ko",
|
|
"es",
|
|
"base_model:LiquidAI/LFM2.5-1.2B-Instruct",
|
|
"base_model:quantized:LiquidAI/LFM2.5-1.2B-Instruct",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "lfm2.5-1.2b-instruct-mlx-4bit"
|
|
},
|
|
{
|
|
"slug": "vicuna-7b-v1.5",
|
|
"name": "Vicuna 7B V1.5",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmsys/vicuna-7b-v1.5",
|
|
"description": "Open source model lmsys/vicuna-7b-v1.5. 387 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 387,
|
|
"language": "Python",
|
|
"license": "llama2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"llama",
|
|
"arxiv:2307.09288",
|
|
"arxiv:2306.05685",
|
|
"text-generation-inference",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "vicuna-7b-v1.5"
|
|
},
|
|
{
|
|
"slug": "llama-3.2-1b-instruct-q8_0-gguf",
|
|
"name": "Llama 3.2 1B Instruct Q8_0 Gguf",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF",
|
|
"description": "Open source model hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF. 43 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 43,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"gguf",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama",
|
|
"llama-3",
|
|
"llama-cpp",
|
|
"gguf-my-repo",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"base_model:meta-llama/Llama-3.2-1B-Instruct",
|
|
"base_model:quantized:meta-llama/Llama-3.2-1B-Instruct",
|
|
"endpoints_compatible",
|
|
"region:us",
|
|
"conversational"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.2-1b-instruct-q8_0-gguf",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "llama-3.3-70b-instruct-awq",
|
|
"name": "Llama 3.3 70B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/kosbu/Llama-3.3-70B-Instruct-AWQ",
|
|
"description": "Open source model kosbu/Llama-3.3-70B-Instruct-AWQ. 10 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 10,
|
|
"language": "Python",
|
|
"license": "llama3.3",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"llama-3",
|
|
"awq",
|
|
"conversational",
|
|
"en",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"de",
|
|
"base_model:meta-llama/Llama-3.3-70B-Instruct",
|
|
"base_model:quantized:meta-llama/Llama-3.3-70B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-3.3-70b-instruct-awq",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-32b-fp8",
|
|
"name": "Qwen3 32B Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-32B-FP8",
|
|
"description": "Open source model Qwen/Qwen3-32B-FP8. 80 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 80,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-32B",
|
|
"base_model:quantized:Qwen/Qwen3-32B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-32b-fp8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "gpt2-xl",
|
|
"name": "Gpt2 Xl",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/openai-community/gpt2-xl",
|
|
"description": "Open source model openai-community/gpt2-xl. 373 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 373,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"rust",
|
|
"safetensors",
|
|
"gpt2",
|
|
"en",
|
|
"arxiv:1910.09700",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt2-xl"
|
|
},
|
|
{
|
|
"slug": "qwen3-4b-instruct-2507-fp8",
|
|
"name": "Qwen3 4B Instruct 2507 Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-4B-Instruct-2507-FP8",
|
|
"description": "Open source model Qwen/Qwen3-4B-Instruct-2507-FP8. 65 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 65,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-4B-Instruct-2507",
|
|
"base_model:quantized:Qwen/Qwen3-4B-Instruct-2507",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-4b-instruct-2507-fp8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "xlnet-base-cased",
|
|
"name": "Xlnet Base Cased",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/xlnet/xlnet-base-cased",
|
|
"description": "Open source model xlnet/xlnet-base-cased. 80 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 80,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"rust",
|
|
"xlnet",
|
|
"en",
|
|
"dataset:bookcorpus",
|
|
"dataset:wikipedia",
|
|
"arxiv:1906.08237",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "xlnet-base-cased"
|
|
},
|
|
{
|
|
"slug": "llama-2-7b-hf",
|
|
"name": "Llama 2 7B Hf",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-2-7b-hf",
|
|
"description": "Open source model meta-llama/Llama-2-7b-hf. 2268 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2268,
|
|
"language": "Python",
|
|
"license": "llama2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"llama-2",
|
|
"en",
|
|
"arxiv:2307.09288",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-2-7b-hf",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-math-7b-instruct",
|
|
"name": "Qwen2.5 Math 7B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-Math-7B-Instruct. 89 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 89,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12122",
|
|
"base_model:Qwen/Qwen2.5-Math-7B",
|
|
"base_model:finetune:Qwen/Qwen2.5-Math-7B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-math-7b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-reranker-0.6b",
|
|
"name": "Qwen3 Reranker 0.6B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Reranker-0.6B",
|
|
"description": "Open source model Qwen/Qwen3-Reranker-0.6B. 305 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 305,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"text-ranking",
|
|
"arxiv:2506.05176",
|
|
"base_model:Qwen/Qwen3-0.6B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-0.6B-Base",
|
|
"text-embeddings-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0.6,
|
|
"parameters_active_b": 0.6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-reranker-0.6b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-1.5b",
|
|
"name": "Qwen2.5 1.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-1.5B",
|
|
"description": "Open source model Qwen/Qwen2.5-1.5B. 165 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 165,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.5,
|
|
"parameters_active_b": 1.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-1.5b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-30b-a3b-thinking-2507",
|
|
"name": "Qwen3 30B A3B Thinking 2507",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-Thinking-2507",
|
|
"description": "Open source model Qwen/Qwen3-30B-A3B-Thinking-2507. 359 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 359,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2402.17463",
|
|
"arxiv:2407.02490",
|
|
"arxiv:2501.15383",
|
|
"arxiv:2404.06654",
|
|
"arxiv:2505.09388",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-30b-a3b-thinking-2507",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "smollm2-135m-instruct",
|
|
"name": "Smollm2 135M Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct",
|
|
"description": "Open source model HuggingFaceTB/SmolLM2-135M-Instruct. 292 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 292,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"tensorboard",
|
|
"onnx",
|
|
"safetensors",
|
|
"llama",
|
|
"transformers.js",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2502.02737",
|
|
"base_model:HuggingFaceTB/SmolLM2-135M",
|
|
"base_model:quantized:HuggingFaceTB/SmolLM2-135M",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "smollm2-135m-instruct"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-math-1.5b",
|
|
"name": "Qwen2.5 Math 1.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Math-1.5B",
|
|
"description": "Open source model Qwen/Qwen2.5-Math-1.5B. 100 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 100,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12122",
|
|
"base_model:Qwen/Qwen2.5-1.5B",
|
|
"base_model:finetune:Qwen/Qwen2.5-1.5B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.5,
|
|
"parameters_active_b": 1.5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-math-1.5b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "glm-4.5-air-awq-4bit",
|
|
"name": "Glm 4.5 Air Awq 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/cyankiwi/GLM-4.5-Air-AWQ-4bit",
|
|
"description": "Open source model cyankiwi/GLM-4.5-Air-AWQ-4bit. 27 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 27,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"glm4_moe",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"arxiv:2508.06471",
|
|
"base_model:zai-org/GLM-4.5-Air",
|
|
"base_model:quantized:zai-org/GLM-4.5-Air",
|
|
"endpoints_compatible",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-4.5-air-awq-4bit"
|
|
},
|
|
{
|
|
"slug": "llama-2-7b-chat-hf",
|
|
"name": "Llama 2 7B Chat Hf",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
|
|
"description": "Open source model meta-llama/Llama-2-7b-chat-hf. 4705 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 4705,
|
|
"language": "Python",
|
|
"license": "llama2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"llama-2",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2307.09288",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-2-7b-chat-hf",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-7b-instruct-gptq-int4",
|
|
"name": "Qwen2.5 Coder 7B Instruct Gptq Int4",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4. 12 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 12,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-7B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-Coder-7B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"gptq",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-7b-instruct-gptq-int4",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-vl-30b-a3b-instruct-awq",
|
|
"name": "Qwen3 Vl 30B A3B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ",
|
|
"description": "Open source model QuantTrio/Qwen3-VL-30B-A3B-Instruct-AWQ. 38 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 38,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_vl_moe",
|
|
"image-text-to-text",
|
|
"AWQ",
|
|
"vLLM",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"arxiv:2502.13923",
|
|
"arxiv:2409.12191",
|
|
"arxiv:2308.12966",
|
|
"base_model:Qwen/Qwen3-VL-30B-A3B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen3-VL-30B-A3B-Instruct",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-vl-30b-a3b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-8b-base",
|
|
"name": "Qwen3 8B Base",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-8B-Base",
|
|
"description": "Open source model Qwen/Qwen3-8B-Base. 82 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 82,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-8b-base",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-14b-instruct",
|
|
"name": "Qwen2.5 Coder 14B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-14B-Instruct. 140 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 140,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"codeqwen",
|
|
"chat",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-Coder-14B",
|
|
"base_model:finetune:Qwen/Qwen2.5-Coder-14B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 10,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 14,
|
|
"parameters_active_b": 14,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-14b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "stories15m_moe",
|
|
"name": "Stories15M_Moe",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/ggml-org/stories15M_MOE",
|
|
"description": "Open source model ggml-org/stories15M_MOE. 5 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 5,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"gguf",
|
|
"mixtral",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "stories15m_moe"
|
|
},
|
|
{
|
|
"slug": "opt-1.3b",
|
|
"name": "Opt 1.3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/facebook/opt-1.3b",
|
|
"description": "Open source model facebook/opt-1.3b. 182 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 182,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"opt",
|
|
"en",
|
|
"arxiv:2205.01068",
|
|
"arxiv:2005.14165",
|
|
"text-generation-inference",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1.3,
|
|
"parameters_active_b": 1.3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "opt-1.3b"
|
|
},
|
|
{
|
|
"slug": "minimax-m2-awq",
|
|
"name": "Minimax M2 Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/QuantTrio/MiniMax-M2-AWQ",
|
|
"description": "Open source model QuantTrio/MiniMax-M2-AWQ. 8 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 8,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"mixtral",
|
|
"vLLM",
|
|
"AWQ",
|
|
"conversational",
|
|
"arxiv:2504.07164",
|
|
"arxiv:2509.06501",
|
|
"arxiv:2509.13160",
|
|
"base_model:MiniMaxAI/MiniMax-M2",
|
|
"base_model:quantized:MiniMaxAI/MiniMax-M2",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "minimax-m2-awq"
|
|
},
|
|
{
|
|
"slug": "glm-4.7-flash-nvfp4",
|
|
"name": "Glm 4.7 Flash Nvfp4",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/GadflyII/GLM-4.7-Flash-NVFP4",
|
|
"description": "Open source model GadflyII/GLM-4.7-Flash-NVFP4. 62 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 62,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"glm4_moe_lite",
|
|
"moe",
|
|
"nvfp4",
|
|
"quantized",
|
|
"vllm",
|
|
"glm",
|
|
"30b",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"base_model:zai-org/GLM-4.7-Flash",
|
|
"base_model:quantized:zai-org/GLM-4.7-Flash",
|
|
"endpoints_compatible",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-4.7-flash-nvfp4"
|
|
},
|
|
{
|
|
"slug": "hy-mt1.5-7b",
|
|
"name": "Hy Mt1.5 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/tencent/HY-MT1.5-7B",
|
|
"description": "Open source model tencent/HY-MT1.5-7B. 133 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 133,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"hunyuan_v1_dense",
|
|
"translation",
|
|
"zh",
|
|
"en",
|
|
"fr",
|
|
"pt",
|
|
"es",
|
|
"ja",
|
|
"tr",
|
|
"ru",
|
|
"ar",
|
|
"ko",
|
|
"th",
|
|
"it",
|
|
"de",
|
|
"vi",
|
|
"ms",
|
|
"id",
|
|
"tl",
|
|
"hi",
|
|
"pl",
|
|
"cs",
|
|
"nl",
|
|
"km",
|
|
"my",
|
|
"fa",
|
|
"gu",
|
|
"ur",
|
|
"te",
|
|
"mr",
|
|
"he",
|
|
"bn",
|
|
"ta",
|
|
"uk",
|
|
"bo",
|
|
"kk",
|
|
"mn",
|
|
"ug",
|
|
"arxiv:2512.24092",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "hy-mt1.5-7b"
|
|
},
|
|
{
|
|
"slug": "gemma-2-27b-it",
|
|
"name": "Gemma 2 27B It",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/google/gemma-2-27b-it",
|
|
"description": "Open source model google/gemma-2-27b-it. 559 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 559,
|
|
"language": "Python",
|
|
"license": "gemma",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"gemma2",
|
|
"conversational",
|
|
"arxiv:2009.03300",
|
|
"arxiv:1905.07830",
|
|
"arxiv:1911.11641",
|
|
"arxiv:1904.09728",
|
|
"arxiv:1905.10044",
|
|
"arxiv:1907.10641",
|
|
"arxiv:1811.00937",
|
|
"arxiv:1809.02789",
|
|
"arxiv:1911.01547",
|
|
"arxiv:1705.03551",
|
|
"arxiv:2107.03374",
|
|
"arxiv:2108.07732",
|
|
"arxiv:2110.14168",
|
|
"arxiv:2009.11462",
|
|
"arxiv:2101.11718",
|
|
"arxiv:2110.08193",
|
|
"arxiv:1804.09301",
|
|
"arxiv:2109.07958",
|
|
"arxiv:1804.06876",
|
|
"arxiv:2103.03874",
|
|
"arxiv:2304.06364",
|
|
"arxiv:2206.04615",
|
|
"arxiv:2203.09509",
|
|
"base_model:google/gemma-2-27b",
|
|
"base_model:finetune:google/gemma-2-27b",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 19,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 27,
|
|
"parameters_active_b": 27,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gemma-2-27b-it",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-coder-next-gguf",
|
|
"name": "Qwen3 Coder Next Gguf",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/Qwen3-Coder-Next-GGUF",
|
|
"description": "Open source model unsloth/Qwen3-Coder-Next-GGUF. 347 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 347,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"gguf",
|
|
"qwen3_next",
|
|
"unsloth",
|
|
"qwen",
|
|
"qwen3",
|
|
"base_model:Qwen/Qwen3-Coder-Next",
|
|
"base_model:quantized:Qwen/Qwen3-Coder-Next",
|
|
"endpoints_compatible",
|
|
"region:us",
|
|
"imatrix",
|
|
"conversational"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-coder-next-gguf",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "gte-qwen2-1.5b-instruct",
|
|
"name": "Gte Qwen2 1.5B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Alibaba-NLP/gte-Qwen2-1.5B-instruct",
|
|
"description": "Open source model Alibaba-NLP/gte-Qwen2-1.5B-instruct. 229 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 229,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"sentence-transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"mteb",
|
|
"transformers",
|
|
"Qwen2",
|
|
"sentence-similarity",
|
|
"custom_code",
|
|
"arxiv:2308.03281",
|
|
"model-index",
|
|
"text-embeddings-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gte-qwen2-1.5b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "lfm2-1.2b",
|
|
"name": "Lfm2 1.2B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/LiquidAI/LFM2-1.2B",
|
|
"description": "Open source model LiquidAI/LFM2-1.2B. 349 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 349,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"lfm2",
|
|
"liquid",
|
|
"edge",
|
|
"conversational",
|
|
"en",
|
|
"ar",
|
|
"zh",
|
|
"fr",
|
|
"de",
|
|
"ja",
|
|
"ko",
|
|
"es",
|
|
"arxiv:2511.23404",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "lfm2-1.2b"
|
|
},
|
|
{
|
|
"slug": "saiga_llama3_8b",
|
|
"name": "Saiga_Llama3_8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/IlyaGusev/saiga_llama3_8b",
|
|
"description": "Open source model IlyaGusev/saiga_llama3_8b. 137 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 137,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"conversational",
|
|
"ru",
|
|
"dataset:IlyaGusev/saiga_scored",
|
|
"doi:10.57967/hf/2368",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "saiga_llama3_8b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-1.7b-base",
|
|
"name": "Qwen3 1.7B Base",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-1.7B-Base",
|
|
"description": "Open source model Qwen/Qwen3-1.7B-Base. 62 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 62,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-1.7b-base",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "mistral-7b-v0.3-bnb-4bit",
|
|
"name": "Mistral 7B V0.3 Bnb 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/mistral-7b-v0.3-bnb-4bit",
|
|
"description": "Open source model unsloth/mistral-7b-v0.3-bnb-4bit. 22 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 22,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"mistral",
|
|
"unsloth",
|
|
"mistral-7b",
|
|
"en",
|
|
"base_model:mistralai/Mistral-7B-v0.3",
|
|
"base_model:quantized:mistralai/Mistral-7B-v0.3",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"bitsandbytes",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "mistral-7b-v0.3-bnb-4bit",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "gemma-2-2b-it",
|
|
"name": "Gemma 2 2B It",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/google/gemma-2-2b-it",
|
|
"description": "Open source model google/gemma-2-2b-it. 1285 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1285,
|
|
"language": "Python",
|
|
"license": "gemma",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"gemma2",
|
|
"conversational",
|
|
"arxiv:2009.03300",
|
|
"arxiv:1905.07830",
|
|
"arxiv:1911.11641",
|
|
"arxiv:1904.09728",
|
|
"arxiv:1905.10044",
|
|
"arxiv:1907.10641",
|
|
"arxiv:1811.00937",
|
|
"arxiv:1809.02789",
|
|
"arxiv:1911.01547",
|
|
"arxiv:1705.03551",
|
|
"arxiv:2107.03374",
|
|
"arxiv:2108.07732",
|
|
"arxiv:2110.14168",
|
|
"arxiv:2009.11462",
|
|
"arxiv:2101.11718",
|
|
"arxiv:2110.08193",
|
|
"arxiv:1804.09301",
|
|
"arxiv:2109.07958",
|
|
"arxiv:1804.06876",
|
|
"arxiv:2103.03874",
|
|
"arxiv:2304.06364",
|
|
"arxiv:1903.00161",
|
|
"arxiv:2206.04615",
|
|
"arxiv:2203.09509",
|
|
"arxiv:2403.13793",
|
|
"base_model:google/gemma-2-2b",
|
|
"base_model:finetune:google/gemma-2-2b",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 2,
|
|
"parameters_active_b": 2,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gemma-2-2b-it",
|
|
"logo_url": "/logos/gemma.svg"
|
|
},
|
|
{
|
|
"slug": "phi-4-multimodal-instruct",
|
|
"name": "Phi 4 Multimodal Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/microsoft/Phi-4-multimodal-instruct",
|
|
"description": "Open source model microsoft/Phi-4-multimodal-instruct. 1573 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1573,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"phi4mm",
|
|
"nlp",
|
|
"code",
|
|
"audio",
|
|
"automatic-speech-recognition",
|
|
"speech-summarization",
|
|
"speech-translation",
|
|
"visual-question-answering",
|
|
"phi-4-multimodal",
|
|
"phi",
|
|
"phi-4-mini",
|
|
"custom_code",
|
|
"multilingual",
|
|
"ar",
|
|
"zh",
|
|
"cs",
|
|
"da",
|
|
"nl",
|
|
"en",
|
|
"fi",
|
|
"fr",
|
|
"de",
|
|
"he",
|
|
"hu",
|
|
"it",
|
|
"ja",
|
|
"ko",
|
|
"no",
|
|
"pl",
|
|
"pt",
|
|
"ru",
|
|
"es",
|
|
"sv",
|
|
"th",
|
|
"tr",
|
|
"uk",
|
|
"arxiv:2503.01743",
|
|
"arxiv:2407.13833",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "phi-4-multimodal-instruct",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "pythia-70m-deduped",
|
|
"name": "Pythia 70M Deduped",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/EleutherAI/pythia-70m-deduped",
|
|
"description": "Open source model EleutherAI/pythia-70m-deduped. 27 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 27,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"gpt_neox",
|
|
"causal-lm",
|
|
"pythia",
|
|
"en",
|
|
"dataset:EleutherAI/the_pile_deduplicated",
|
|
"arxiv:2304.01373",
|
|
"arxiv:2101.00027",
|
|
"arxiv:2201.07311",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "pythia-70m-deduped"
|
|
},
|
|
{
|
|
"slug": "dialogpt-medium",
|
|
"name": "Dialogpt Medium",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/microsoft/DialoGPT-medium",
|
|
"description": "Open source model microsoft/DialoGPT-medium. 433 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 433,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"jax",
|
|
"rust",
|
|
"gpt2",
|
|
"conversational",
|
|
"arxiv:1911.00536",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "dialogpt-medium"
|
|
},
|
|
{
|
|
"slug": "gpt-oss-20b-bf16",
|
|
"name": "Gpt Oss 20B Bf16",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/gpt-oss-20b-BF16",
|
|
"description": "Open source model unsloth/gpt-oss-20b-BF16. 29 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 29,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"gpt_oss",
|
|
"vllm",
|
|
"unsloth",
|
|
"conversational",
|
|
"base_model:openai/gpt-oss-20b",
|
|
"base_model:finetune:openai/gpt-oss-20b",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 14,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 20,
|
|
"parameters_active_b": 20,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "gpt-oss-20b-bf16"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-72b-instruct",
|
|
"name": "Qwen2.5 72B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct",
|
|
"description": "Open source model Qwen/Qwen2.5-72B-Instruct. 910 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 910,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-72B",
|
|
"base_model:finetune:Qwen/Qwen2.5-72B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 50,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 72,
|
|
"parameters_active_b": 72,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-72b-instruct",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-32b-awq",
|
|
"name": "Qwen3 32B Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-32B-AWQ",
|
|
"description": "Open source model Qwen/Qwen3-32B-AWQ. 125 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 125,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-32B",
|
|
"base_model:quantized:Qwen/Qwen3-32B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-32b-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "mimo-v2-flash",
|
|
"name": "Mimo V2 Flash",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/XiaomiMiMo/MiMo-V2-Flash",
|
|
"description": "Open source model XiaomiMiMo/MiMo-V2-Flash. 628 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 628,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"mimo_v2_flash",
|
|
"conversational",
|
|
"custom_code",
|
|
"eval-results",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "mimo-v2-flash"
|
|
},
|
|
{
|
|
"slug": "qwen3-coder-30b-a3b-instruct-fp8",
|
|
"name": "Qwen3 Coder 30B A3B Instruct Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8",
|
|
"description": "Open source model Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8. 158 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 158,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-coder-30b-a3b-instruct-fp8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen3-8b-fp8",
|
|
"name": "Qwen3 8B Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-8B-FP8",
|
|
"description": "Open source model Qwen/Qwen3-8B-FP8. 56 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 56,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-8B",
|
|
"base_model:quantized:Qwen/Qwen3-8B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-8b-fp8",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-v3.2",
|
|
"name": "Deepseek V3.2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-V3.2",
|
|
"description": "Open source model deepseek-ai/DeepSeek-V3.2. 1251 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1251,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"deepseek_v32",
|
|
"conversational",
|
|
"base_model:deepseek-ai/DeepSeek-V3.2-Exp-Base",
|
|
"base_model:finetune:deepseek-ai/DeepSeek-V3.2-Exp-Base",
|
|
"eval-results",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-v3.2"
|
|
},
|
|
{
|
|
"slug": "qwen3-coder-next",
|
|
"name": "Qwen3 Coder Next",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Coder-Next",
|
|
"description": "Open source model Qwen/Qwen3-Coder-Next. 912 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 912,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_next",
|
|
"conversational",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-coder-next",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2-0.5b",
|
|
"name": "Qwen2 0.5B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2-0.5B",
|
|
"description": "Open source model Qwen/Qwen2-0.5B. 164 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 164,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"pretrained",
|
|
"conversational",
|
|
"en",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 5,
|
|
"parameters_active_b": 5,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2-0.5b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "mistral-7b-v0.1",
|
|
"name": "Mistral 7B V0.1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/mistralai/Mistral-7B-v0.1",
|
|
"description": "Open source model mistralai/Mistral-7B-v0.1. 4042 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 4042,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"mistral",
|
|
"pretrained",
|
|
"mistral-common",
|
|
"en",
|
|
"arxiv:2310.06825",
|
|
"text-generation-inference",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "mistral-7b-v0.1",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "kimi-k2-thinking",
|
|
"name": "Kimi K2 Thinking",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/moonshotai/Kimi-K2-Thinking",
|
|
"description": "Open source model moonshotai/Kimi-K2-Thinking. 1670 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1670,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"kimi_k2",
|
|
"conversational",
|
|
"custom_code",
|
|
"eval-results",
|
|
"endpoints_compatible",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "kimi-k2-thinking"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-0528-qwen3-8b-mlx-4bit",
|
|
"name": "Deepseek R1 0528 Qwen3 8B Mlx 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit",
|
|
"description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-4bit. 7 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 7,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"mlx",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
|
|
"base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
|
|
"4-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-0528-qwen3-8b-mlx-4bit",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-7b-instruct-awq",
|
|
"name": "Qwen2.5 7B Instruct Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-AWQ",
|
|
"description": "Open source model Qwen/Qwen2.5-7B-Instruct-AWQ. 36 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 36,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-7B-Instruct",
|
|
"base_model:quantized:Qwen/Qwen2.5-7B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-7b-instruct-awq",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "points-reader",
|
|
"name": "Points Reader",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/tencent/POINTS-Reader",
|
|
"description": "Open source model tencent/POINTS-Reader. 100 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 100,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"image-text-to-text",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2509.01215",
|
|
"arxiv:2412.08443",
|
|
"arxiv:2409.04828",
|
|
"arxiv:2405.11850",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": true
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "points-reader"
|
|
},
|
|
{
|
|
"slug": "qwen3-4b-base",
|
|
"name": "Qwen3 4B Base",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-4B-Base",
|
|
"description": "Open source model Qwen/Qwen3-4B-Base. 80 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 80,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-4b-base",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "step-3.5-flash",
|
|
"name": "Step 3.5 Flash",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/stepfun-ai/Step-3.5-Flash",
|
|
"description": "Open source model stepfun-ai/Step-3.5-Flash. 621 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 621,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"step3p5",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2602.10604",
|
|
"arxiv:2601.05593",
|
|
"arxiv:2507.19427",
|
|
"eval-results",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "step-3.5-flash"
|
|
},
|
|
{
|
|
"slug": "kogpt2-base-v2",
|
|
"name": "Kogpt2 Base V2",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/skt/kogpt2-base-v2",
|
|
"description": "Open source model skt/kogpt2-base-v2. 60 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 60,
|
|
"language": "Python",
|
|
"license": "cc-by-nc-sa-4.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"jax",
|
|
"gpt2",
|
|
"ko",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "kogpt2-base-v2"
|
|
},
|
|
{
|
|
"slug": "parler-tts-mini-multilingual-v1.1",
|
|
"name": "Parler Tts Mini Multilingual V1.1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/parler-tts/parler-tts-mini-multilingual-v1.1",
|
|
"description": "Open source model parler-tts/parler-tts-mini-multilingual-v1.1. 54 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 54,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"parler_tts",
|
|
"text-to-speech",
|
|
"annotation",
|
|
"en",
|
|
"fr",
|
|
"es",
|
|
"pt",
|
|
"pl",
|
|
"de",
|
|
"nl",
|
|
"it",
|
|
"dataset:facebook/multilingual_librispeech",
|
|
"dataset:parler-tts/libritts_r_filtered",
|
|
"dataset:parler-tts/libritts-r-filtered-speaker-descriptions",
|
|
"dataset:parler-tts/mls_eng",
|
|
"dataset:parler-tts/mls-eng-speaker-descriptions",
|
|
"dataset:ylacombe/mls-annotated",
|
|
"dataset:ylacombe/cml-tts-filtered-annotated",
|
|
"dataset:PHBJT/cml-tts-filtered",
|
|
"arxiv:2402.01912",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "parler-tts-mini-multilingual-v1.1"
|
|
},
|
|
{
|
|
"slug": "qwen3-reranker-8b",
|
|
"name": "Qwen3 Reranker 8B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-Reranker-8B",
|
|
"description": "Open source model Qwen/Qwen3-Reranker-8B. 213 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 213,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"text-ranking",
|
|
"arxiv:2506.05176",
|
|
"base_model:Qwen/Qwen3-8B-Base",
|
|
"base_model:finetune:Qwen/Qwen3-8B-Base",
|
|
"text-embeddings-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-reranker-8b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-0528-qwen3-8b-mlx-8bit",
|
|
"name": "Deepseek R1 0528 Qwen3 8B Mlx 8Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit",
|
|
"description": "Open source model lmstudio-community/DeepSeek-R1-0528-Qwen3-8B-MLX-8bit. 13 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 13,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"mlx",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"base_model:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
|
|
"base_model:quantized:deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
|
|
"8-bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-0528-qwen3-8b-mlx-8bit",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "powermoe-3b",
|
|
"name": "Powermoe 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/ibm-research/PowerMoE-3b",
|
|
"description": "Open source model ibm-research/PowerMoE-3b. 14 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 14,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"granitemoe",
|
|
"arxiv:2408.13359",
|
|
"model-index",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "powermoe-3b"
|
|
},
|
|
{
|
|
"slug": "llada-8b-instruct",
|
|
"name": "Llada 8B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/GSAI-ML/LLaDA-8B-Instruct",
|
|
"description": "Open source model GSAI-ML/LLaDA-8B-Instruct. 342 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 342,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llada",
|
|
"conversational",
|
|
"custom_code",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llada-8b-instruct"
|
|
},
|
|
{
|
|
"slug": "apertus-8b-instruct-2509",
|
|
"name": "Apertus 8B Instruct 2509",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/swiss-ai/Apertus-8B-Instruct-2509",
|
|
"description": "Open source model swiss-ai/Apertus-8B-Instruct-2509. 435 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 435,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"apertus",
|
|
"multilingual",
|
|
"compliant",
|
|
"swiss-ai",
|
|
"conversational",
|
|
"arxiv:2509.14233",
|
|
"base_model:swiss-ai/Apertus-8B-2509",
|
|
"base_model:finetune:swiss-ai/Apertus-8B-2509",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "apertus-8b-instruct-2509"
|
|
},
|
|
{
|
|
"slug": "qwen3-30b-a3b-gptq-int4",
|
|
"name": "Qwen3 30B A3B Gptq Int4",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-30B-A3B-GPTQ-Int4",
|
|
"description": "Open source model Qwen/Qwen3-30B-A3B-GPTQ-Int4. 45 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 45,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3_moe",
|
|
"conversational",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-30B-A3B",
|
|
"base_model:quantized:Qwen/Qwen3-30B-A3B",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"gptq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
"parameters_active_b": 30,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-30b-a3b-gptq-int4",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "tinyllama-1.1b-chat-v0.3-gptq",
|
|
"name": "Tinyllama 1.1B Chat V0.3 Gptq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ",
|
|
"description": "Open source model TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ. 9 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 9,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"en",
|
|
"dataset:cerebras/SlimPajama-627B",
|
|
"dataset:bigcode/starcoderdata",
|
|
"dataset:OpenAssistant/oasst_top1_2023-08-25",
|
|
"base_model:TinyLlama/TinyLlama-1.1B-Chat-v0.3",
|
|
"base_model:quantized:TinyLlama/TinyLlama-1.1B-Chat-v0.3",
|
|
"text-generation-inference",
|
|
"4-bit",
|
|
"gptq",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 1,
|
|
"parameters_active_b": 1,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "tinyllama-1.1b-chat-v0.3-gptq"
|
|
},
|
|
{
|
|
"slug": "prot_t5_xl_bfd",
|
|
"name": "Prot_T5_Xl_Bfd",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Rostlab/prot_t5_xl_bfd",
|
|
"description": "Open source model Rostlab/prot_t5_xl_bfd. 10 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 10,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"tf",
|
|
"t5",
|
|
"protein language model",
|
|
"dataset:BFD",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "prot_t5_xl_bfd"
|
|
},
|
|
{
|
|
"slug": "qwen3-4b-instruct-2507-unsloth-bnb-4bit",
|
|
"name": "Qwen3 4B Instruct 2507 Unsloth Bnb 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit",
|
|
"description": "Open source model unsloth/Qwen3-4B-Instruct-2507-unsloth-bnb-4bit. 13 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 13,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"unsloth",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"base_model:Qwen/Qwen3-4B-Instruct-2507",
|
|
"base_model:quantized:Qwen/Qwen3-4B-Instruct-2507",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"bitsandbytes",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-4b-instruct-2507-unsloth-bnb-4bit",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "phi-3.5-mini-instruct",
|
|
"name": "Phi 3.5 Mini Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/microsoft/Phi-3.5-mini-instruct",
|
|
"description": "Open source model microsoft/Phi-3.5-mini-instruct. 963 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 963,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"phi3",
|
|
"nlp",
|
|
"code",
|
|
"conversational",
|
|
"custom_code",
|
|
"multilingual",
|
|
"arxiv:2404.14219",
|
|
"arxiv:2407.13833",
|
|
"arxiv:2403.06412",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "phi-3.5-mini-instruct",
|
|
"logo_url": "/logos/phi.svg"
|
|
},
|
|
{
|
|
"slug": "meta-llama-3.1-8b-instruct-bnb-4bit",
|
|
"name": "Meta Llama 3.1 8B Instruct Bnb 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
|
|
"description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit. 95 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 95,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"llama-3",
|
|
"meta",
|
|
"facebook",
|
|
"unsloth",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2204.05149",
|
|
"base_model:meta-llama/Llama-3.1-8B-Instruct",
|
|
"base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"bitsandbytes",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "meta-llama-3.1-8b-instruct-bnb-4bit",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "glm-4.7-flash-awq-4bit",
|
|
"name": "Glm 4.7 Flash Awq 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/cyankiwi/GLM-4.7-Flash-AWQ-4bit",
|
|
"description": "Open source model cyankiwi/GLM-4.7-Flash-AWQ-4bit. 43 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 43,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"glm4_moe_lite",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"arxiv:2508.06471",
|
|
"base_model:zai-org/GLM-4.7-Flash",
|
|
"base_model:quantized:zai-org/GLM-4.7-Flash",
|
|
"endpoints_compatible",
|
|
"compressed-tensors",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 3,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 4,
|
|
"parameters_active_b": 4,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-4.7-flash-awq-4bit"
|
|
},
|
|
{
|
|
"slug": "dots.ocr",
|
|
"name": "Dots.Ocr",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/rednote-hilab/dots.ocr",
|
|
"description": "Open source model rednote-hilab/dots.ocr. 1243 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1243,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"dots_ocr",
|
|
"safetensors",
|
|
"image-to-text",
|
|
"ocr",
|
|
"document-parse",
|
|
"layout",
|
|
"table",
|
|
"formula",
|
|
"transformers",
|
|
"custom_code",
|
|
"image-text-to-text",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"multilingual",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "dots.ocr"
|
|
},
|
|
{
|
|
"slug": "mistral-7b-bnb-4bit",
|
|
"name": "Mistral 7B Bnb 4Bit",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/mistral-7b-bnb-4bit",
|
|
"description": "Open source model unsloth/mistral-7b-bnb-4bit. 30 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 30,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"mistral",
|
|
"unsloth",
|
|
"mistral-7b",
|
|
"bnb",
|
|
"en",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"4-bit",
|
|
"bitsandbytes",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "mistral-7b-bnb-4bit",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "glm-5-fp8",
|
|
"name": "Glm 5 Fp8",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/zai-org/GLM-5-FP8",
|
|
"description": "Open source model zai-org/GLM-5-FP8. 108 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 108,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"glm_moe_dsa",
|
|
"conversational",
|
|
"en",
|
|
"zh",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "glm-5-fp8"
|
|
},
|
|
{
|
|
"slug": "qwen-7b",
|
|
"name": "Qwen 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen-7B",
|
|
"description": "Open source model Qwen/Qwen-7B. 395 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 395,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen",
|
|
"custom_code",
|
|
"zh",
|
|
"en",
|
|
"arxiv:2309.16609",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen-7b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwq-32b-awq",
|
|
"name": "Qwq 32B Awq",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/QwQ-32B-AWQ",
|
|
"description": "Open source model Qwen/QwQ-32B-AWQ. 133 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 133,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"safetensors",
|
|
"qwen2",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2412.15115",
|
|
"base_model:Qwen/QwQ-32B",
|
|
"base_model:quantized:Qwen/QwQ-32B",
|
|
"4-bit",
|
|
"awq",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 22,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 32,
|
|
"parameters_active_b": 32,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwq-32b-awq"
|
|
},
|
|
{
|
|
"slug": "deepseek-r1-distill-llama-70b",
|
|
"name": "Deepseek R1 Distill Llama 70B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
|
|
"description": "Open source model deepseek-ai/DeepSeek-R1-Distill-Llama-70B. 741 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 741,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"conversational",
|
|
"arxiv:2501.12948",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 49,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 70,
|
|
"parameters_active_b": 70,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-r1-distill-llama-70b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-coder-7b",
|
|
"name": "Qwen2.5 Coder 7B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B",
|
|
"description": "Open source model Qwen/Qwen2.5-Coder-7B. 134 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 134,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen2",
|
|
"code",
|
|
"qwen",
|
|
"qwen-coder",
|
|
"codeqwen",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2409.12186",
|
|
"arxiv:2309.00071",
|
|
"arxiv:2407.10671",
|
|
"base_model:Qwen/Qwen2.5-7B",
|
|
"base_model:finetune:Qwen/Qwen2.5-7B",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-coder-7b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "qwen2.5-3b",
|
|
"name": "Qwen2.5 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen2.5-3B",
|
|
"description": "Open source model Qwen/Qwen2.5-3B. 169 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 169,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"safetensors",
|
|
"qwen2",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2407.10671",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen2.5-3b",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-v2-lite-chat",
|
|
"name": "Deepseek V2 Lite Chat",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat",
|
|
"description": "Open source model deepseek-ai/DeepSeek-V2-Lite-Chat. 133 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 133,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"deepseek_v2",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2405.04434",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-v2-lite-chat"
|
|
},
|
|
{
|
|
"slug": "tiny-qwen3forcausallm",
|
|
"name": "Tiny Qwen3Forcausallm",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/trl-internal-testing/tiny-Qwen3ForCausalLM",
|
|
"description": "Open source model trl-internal-testing/tiny-Qwen3ForCausalLM. 1 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1,
|
|
"language": "Python",
|
|
"license": "unknown",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"trl",
|
|
"conversational",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "tiny-qwen3forcausallm",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-coder-v2-lite-instruct",
|
|
"name": "Deepseek Coder V2 Lite Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
|
|
"description": "Open source model deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct. 539 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 539,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"deepseek_v2",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2401.06066",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-coder-v2-lite-instruct"
|
|
},
|
|
{
|
|
"slug": "qwen3-0.6b-base",
|
|
"name": "Qwen3 0.6B Base",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen3-0.6B-Base",
|
|
"description": "Open source model Qwen/Qwen3-0.6B-Base. 146 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 146,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen3",
|
|
"conversational",
|
|
"arxiv:2505.09388",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 4,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 6,
|
|
"parameters_active_b": 6,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen3-0.6b-base",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "diffractgpt_mistral_chemical_formula",
|
|
"name": "Diffractgpt_Mistral_Chemical_Formula",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/knc6/diffractgpt_mistral_chemical_formula",
|
|
"description": "Open source model knc6/diffractgpt_mistral_chemical_formula. 1 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"peft",
|
|
"safetensors",
|
|
"chemistry",
|
|
"text-generation-inference",
|
|
"atomgpt",
|
|
"diffraction",
|
|
"en",
|
|
"base_model:unsloth/mistral-7b-bnb-4bit",
|
|
"base_model:adapter:unsloth/mistral-7b-bnb-4bit",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "diffractgpt_mistral_chemical_formula",
|
|
"logo_url": "/logos/mistral.svg"
|
|
},
|
|
{
|
|
"slug": "qwen-7b-chat",
|
|
"name": "Qwen 7B Chat",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Qwen/Qwen-7B-Chat",
|
|
"description": "Open source model Qwen/Qwen-7B-Chat. 787 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 787,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"qwen",
|
|
"custom_code",
|
|
"zh",
|
|
"en",
|
|
"arxiv:2309.16609",
|
|
"arxiv:2305.08322",
|
|
"arxiv:2009.03300",
|
|
"arxiv:2305.05280",
|
|
"arxiv:2210.03629",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 5,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 7,
|
|
"parameters_active_b": 7,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "qwen-7b-chat",
|
|
"logo_url": "/logos/qwen.svg"
|
|
},
|
|
{
|
|
"slug": "nvidia-nemotron-3-nano-30b-a3b-nvfp4",
|
|
"name": "Nvidia Nemotron 3 Nano 30B A3B Nvfp4",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4",
|
|
"description": "Open source model nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4. 100 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 100,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"nemotron_h",
|
|
"feature-extraction",
|
|
"nvidia",
|
|
"pytorch",
|
|
"conversational",
|
|
"custom_code",
|
|
"en",
|
|
"es",
|
|
"fr",
|
|
"de",
|
|
"ja",
|
|
"it",
|
|
"dataset:nvidia/Nemotron-Pretraining-Code-v1",
|
|
"dataset:nvidia/Nemotron-CC-v2",
|
|
"dataset:nvidia/Nemotron-Pretraining-SFT-v1",
|
|
"dataset:nvidia/Nemotron-CC-Math-v1",
|
|
"dataset:nvidia/Nemotron-Pretraining-Code-v2",
|
|
"dataset:nvidia/Nemotron-Pretraining-Specialized-v1",
|
|
"dataset:nvidia/Nemotron-CC-v2.1",
|
|
"dataset:nvidia/Nemotron-CC-Code-v1",
|
|
"dataset:nvidia/Nemotron-Pretraining-Dataset-sample",
|
|
"dataset:nvidia/Nemotron-Competitive-Programming-v1",
|
|
"dataset:nvidia/Nemotron-Math-v2",
|
|
"dataset:nvidia/Nemotron-Agentic-v1",
|
|
"dataset:nvidia/Nemotron-Math-Proofs-v1",
|
|
"dataset:nvidia/Nemotron-Instruction-Following-Chat-v1",
|
|
"dataset:nvidia/Nemotron-Science-v1",
|
|
"dataset:nvidia/Nemotron-3-Nano-RL-Training-Blend",
|
|
"arxiv:2512.20848",
|
|
"arxiv:2512.20856",
|
|
"arxiv:2601.20088",
|
|
"base_model:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
|
|
"base_model:quantized:nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "24GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 21,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 30,
|
|
      "parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "nvidia-nemotron-3-nano-30b-a3b-nvfp4"
|
|
},
|
|
{
|
|
"slug": "falcon-h1-tiny-90m-instruct",
|
|
"name": "Falcon H1 Tiny 90M Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/tiiuae/Falcon-H1-Tiny-90M-Instruct",
|
|
"description": "Open source model tiiuae/Falcon-H1-Tiny-90M-Instruct. 31 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 31,
|
|
"language": "Python",
|
|
"license": "other",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"falcon_h1",
|
|
"falcon-h1",
|
|
"edge",
|
|
"conversational",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
      "parameters_total_b": 0.09,
|
|
      "parameters_active_b": 0.09,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "falcon-h1-tiny-90m-instruct",
|
|
"logo_url": "/logos/falcon.svg"
|
|
},
|
|
{
|
|
"slug": "hermes-3-llama-3.2-3b",
|
|
"name": "Hermes 3 Llama 3.2 3B",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.2-3B",
|
|
"description": "Open source model NousResearch/Hermes-3-Llama-3.2-3B. 174 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 174,
|
|
"language": "Python",
|
|
"license": "llama3",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"Llama-3",
|
|
"instruct",
|
|
"finetune",
|
|
"chatml",
|
|
"gpt4",
|
|
"synthetic data",
|
|
"distillation",
|
|
"function calling",
|
|
"json mode",
|
|
"axolotl",
|
|
"roleplaying",
|
|
"chat",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2408.11857",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"deploy:azure",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 2,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 3,
|
|
"parameters_active_b": 3,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "hermes-3-llama-3.2-3b",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "meta-llama-3.1-8b-instruct",
|
|
"name": "Meta Llama 3.1 8B Instruct",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/unsloth/Meta-Llama-3.1-8B-Instruct",
|
|
"description": "Open source model unsloth/Meta-Llama-3.1-8B-Instruct. 94 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 94,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"llama-3",
|
|
"meta",
|
|
"facebook",
|
|
"unsloth",
|
|
"conversational",
|
|
"en",
|
|
"base_model:meta-llama/Llama-3.1-8B-Instruct",
|
|
"base_model:finetune:meta-llama/Llama-3.1-8B-Instruct",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "meta-llama-3.1-8b-instruct",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "meta-llama-3.1-8b-instruct-gguf",
|
|
"name": "Meta Llama 3.1 8B Instruct Gguf",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF",
|
|
"description": "Open source model bartowski/Meta-Llama-3.1-8B-Instruct-GGUF. 321 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 321,
|
|
"language": "Python",
|
|
"license": "llama3.1",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"gguf",
|
|
"facebook",
|
|
"meta",
|
|
"pytorch",
|
|
"llama",
|
|
"llama-3",
|
|
"en",
|
|
"de",
|
|
"fr",
|
|
"it",
|
|
"pt",
|
|
"hi",
|
|
"es",
|
|
"th",
|
|
"base_model:meta-llama/Llama-3.1-8B-Instruct",
|
|
"base_model:quantized:meta-llama/Llama-3.1-8B-Instruct",
|
|
"endpoints_compatible",
|
|
"region:us",
|
|
"conversational"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 6,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 8,
|
|
"parameters_active_b": 8,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "meta-llama-3.1-8b-instruct-gguf",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "deepseek-v3-0324",
|
|
"name": "Deepseek V3 0324",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/deepseek-ai/DeepSeek-V3-0324",
|
|
"description": "Open source model deepseek-ai/DeepSeek-V3-0324. 3087 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 3087,
|
|
"language": "Python",
|
|
"license": "mit",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"deepseek_v3",
|
|
"conversational",
|
|
"custom_code",
|
|
"arxiv:2412.19437",
|
|
"eval-results",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"fp8",
|
|
"region:us"
|
|
],
|
|
    "hardware_req": "700GB+ VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
      "vram_inference_gb": 700,
|
|
"context_window_tokens": 4096,
|
|
      "parameters_total_b": 671,
|
|
      "parameters_active_b": 37,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "deepseek-v3-0324"
|
|
},
|
|
{
|
|
"slug": "elm",
|
|
"name": "Elm",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/Joaoffg/ELM",
|
|
"description": "Open source model Joaoffg/ELM. 2 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 2,
|
|
"language": "Python",
|
|
"license": "llama2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"academic",
|
|
"university",
|
|
"en",
|
|
"nl",
|
|
"arxiv:2408.06931",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "elm"
|
|
},
|
|
{
|
|
"slug": "llama-2-13b-chat-hf",
|
|
"name": "Llama 2 13B Chat Hf",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf",
|
|
"description": "Open source model meta-llama/Llama-2-13b-chat-hf. 1109 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 1109,
|
|
"language": "Python",
|
|
"license": "llama2",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"pytorch",
|
|
"safetensors",
|
|
"llama",
|
|
"facebook",
|
|
"meta",
|
|
"llama-2",
|
|
"conversational",
|
|
"en",
|
|
"arxiv:2307.09288",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "16GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 9,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 13,
|
|
"parameters_active_b": 13,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "llama-2-13b-chat-hf",
|
|
"logo_url": "/logos/meta.svg"
|
|
},
|
|
{
|
|
"slug": "svara-tts-v1",
|
|
"name": "Svara Tts V1",
|
|
"category": "AI Models",
|
|
"is_open_source": true,
|
|
"website": "https://huggingface.co/kenpath/svara-tts-v1",
|
|
"description": "Open source model kenpath/svara-tts-v1. 18 likes on Hugging Face.",
|
|
"pros": [
|
|
"Open Source",
|
|
"Running Locally"
|
|
],
|
|
"cons": [
|
|
"Requires GPU"
|
|
],
|
|
"stars": 18,
|
|
"language": "Python",
|
|
"license": "apache-2.0",
|
|
"tags": [
|
|
"AI",
|
|
"LLM",
|
|
"transformers",
|
|
"safetensors",
|
|
"llama",
|
|
"text-to-speech",
|
|
"speech-synthesis",
|
|
"multilingual",
|
|
"indic",
|
|
"orpheus",
|
|
"lora",
|
|
"low-latency",
|
|
"gguf",
|
|
"zero-shot",
|
|
"emotions",
|
|
"discrete-audio-tokens",
|
|
"hi",
|
|
"bn",
|
|
"mr",
|
|
"te",
|
|
"kn",
|
|
"bho",
|
|
"mag",
|
|
"hne",
|
|
"mai",
|
|
"as",
|
|
"brx",
|
|
"doi",
|
|
"gu",
|
|
"ml",
|
|
"pa",
|
|
"ta",
|
|
"ne",
|
|
"sa",
|
|
"en",
|
|
"dataset:SYSPIN",
|
|
"dataset:RASA",
|
|
"dataset:IndicTTS",
|
|
"dataset:SPICOR",
|
|
"base_model:canopylabs/3b-hi-ft-research_release",
|
|
"base_model:adapter:canopylabs/3b-hi-ft-research_release",
|
|
"text-generation-inference",
|
|
"endpoints_compatible",
|
|
"region:us"
|
|
],
|
|
"hardware_req": "8GB VRAM",
|
|
"hosting_type": "self-hosted",
|
|
"ai_metadata": {
|
|
"vram_inference_gb": 1,
|
|
"context_window_tokens": 4096,
|
|
"parameters_total_b": 0,
|
|
"parameters_active_b": 0,
|
|
"is_multimodal": false
|
|
},
|
|
"referral_url": "https://m.do.co/c/2ed27757a361",
|
|
"id": "svara-tts-v1"
|
|
},
|
|
{
|
|
"slug": "postman",
|
|
"name": "Postman",
|
|
"category": "API Development",
|
|
"is_open_source": false,
|
|
"pricing_model": "Freemium",
|
|
"website": "https://www.postman.com/",
|
|
"description": "An API platform for building and using APIs.",
|
|
"alternatives": [
|
|
"hoppscotch"
|
|
],
|
|
"tags": [
|
|
"API",
|
|
"Testing",
|
|
"Developer Tools"
|
|
],
|
|
"logo_url": "https://www.vectorlogo.zone/logos/getpostman/getpostman-icon.svg",
|
|
"avg_monthly_cost": 15,
|
|
"pros": [
|
|
"Comprehensive feature set for API development",
|
|
"Excellent collaboration tools",
|
|
"Extensive integrations with CI/CD"
|
|
],
|
|
"cons": [
|
|
"Can be resource-heavy and slow",
|
|
"Requires an account for core collaboration features",
|
|
"Some advanced features are locked behind expensive subscriptions"
|
|
]
|
|
},
|
|
{
|
|
"slug": "hoppscotch",
|
|
"name": "Hoppscotch",
|
|
"category": "API Development",
|
|
"is_open_source": true,
|
|
"pricing_model": "Free/Open Source",
|
|
"website": "https://hoppscotch.io/",
|
|
"description": "An open-source API development ecosystem.",
|
|
"github_repo": "hoppscotch/hoppscotch",
|
|
"alternatives": [
|
|
"postman"
|
|
],
|
|
"tags": [
|
|
"API",
|
|
"Testing",
|
|
"Developer Tools",
|
|
"Open Source"
|
|
],
|
|
"logo_url": "https://hoppscotch.io/_nuxt/logo.6d552ca3.svg",
|
|
"avg_monthly_cost": 0,
|
|
"pros": [
|
|
"Lightweight and runs directly in the browser",
|
|
"Fully open-source and self-hostable",
|
|
"Real-time collaboration and history syncing"
|
|
],
|
|
"cons": [
|
|
"Browser extensions sometimes required for certain requests",
|
|
"Fewer enterprise integrations compared to commercial alternatives"
|
|
]
|
|
},
|
|
{
|
|
"slug": "seaweedfs",
|
|
"name": "SeaweedFS",
|
|
"category": "Cloud Infrastructure",
|
|
"is_open_source": true,
|
|
"github_repo": "seaweedfs/seaweedfs",
|
|
"stars": 22000,
|
|
"website": "https://github.com/seaweedfs/seaweedfs",
|
|
"description": "SeaweedFS is a fast distributed storage system for blobs, objects, files, and data lake, for billions of files!",
|
|
"pros": [
|
|
"Extremely fast for small files",
|
|
"Highly scalable distributed architecture",
|
|
"S3 compatible API"
|
|
],
|
|
"cons": [
|
|
"More complex to configure than some alternatives"
|
|
],
|
|
"language": "Go",
|
|
"license": "Apache 2.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=github.com/seaweedfs",
|
|
"last_commit": "2025-03-01T00:00:00Z",
|
|
"hosting_type": "self-hosted",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/seaweedfs"
|
|
}
|
|
},
|
|
{
|
|
"slug": "ceph",
|
|
"name": "Ceph",
|
|
"category": "Cloud Infrastructure",
|
|
"is_open_source": true,
|
|
"github_repo": "ceph/ceph",
|
|
"stars": 13000,
|
|
"website": "https://ceph.io",
|
|
"description": "Ceph is a highly scalable distributed storage solution for block, object, and file storage.",
|
|
"pros": [
|
|
"Enterprise-grade reliability and scalability",
|
|
"Unified storage (block, object, file)",
|
|
"Strong community and industry support"
|
|
],
|
|
"cons": [
|
|
"Steep learning curve and complex management",
|
|
"Resource intensive"
|
|
],
|
|
"language": "C++",
|
|
"license": "LGPL-2.1",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=ceph.io",
|
|
"last_commit": "2025-03-01T00:00:00Z",
|
|
"hosting_type": "self-hosted",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/ceph"
|
|
}
|
|
},
|
|
{
|
|
"slug": "rustfs",
|
|
"name": "RustFS",
|
|
"category": "Cloud Infrastructure",
|
|
"is_open_source": true,
|
|
"github_repo": "rustfs/rustfs",
|
|
"stars": 1000,
|
|
"website": "https://rustfs.com/",
|
|
"description": "High-performance S3-compatible object storage designed for modern data workloads.",
|
|
"pros": [
|
|
"Written in Rust for high performance and safety",
|
|
"Permissive Apache 2.0 license",
|
|
"Faster than MinIO for small object workloads"
|
|
],
|
|
"cons": [
|
|
"Newer project with a smaller ecosystem",
|
|
"Less enterprise features than mature counterparts"
|
|
],
|
|
"language": "Rust",
|
|
"license": "Apache 2.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=rustfs.com",
|
|
"last_commit": "2025-02-15T00:00:00Z",
|
|
"hosting_type": "self-hosted",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/rustfs"
|
|
}
|
|
},
|
|
{
|
|
"slug": "forward-email",
|
|
"name": "Forward Email",
|
|
"category": "Email",
|
|
"is_open_source": true,
|
|
"github_repo": "forwardemail/forwardemail.net",
|
|
"stars": 1488,
|
|
"website": "https://forwardemail.net",
|
|
"description": "Privacy-focused encrypted email for everyone. All-in-one alternative to Gmail, Mailchimp, and Sendgrid with free inbound forwarding, full email hosting, built-in webmail, and IMAP/POP3/SMTP support.",
|
|
"pros": [
|
|
"100% open-source with self-hosted Docker deployment",
|
|
"Free inbound email forwarding for custom domains",
|
|
"Built-in webmail with IMAP/POP3/SMTP support",
|
|
"Quantum-resistant encryption for future-proof security"
|
|
],
|
|
"cons": [
|
|
"Self-hosting email requires careful DNS and deliverability setup",
|
|
"Native apps still in development"
|
|
],
|
|
"last_commit": "2026-03-02T16:45:21Z",
|
|
"language": "JavaScript",
|
|
"license": "MIT",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=forwardemail.net"
|
|
},
|
|
{
|
|
"slug": "gmail",
|
|
"name": "Gmail",
|
|
"category": "Email",
|
|
"is_open_source": false,
|
|
"pricing_model": "Paid/Freemium",
|
|
"website": "https://mail.google.com",
|
|
"description": "Google's free email service with 15GB storage, powerful search, and integration with Google Workspace.",
|
|
"alternatives": [
|
|
"forward-email"
|
|
],
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=gmail.com",
|
|
"avg_monthly_cost": 6,
|
|
"pros": [
|
|
"Excellent spam filtering and search",
|
|
"15GB free storage",
|
|
"Deep integration with Google Workspace",
|
|
"Works everywhere with great mobile apps"
|
|
],
|
|
"cons": [
|
|
"Google scans emails for ad targeting",
|
|
"No end-to-end encryption by default",
|
|
"Vendor lock-in with Google ecosystem",
|
|
"Privacy concerns for sensitive communications"
|
|
]
|
|
},
|
|
{
|
|
"slug": "tymeslot",
|
|
"name": "Tymeslot",
|
|
"category": "Productivity",
|
|
"is_open_source": true,
|
|
"github_repo": "Tymeslot/tymeslot",
|
|
"website": "https://github.com/Tymeslot/tymeslot",
|
|
"description": "Self-hostable, open-source meeting scheduling platform built with Elixir/Phoenix LiveView.",
|
|
"pros": [
|
|
"No ads, no tracking, no per-seat pricing",
|
|
"Google Calendar sync with timezone-aware scheduling",
|
|
"SSO support via OIDC/OAuth out of the box",
|
|
"Multiple calendar accounts per user"
|
|
],
|
|
"cons": [
|
|
"Smaller community and ecosystem than Calendly",
|
|
"Fewer third-party integrations out of the box"
|
|
],
|
|
"language": "Elixir",
|
|
"license": "AGPL-3.0",
|
|
"logo_url": "https://www.google.com/s2/favicons?sz=128&domain=raw.githubusercontent.com",
|
|
"deployment": {
|
|
"type": "docker-compose",
|
|
"local_path": "./.docker-deploy/tymeslot"
|
|
}
|
|
}
|
|
]
|