Compare commits
586 Commits
jmorganca/... → pdevine/ad
586 commits are included in this comparison, from 7bcdb250b9 (first listed) through bddfa2100f (last listed).

.gitattributes (vendored, 4 changes)
@@ -15,8 +15,12 @@ ml/backend/**/*.cu linguist-vendored
ml/backend/**/*.cuh linguist-vendored
ml/backend/**/*.m linguist-vendored
ml/backend/**/*.metal linguist-vendored
ml/backend/**/*.comp linguist-vendored
ml/backend/**/*.glsl linguist-vendored
ml/backend/**/CMakeLists.txt linguist-vendored

app/webview linguist-vendored

llama/build-info.cpp linguist-generated
ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.s linguist-generated

.github/ISSUE_TEMPLATE/10_bug_report.yml (vendored, 2 changes)
@@ -13,7 +13,7 @@ body:
id: logs
attributes:
label: Relevant log output
description: Please copy and paste any relevant log output. See [Troubleshooting Guide](https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md#how-to-troubleshoot-issues) for details.
description: Please copy and paste any relevant log output. See [Troubleshooting Guide](https://github.com/ollama/ollama/blob/main/docs/troubleshooting.mdx#how-to-troubleshoot-issues) for details.
render: shell
validations:
required: false
140
.github/workflows/release.yaml
vendored
@@ -16,16 +16,18 @@ jobs:
|
||||
outputs:
|
||||
GOFLAGS: ${{ steps.goflags.outputs.GOFLAGS }}
|
||||
VERSION: ${{ steps.goflags.outputs.VERSION }}
|
||||
vendorsha: ${{ steps.changes.outputs.vendorsha }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set environment
|
||||
id: goflags
|
||||
run: |
|
||||
echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${GITHUB_REF_NAME#v}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_OUTPUT
|
||||
echo VERSION="${GITHUB_REF_NAME#v}" >>$GITHUB_OUTPUT
|
||||
echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${GITHUB_REF_NAME#v}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" | tee -a $GITHUB_OUTPUT
|
||||
echo VERSION="${GITHUB_REF_NAME#v}" | tee -a $GITHUB_OUTPUT
|
||||
echo vendorsha=$(make -f Makefile.sync print-base) | tee -a $GITHUB_OUTPUT
|
||||
|
||||
darwin-build:
|
||||
runs-on: macos-14-xlarge
|
||||
runs-on: macos-26-xlarge
|
||||
environment: release
|
||||
needs: setup-environment
|
||||
env:
|
||||
@@ -53,6 +55,9 @@ jobs:
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
Makefile.sync
|
||||
- run: |
|
||||
./scripts/build_darwin.sh
|
||||
- name: Log build results
|
||||
@@ -63,6 +68,7 @@ jobs:
|
||||
name: bundles-darwin
|
||||
path: |
|
||||
dist/*.tgz
|
||||
dist/*.tar.zst
|
||||
dist/*.zip
|
||||
dist/*.dmg
|
||||
|
||||
@@ -104,6 +110,32 @@ jobs:
|
||||
install: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q4-WinSvr2022-For-HIP.exe
|
||||
rocm-version: '6.2'
|
||||
flags: '-DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma" -DCMAKE_CXX_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma"'
|
||||
runner_dir: 'rocm'
|
||||
- os: windows
|
||||
arch: amd64
|
||||
preset: Vulkan
|
||||
install: https://sdk.lunarg.com/sdk/download/1.4.321.1/windows/vulkansdk-windows-X64-1.4.321.1.exe
|
||||
flags: ''
|
||||
runner_dir: 'vulkan'
|
||||
- os: windows
|
||||
arch: amd64
|
||||
preset: 'MLX CUDA 13'
|
||||
install: https://developer.download.nvidia.com/compute/cuda/13.0.0/local_installers/cuda_13.0.0_windows.exe
|
||||
cudnn-install: https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/windows-x86_64/cudnn-windows-x86_64-9.18.1.3_cuda13-archive.zip
|
||||
cuda-components:
|
||||
- '"cudart"'
|
||||
- '"nvcc"'
|
||||
- '"cublas"'
|
||||
- '"cublas_dev"'
|
||||
- '"cufft"'
|
||||
- '"cufft_dev"'
|
||||
- '"nvrtc"'
|
||||
- '"nvrtc_dev"'
|
||||
- '"crt"'
|
||||
- '"nvvm"'
|
||||
- '"nvptxcompiler"'
|
||||
cuda-version: '13.0'
|
||||
flags: ''
|
||||
runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }}
|
||||
environment: release
|
||||
env:
|
||||
@@ -112,16 +144,20 @@ jobs:
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
choco install -y --no-progress ccache ninja
|
||||
ccache -o cache_dir=${{ github.workspace }}\.ccache
|
||||
- if: startsWith(matrix.preset, 'CUDA ') || startsWith(matrix.preset, 'ROCm ')
|
||||
if (Get-Command ccache -ErrorAction SilentlyContinue) {
|
||||
ccache -o cache_dir=${{ github.workspace }}\.ccache
|
||||
}
|
||||
- if: startsWith(matrix.preset, 'CUDA ') || startsWith(matrix.preset, 'ROCm ') || startsWith(matrix.preset, 'Vulkan') || startsWith(matrix.preset, 'MLX ')
|
||||
id: cache-install
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: |
|
||||
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA
|
||||
C:\Program Files\AMD\ROCm
|
||||
key: ${{ matrix.install }}
|
||||
- if: startsWith(matrix.preset, 'CUDA ')
|
||||
C:\VulkanSDK
|
||||
C:\Program Files\NVIDIA\CUDNN
|
||||
key: ${{ matrix.install }}-${{ matrix.cudnn-install }}
|
||||
- if: startsWith(matrix.preset, 'CUDA ') || startsWith(matrix.preset, 'MLX ')
|
||||
name: Install CUDA ${{ matrix.cuda-version }}
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
@@ -149,29 +185,60 @@ jobs:
|
||||
echo "HIPCXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "HIP_PLATFORM=amd" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "CMAKE_PREFIX_PATH=$hipPath" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
- if: matrix.preset == 'Vulkan'
|
||||
name: Install Vulkan ${{ matrix.rocm-version }}
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
|
||||
Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe"
|
||||
Start-Process -FilePath .\install.exe -ArgumentList "-c","--am","--al","in" -NoNewWindow -Wait
|
||||
}
|
||||
|
||||
$vulkanPath = (Resolve-Path "C:\VulkanSDK\*").path
|
||||
echo "$vulkanPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||
echo "VULKAN_SDK=$vulkanPath" >> $env:GITHUB_ENV
|
||||
- if: matrix.preset == 'CPU'
|
||||
run: |
|
||||
echo "CC=clang.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "CXX=clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
- if: startsWith(matrix.preset, 'MLX ')
|
||||
name: Install cuDNN for MLX
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
$cudnnRoot = "C:\Program Files\NVIDIA\CUDNN"
|
||||
if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
|
||||
Invoke-WebRequest -Uri "${{ matrix.cudnn-install }}" -OutFile "cudnn.zip"
|
||||
Expand-Archive -Path cudnn.zip -DestinationPath cudnn-extracted
|
||||
$cudnnDir = (Get-ChildItem -Path cudnn-extracted -Directory)[0].FullName
|
||||
New-Item -ItemType Directory -Force -Path $cudnnRoot
|
||||
Copy-Item -Path "$cudnnDir\*" -Destination "$cudnnRoot\" -Recurse
|
||||
}
|
||||
|
||||
echo "CUDNN_ROOT_DIR=$cudnnRoot" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "CUDNN_INCLUDE_PATH=$cudnnRoot\include" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "CUDNN_LIBRARY_PATH=$cudnnRoot\lib\x64" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "$cudnnRoot\bin\x64" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||
- if: ${{ !cancelled() && steps.cache-install.outputs.cache-hit != 'true' }}
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA
|
||||
C:\Program Files\AMD\ROCm
|
||||
key: ${{ matrix.install }}
|
||||
C:\VulkanSDK
|
||||
C:\Program Files\NVIDIA\CUDNN
|
||||
key: ${{ matrix.install }}-${{ matrix.cudnn-install }}
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ github.workspace }}\.ccache
|
||||
key: ccache-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.preset }}
|
||||
key: ccache-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.preset }}-${{ needs.setup-environment.outputs.vendorsha }}
|
||||
- name: Build target "${{ matrix.preset }}"
|
||||
run: |
|
||||
Import-Module 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
|
||||
Enter-VsDevShell -VsInstallPath 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
|
||||
cmake --preset "${{ matrix.preset }}" ${{ matrix.flags }} --install-prefix "$((pwd).Path)\dist\${{ matrix.os }}-${{ matrix.arch }}"
|
||||
cmake --build --parallel ([Environment]::ProcessorCount) --preset "${{ matrix.preset }}"
|
||||
cmake --install build --component "${{ startsWith(matrix.preset, 'CUDA ') && 'CUDA' || startsWith(matrix.preset, 'ROCm ') && 'HIP' || 'CPU' }}" --strip
|
||||
cmake --install build --component "${{ startsWith(matrix.preset, 'MLX ') && 'MLX' || startsWith(matrix.preset, 'CUDA ') && 'CUDA' || startsWith(matrix.preset, 'ROCm ') && 'HIP' || startsWith(matrix.preset, 'Vulkan') && 'Vulkan' || 'CPU' }}" --strip
|
||||
Remove-Item -Path dist\lib\ollama\rocm\rocblas\library\*gfx906* -ErrorAction SilentlyContinue
|
||||
env:
|
||||
CMAKE_GENERATOR: Ninja
|
||||
@@ -228,6 +295,9 @@ jobs:
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
Makefile.sync
|
||||
- name: Verify gcc is actually clang
|
||||
run: |
|
||||
$ErrorActionPreference='Continue'
|
||||
@@ -281,6 +351,9 @@ jobs:
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
Makefile.sync
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: depends-windows*
|
||||
@@ -304,6 +377,7 @@ jobs:
|
||||
name: bundles-windows
|
||||
path: |
|
||||
dist/*.zip
|
||||
dist/*.ps1
|
||||
dist/OllamaSetup.exe
|
||||
|
||||
linux-build:
|
||||
@@ -312,13 +386,13 @@ jobs:
|
||||
include:
|
||||
- os: linux
|
||||
arch: amd64
|
||||
target: archive_novulkan
|
||||
target: archive
|
||||
- os: linux
|
||||
arch: amd64
|
||||
target: rocm
|
||||
- os: linux
|
||||
arch: arm64
|
||||
target: archive_novulkan
|
||||
target: archive
|
||||
runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }}
|
||||
environment: release
|
||||
needs: setup-environment
|
||||
@@ -339,12 +413,18 @@ jobs:
|
||||
outputs: type=local,dest=dist/${{ matrix.os }}-${{ matrix.arch }}
|
||||
cache-from: type=registry,ref=${{ vars.DOCKER_REPO }}:latest
|
||||
cache-to: type=inline
|
||||
- name: Deduplicate CUDA libraries
|
||||
run: |
|
||||
./scripts/deduplicate_cuda_libs.sh dist/${{ matrix.os }}-${{ matrix.arch }}
|
||||
- run: |
|
||||
for COMPONENT in bin/* lib/ollama/*; do
|
||||
case "$COMPONENT" in
|
||||
bin/ollama) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
|
||||
bin/ollama*) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
|
||||
lib/ollama/*.so*) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
|
||||
lib/ollama/cuda_v*) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
|
||||
lib/ollama/vulkan*) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
|
||||
lib/ollama/mlx*) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
|
||||
lib/ollama/include*) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
|
||||
lib/ollama/cuda_jetpack5) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack5.tar.in ;;
|
||||
lib/ollama/cuda_jetpack6) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack6.tar.in ;;
|
||||
lib/ollama/rocm) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-rocm.tar.in ;;
|
||||
@@ -359,13 +439,13 @@ jobs:
|
||||
done
|
||||
- run: |
|
||||
for ARCHIVE in dist/${{ matrix.os }}-${{ matrix.arch }}/*.tar.in; do
|
||||
tar c -C dist/${{ matrix.os }}-${{ matrix.arch }} -T $ARCHIVE --owner 0 --group 0 | pigz -9vc >$(basename ${ARCHIVE//.*/}.tgz);
|
||||
tar c -C dist/${{ matrix.os }}-${{ matrix.arch }} -T $ARCHIVE --owner 0 --group 0 | zstd --ultra -22 -T0 >$(basename ${ARCHIVE//.*/}.tar.zst);
|
||||
done
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bundles-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.target }}
|
||||
path: |
|
||||
*.tgz
|
||||
*.tar.zst
|
||||
|
||||
# Build each Docker variant (OS, arch, and flavor) separately. Using QEMU is unreliable and slower.
|
||||
docker-build-push:
|
||||
@@ -374,14 +454,12 @@ jobs:
|
||||
include:
|
||||
- os: linux
|
||||
arch: arm64
|
||||
target: novulkan
|
||||
build-args: |
|
||||
CGO_CFLAGS
|
||||
CGO_CXXFLAGS
|
||||
GOFLAGS
|
||||
- os: linux
|
||||
arch: amd64
|
||||
target: novulkan
|
||||
build-args: |
|
||||
CGO_CFLAGS
|
||||
CGO_CXXFLAGS
|
||||
@@ -394,14 +472,6 @@ jobs:
|
||||
CGO_CXXFLAGS
|
||||
GOFLAGS
|
||||
FLAVOR=rocm
|
||||
- os: linux
|
||||
arch: amd64
|
||||
suffix: '-vulkan'
|
||||
target: default
|
||||
build-args: |
|
||||
CGO_CFLAGS
|
||||
CGO_CXXFLAGS
|
||||
GOFLAGS
|
||||
runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }}
|
||||
environment: release
|
||||
needs: setup-environment
|
||||
@@ -419,7 +489,6 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
platforms: ${{ matrix.os }}/${{ matrix.arch }}
|
||||
target: ${{ matrix.preset }}
|
||||
build-args: ${{ matrix.build-args }}
|
||||
outputs: type=image,name=${{ vars.DOCKER_REPO }},push-by-digest=true,name-canonical=true,push=true
|
||||
cache-from: type=registry,ref=${{ vars.DOCKER_REPO }}:latest
|
||||
@@ -487,6 +556,9 @@ jobs:
|
||||
- name: Log dist contents
|
||||
run: |
|
||||
ls -l dist/
|
||||
- name: Copy install scripts to dist
|
||||
run: |
|
||||
cp scripts/install.sh dist/install.sh
|
||||
- name: Generate checksum file
|
||||
run: find . -type f -not -name 'sha256sum.txt' | xargs sha256sum | tee sha256sum.txt
|
||||
working-directory: dist
|
||||
@@ -509,14 +581,22 @@ jobs:
|
||||
- name: Upload release artifacts
|
||||
run: |
|
||||
pids=()
|
||||
for payload in dist/*.txt dist/*.zip dist/*.tgz dist/*.exe dist/*.dmg ; do
|
||||
for payload in dist/*.txt dist/*.zip dist/*.tgz dist/*.tar.zst dist/*.exe dist/*.dmg dist/*.ps1 dist/*.sh ; do
|
||||
echo "Uploading $payload"
|
||||
gh release upload ${GITHUB_REF_NAME} $payload --clobber &
|
||||
pids[$!]=$!
|
||||
pids+=($!)
|
||||
sleep 1
|
||||
done
|
||||
echo "Waiting for uploads to complete"
|
||||
for pid in "${pids[*]}"; do
|
||||
wait $pid
|
||||
failed=0
|
||||
for pid in "${pids[@]}"; do
|
||||
if ! wait $pid; then
|
||||
echo "::error::Upload failed (pid $pid)"
|
||||
failed=1
|
||||
fi
|
||||
done
|
||||
if [ $failed -ne 0 ]; then
|
||||
echo "One or more uploads failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "done"
|
||||
|
||||

.github/workflows/test-install.yaml (vendored, new file, 22 changes)
@@ -0,0 +1,22 @@
name: test-install

on:
  pull_request:
    paths:
      - 'scripts/install.sh'
      - '.github/workflows/test-install.yaml'

jobs:
  test:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - name: Run install script
        run: sh ./scripts/install.sh
        env:
          OLLAMA_NO_START: 1 # do not start app
      - name: Verify ollama is available
        run: ollama --version
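The new workflow above simply runs the installer with OLLAMA_NO_START set and then checks that the binary is on PATH. The same check can be reproduced locally from a repository checkout, roughly as follows (a sketch; it assumes scripts/install.sh honors the OLLAMA_NO_START variable exactly as the workflow uses it):

```sh
# Run the install script without starting the app, then verify the CLI responds.
OLLAMA_NO_START=1 sh ./scripts/install.sh
ollama --version
```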
92
.github/workflows/test.yaml
vendored
@@ -22,6 +22,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
changed: ${{ steps.changes.outputs.changed }}
|
||||
vendorsha: ${{ steps.changes.outputs.vendorsha }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -36,7 +37,8 @@ jobs:
|
||||
| xargs python3 -c "import sys; from pathlib import Path; print(any(Path(x).match(glob) for x in sys.argv[1:] for glob in '$*'.split(' ')))"
|
||||
}
|
||||
|
||||
echo changed=$(changed 'llama/llama.cpp/**/*' 'ml/backend/ggml/ggml/**/*') | tee -a $GITHUB_OUTPUT
|
||||
echo changed=$(changed 'llama/llama.cpp/**/*' 'ml/backend/ggml/ggml/**/*' '.github/**/*') | tee -a $GITHUB_OUTPUT
|
||||
echo vendorsha=$(make -f Makefile.sync print-base) | tee -a $GITHUB_OUTPUT
|
||||
|
||||
linux:
|
||||
needs: [changes]
|
||||
@@ -49,7 +51,7 @@ jobs:
|
||||
container: nvidia/cuda:13.0.0-devel-ubuntu22.04
|
||||
flags: '-DCMAKE_CUDA_ARCHITECTURES=87'
|
||||
- preset: ROCm
|
||||
container: rocm/dev-ubuntu-22.04:6.1.2
|
||||
container: rocm/dev-ubuntu-22.04:7.2.1
|
||||
extra-packages: rocm-libs
|
||||
flags: '-DAMDGPU_TARGETS=gfx1010 -DCMAKE_PREFIX_PATH=/opt/rocm'
|
||||
- preset: Vulkan
|
||||
@@ -58,6 +60,11 @@ jobs:
|
||||
mesa-vulkan-drivers vulkan-tools
|
||||
libvulkan1 libvulkan-dev
|
||||
vulkan-sdk cmake ccache g++ make
|
||||
- preset: 'MLX CUDA 13'
|
||||
container: nvidia/cuda:13.0.0-devel-ubuntu22.04
|
||||
extra-packages: libcudnn9-dev-cuda-13 libopenblas-dev liblapack-dev liblapacke-dev git curl
|
||||
flags: '-DCMAKE_CUDA_ARCHITECTURES=87 -DBLAS_INCLUDE_DIRS=/usr/include/x86_64-linux-gnu -DLAPACK_INCLUDE_DIRS=/usr/include/x86_64-linux-gnu'
|
||||
install-go: true
|
||||
runs-on: linux
|
||||
container: ${{ matrix.container }}
|
||||
steps:
|
||||
@@ -74,19 +81,29 @@ jobs:
|
||||
$sudo apt-get update
|
||||
fi
|
||||
$sudo apt-get install -y cmake ccache ${{ matrix.extra-packages }}
|
||||
# MLX requires CMake 3.25+, install from official releases
|
||||
if [ "${{ matrix.preset }}" = "MLX CUDA 13" ]; then
|
||||
curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.31.2/cmake-3.31.2-linux-$(uname -m).tar.gz | $sudo tar xz -C /usr/local --strip-components 1
|
||||
fi
|
||||
# Export VULKAN_SDK if provided by LunarG package (defensive)
|
||||
if [ -d "/usr/lib/x86_64-linux-gnu/vulkan" ] && [ "${{ matrix.preset }}" = "Vulkan" ]; then
|
||||
echo "VULKAN_SDK=/usr" >> $GITHUB_ENV
|
||||
fi
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
- if: matrix.install-go
|
||||
name: Install Go
|
||||
run: |
|
||||
GO_VERSION=$(awk '/^go / { print $2 }' go.mod)
|
||||
curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-$(dpkg --print-architecture).tar.gz" | tar xz -C /usr/local
|
||||
echo "/usr/local/go/bin" >> $GITHUB_PATH
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: /github/home/.cache/ccache
|
||||
key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }}
|
||||
key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }}-${{ needs.changes.outputs.vendorsha }}
|
||||
- run: |
|
||||
cmake --preset ${{ matrix.preset }} ${{ matrix.flags }}
|
||||
cmake --build --preset ${{ matrix.preset }} --parallel
|
||||
cmake --preset "${{ matrix.preset }}" ${{ matrix.flags }}
|
||||
cmake --build --preset "${{ matrix.preset }}" --parallel
|
||||
|
||||
windows:
|
||||
needs: [changes]
|
||||
@@ -112,12 +129,31 @@ jobs:
|
||||
flags: '-DAMDGPU_TARGETS=gfx1010 -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma" -DCMAKE_CXX_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma"'
|
||||
- preset: Vulkan
|
||||
install: https://sdk.lunarg.com/sdk/download/1.4.321.1/windows/vulkansdk-windows-X64-1.4.321.1.exe
|
||||
- preset: 'MLX CUDA 13'
|
||||
install: https://developer.download.nvidia.com/compute/cuda/13.0.0/local_installers/cuda_13.0.0_windows.exe
|
||||
cudnn-install: https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/windows-x86_64/cudnn-windows-x86_64-9.18.1.3_cuda13-archive.zip
|
||||
flags: '-DCMAKE_CUDA_ARCHITECTURES=80'
|
||||
cuda-components:
|
||||
- '"cudart"'
|
||||
- '"nvcc"'
|
||||
- '"cublas"'
|
||||
- '"cublas_dev"'
|
||||
- '"cufft"'
|
||||
- '"cufft_dev"'
|
||||
- '"nvrtc"'
|
||||
- '"nvrtc_dev"'
|
||||
- '"crt"'
|
||||
- '"nvvm"'
|
||||
- '"nvptxcompiler"'
|
||||
cuda-version: '13.0'
|
||||
runs-on: windows
|
||||
steps:
|
||||
- run: |
|
||||
choco install -y --no-progress ccache ninja
|
||||
ccache -o cache_dir=${{ github.workspace }}\.ccache
|
||||
- if: matrix.preset == 'CUDA' || matrix.preset == 'ROCm' || matrix.preset == 'Vulkan'
|
||||
if (Get-Command ccache -ErrorAction SilentlyContinue) {
|
||||
ccache -o cache_dir=${{ github.workspace }}\.ccache
|
||||
}
|
||||
- if: matrix.preset == 'CUDA' || matrix.preset == 'ROCm' || matrix.preset == 'Vulkan' || matrix.preset == 'MLX CUDA 13'
|
||||
id: cache-install
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
@@ -125,8 +161,9 @@ jobs:
|
||||
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA
|
||||
C:\Program Files\AMD\ROCm
|
||||
C:\VulkanSDK
|
||||
key: ${{ matrix.install }}
|
||||
- if: matrix.preset == 'CUDA'
|
||||
C:\Program Files\NVIDIA\CUDNN
|
||||
key: ${{ matrix.install }}-${{ matrix.cudnn-install }}
|
||||
- if: matrix.preset == 'CUDA' || matrix.preset == 'MLX CUDA 13'
|
||||
name: Install CUDA ${{ matrix.cuda-version }}
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
@@ -162,22 +199,41 @@ jobs:
|
||||
Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe"
|
||||
Start-Process -FilePath .\install.exe -ArgumentList "-c","--am","--al","in" -NoNewWindow -Wait
|
||||
}
|
||||
|
||||
|
||||
$vulkanPath = (Resolve-Path "C:\VulkanSDK\*").path
|
||||
echo "$vulkanPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||
echo "VULKAN_SDK=$vulkanPath" >> $env:GITHUB_ENV
|
||||
- if: matrix.preset == 'MLX CUDA 13'
|
||||
name: Install cuDNN for MLX
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
$cudnnRoot = "C:\Program Files\NVIDIA\CUDNN"
|
||||
if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
|
||||
Invoke-WebRequest -Uri "${{ matrix.cudnn-install }}" -OutFile "cudnn.zip"
|
||||
Expand-Archive -Path cudnn.zip -DestinationPath cudnn-extracted
|
||||
$cudnnDir = (Get-ChildItem -Path cudnn-extracted -Directory)[0].FullName
|
||||
New-Item -ItemType Directory -Force -Path $cudnnRoot
|
||||
Copy-Item -Path "$cudnnDir\*" -Destination "$cudnnRoot\" -Recurse
|
||||
}
|
||||
|
||||
echo "CUDNN_ROOT_DIR=$cudnnRoot" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "CUDNN_INCLUDE_PATH=$cudnnRoot\include" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "CUDNN_LIBRARY_PATH=$cudnnRoot\lib\x64" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||
echo "$cudnnRoot\bin\x64" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||
- if: ${{ !cancelled() && steps.cache-install.outputs.cache-hit != 'true' }}
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA
|
||||
C:\Program Files\AMD\ROCm
|
||||
key: ${{ matrix.install }}
|
||||
C:\VulkanSDK
|
||||
C:\Program Files\NVIDIA\CUDNN
|
||||
key: ${{ matrix.install }}-${{ matrix.cudnn-install }}
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ github.workspace }}\.ccache
|
||||
key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }}
|
||||
key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }}-${{ needs.changes.outputs.vendorsha }}
|
||||
- run: |
|
||||
Import-Module 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
|
||||
Enter-VsDevShell -VsInstallPath 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
|
||||
@@ -205,6 +261,9 @@ jobs:
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
Makefile.sync
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
@@ -225,12 +284,9 @@ jobs:
|
||||
if: always()
|
||||
run: go test -count=1 -benchtime=1x ./...
|
||||
|
||||
# TODO(bmizerany): replace this heavy tool with just the
|
||||
# tools/checks/binaries we want and then make them all run in parallel
|
||||
# across jobs, not on a single tiny vm on Github Actions.
|
||||
- uses: golangci/golangci-lint-action@v6
|
||||
- uses: golangci/golangci-lint-action@v9
|
||||
with:
|
||||
args: --timeout 10m0s -v
|
||||
only-new-issues: true
|
||||
|
||||
patches:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -239,4 +295,4 @@ jobs:
|
||||
- name: Verify patches apply cleanly and do not change files
|
||||
run: |
|
||||
make -f Makefile.sync clean checkout apply-patches sync
|
||||
git diff --compact-summary --exit-code
|
||||
git diff --compact-summary --exit-code
|
||||
|
||||

.gitignore (vendored, 1 change)
@@ -15,3 +15,4 @@ __debug_bin*
llama/build
llama/vendor
/ollama
integration/testdata/models/

.golangci.yml
@@ -1,5 +1,4 @@
run:
timeout: 5m
version: "2"
linters:
enable:
- asasalint
@@ -7,35 +6,46 @@ linters:
- bodyclose
- containedctx
- gocheckcompilerdirectives
- gofmt
- gofumpt
- gosimple
- govet
- ineffassign
- intrange
- makezero
- misspell
- nilerr
- nolintlint
- nosprintfhostport
- staticcheck
- unconvert
- usetesting
- wastedassign
- whitespace
disable:
- usestdlibvars
- errcheck
linters-settings:
staticcheck:
checks:
- all
- -SA1019 # omit Deprecated check
- usestdlibvars
settings:
govet:
disable:
- unusedresult
staticcheck:
checks:
- all
- -QF* # disable quick fix suggestions
- -SA1019
- -ST1000 # package comment format
- -ST1003 # underscores in package names
- -ST1005 # error strings should not be capitalized
- -ST1012 # error var naming (ErrFoo)
- -ST1016 # receiver name consistency
- -ST1020 # comment on exported function format
- -ST1021 # comment on exported type format
- -ST1022 # comment on exported var format
- -ST1023 # omit type from declaration
severity:
default-severity: error
default: error
rules:
- linters:
- gofmt
- goimports
- intrange
severity: info
formatters:
enable:
- gofmt
- gofumpt
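CI consumes this config through golangci-lint (note the golangci-lint-action upgrade in .github/workflows/test.yaml below, which passes `--timeout 10m0s -v`). A local run equivalent to that CI invocation is roughly the following sketch, assuming a golangci-lint build that understands the version 2 schema is installed:

```sh
# Same timeout and verbosity as the CI job's args.
golangci-lint run --timeout 10m0s -v
```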
245
CMakeLists.txt
@@ -2,6 +2,22 @@ cmake_minimum_required(VERSION 3.21)
|
||||
|
||||
project(Ollama C CXX)
|
||||
|
||||
# Handle cross-compilation on macOS: when CMAKE_OSX_ARCHITECTURES is set to a
|
||||
# single architecture different from the host, override CMAKE_SYSTEM_PROCESSOR
|
||||
# to match. This is necessary because CMAKE_SYSTEM_PROCESSOR defaults to the
|
||||
# host architecture, but downstream projects (like MLX) use it to detect the
|
||||
# target architecture.
|
||||
if(CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_OSX_ARCHITECTURES MATCHES ";")
|
||||
# Single architecture specified
|
||||
if(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" AND NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
|
||||
message(STATUS "Cross-compiling for x86_64: overriding CMAKE_SYSTEM_PROCESSOR from ${CMAKE_SYSTEM_PROCESSOR} to x86_64")
|
||||
set(CMAKE_SYSTEM_PROCESSOR "x86_64")
|
||||
elseif(CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" AND NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
|
||||
message(STATUS "Cross-compiling for arm64: overriding CMAKE_SYSTEM_PROCESSOR from ${CMAKE_SYSTEM_PROCESSOR} to arm64")
|
||||
set(CMAKE_SYSTEM_PROCESSOR "arm64")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
include(CheckLanguage)
|
||||
include(GNUInstallDirs)
|
||||
|
||||
@@ -12,7 +28,7 @@ set(BUILD_SHARED_LIBS ON)
|
||||
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
set(CMAKE_CXX_EXTENSIONS ON) # Recent versions of MLX Requires gnu++17 extensions to compile properly
|
||||
|
||||
set(GGML_BUILD ON)
|
||||
set(GGML_SHARED ON)
|
||||
@@ -32,9 +48,10 @@ if((CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_OSX_ARCHITECTURES MATCHES "arm64")
|
||||
set(GGML_CPU_ALL_VARIANTS ON)
|
||||
endif()
|
||||
|
||||
if (CMAKE_OSX_ARCHITECTURES MATCHES "x86_64")
|
||||
if(APPLE)
|
||||
set(CMAKE_BUILD_RPATH "@loader_path")
|
||||
set(CMAKE_INSTALL_RPATH "@loader_path")
|
||||
set(CMAKE_BUILD_WITH_INSTALL_RPATH ON)
|
||||
endif()
|
||||
|
||||
set(OLLAMA_BUILD_DIR ${CMAKE_BINARY_DIR}/lib/ollama)
|
||||
@@ -47,13 +64,25 @@ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OLLAMA_BUILD_DIR})
|
||||
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${OLLAMA_BUILD_DIR})
|
||||
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${OLLAMA_BUILD_DIR})
|
||||
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src)
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/include)
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu)
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu/amx)
|
||||
# Store ggml include paths for use with target_include_directories later.
|
||||
# We avoid global include_directories() to prevent polluting the include path
|
||||
# for other projects like MLX (whose openblas dependency has its own common.h).
|
||||
set(GGML_INCLUDE_DIRS
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/include
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu/amx
|
||||
)
|
||||
|
||||
add_compile_definitions(NDEBUG GGML_VERSION=0x0 GGML_COMMIT=0x0)
|
||||
|
||||
# Define GGML version variables for shared library SOVERSION
|
||||
# These are required by ggml/src/CMakeLists.txt for proper library versioning
|
||||
set(GGML_VERSION_MAJOR 0)
|
||||
set(GGML_VERSION_MINOR 0)
|
||||
set(GGML_VERSION_PATCH 0)
|
||||
set(GGML_VERSION "${GGML_VERSION_MAJOR}.${GGML_VERSION_MINOR}.${GGML_VERSION_PATCH}")
|
||||
|
||||
set(GGML_CPU ON)
|
||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src)
|
||||
set_property(TARGET ggml PROPERTY EXCLUDE_FROM_ALL TRUE)
|
||||
@@ -63,6 +92,14 @@ if(NOT CPU_VARIANTS)
|
||||
set(CPU_VARIANTS "ggml-cpu")
|
||||
endif()
|
||||
|
||||
# Apply ggml include directories to ggml targets only (not globally)
|
||||
target_include_directories(ggml-base PRIVATE ${GGML_INCLUDE_DIRS})
|
||||
foreach(variant ${CPU_VARIANTS})
|
||||
if(TARGET ${variant})
|
||||
target_include_directories(${variant} PRIVATE ${GGML_INCLUDE_DIRS})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
install(TARGETS ggml-base ${CPU_VARIANTS}
|
||||
RUNTIME_DEPENDENCIES
|
||||
PRE_EXCLUDE_REGEXES ".*"
|
||||
@@ -79,6 +116,7 @@ if(CMAKE_CUDA_COMPILER)
|
||||
|
||||
find_package(CUDAToolkit)
|
||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cuda)
|
||||
target_include_directories(ggml-cuda PRIVATE ${GGML_INCLUDE_DIRS})
|
||||
install(TARGETS ggml-cuda
|
||||
RUNTIME_DEPENDENCIES
|
||||
DIRECTORIES ${CUDAToolkit_BIN_DIR} ${CUDAToolkit_BIN_DIR}/x64 ${CUDAToolkit_LIBRARY_DIR}
|
||||
@@ -110,6 +148,7 @@ if(CMAKE_HIP_COMPILER)
|
||||
if(AMDGPU_TARGETS)
|
||||
find_package(hip REQUIRED)
|
||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-hip)
|
||||
target_include_directories(ggml-hip PRIVATE ${GGML_INCLUDE_DIRS})
|
||||
|
||||
if (WIN32)
|
||||
target_compile_definitions(ggml-hip PRIVATE GGML_CUDA_NO_PEER_COPY)
|
||||
@@ -124,7 +163,7 @@ if(CMAKE_HIP_COMPILER)
|
||||
)
|
||||
install(RUNTIME_DEPENDENCY_SET rocm
|
||||
DIRECTORIES ${HIP_BIN_INSTALL_DIR} ${HIP_LIB_INSTALL_DIR}
|
||||
PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse tinfo rocprofiler-register drm drm_amdgpu numa elf
|
||||
PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse tinfo rocprofiler-register roctx64 rocroller drm drm_amdgpu numa elf
|
||||
PRE_EXCLUDE_REGEXES ".*"
|
||||
POST_EXCLUDE_REGEXES "system32"
|
||||
RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT HIP
|
||||
@@ -140,14 +179,186 @@ if(CMAKE_HIP_COMPILER)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
find_package(Vulkan)
|
||||
if(Vulkan_FOUND)
|
||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-vulkan)
|
||||
install(TARGETS ggml-vulkan
|
||||
RUNTIME_DEPENDENCIES
|
||||
PRE_INCLUDE_REGEXES vulkan
|
||||
PRE_EXCLUDE_REGEXES ".*"
|
||||
RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT Vulkan
|
||||
LIBRARY DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT Vulkan
|
||||
)
|
||||
if(NOT APPLE)
|
||||
find_package(Vulkan)
|
||||
if(Vulkan_FOUND)
|
||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-vulkan)
|
||||
target_include_directories(ggml-vulkan PRIVATE ${GGML_INCLUDE_DIRS})
|
||||
install(TARGETS ggml-vulkan
|
||||
RUNTIME_DEPENDENCIES
|
||||
PRE_INCLUDE_REGEXES vulkan
|
||||
PRE_EXCLUDE_REGEXES ".*"
|
||||
RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT Vulkan
|
||||
LIBRARY DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT Vulkan
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
option(MLX_ENGINE "Enable MLX backend" OFF)
|
||||
if(MLX_ENGINE)
|
||||
message(STATUS "Setting up MLX (this takes a while...)")
|
||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/x/imagegen/mlx)
|
||||
|
||||
# Find CUDA toolkit if MLX is built with CUDA support
|
||||
find_package(CUDAToolkit)
|
||||
|
||||
# Build list of directories for runtime dependency resolution
|
||||
set(MLX_RUNTIME_DIRS ${CUDAToolkit_BIN_DIR} ${CUDAToolkit_BIN_DIR}/x64 ${CUDAToolkit_LIBRARY_DIR})
|
||||
# Add cuDNN bin paths for DLLs (Windows MLX CUDA builds)
|
||||
# CUDNN_ROOT_DIR is the standard CMake variable for cuDNN location
|
||||
if(DEFINED ENV{CUDNN_ROOT_DIR})
|
||||
# cuDNN 9.x has versioned subdirectories under bin/ (e.g., bin/13.0/)
|
||||
file(GLOB CUDNN_BIN_SUBDIRS "$ENV{CUDNN_ROOT_DIR}/bin/*")
|
||||
list(APPEND MLX_RUNTIME_DIRS ${CUDNN_BIN_SUBDIRS})
|
||||
endif()
|
||||
# Add build output directory and MLX dependency build directories
|
||||
list(APPEND MLX_RUNTIME_DIRS ${OLLAMA_BUILD_DIR})
|
||||
# OpenBLAS DLL location (pre-built zip extracts into openblas-src/bin/)
|
||||
list(APPEND MLX_RUNTIME_DIRS ${CMAKE_BINARY_DIR}/_deps/openblas-src/bin)
|
||||
# NCCL: on Linux, if real NCCL is found, cmake bundles libnccl.so via the
|
||||
# regex below. If NCCL is not found, MLX links a static stub (OBJECT lib)
|
||||
# so there is no runtime dependency. This path covers the stub build dir
|
||||
# for windows so we include the DLL in our dependencies.
|
||||
list(APPEND MLX_RUNTIME_DIRS ${CMAKE_BINARY_DIR}/_deps/mlx-build/mlx/distributed/nccl/nccl_stub-prefix/src/nccl_stub-build/Release)
|
||||
|
||||
# Base regexes for runtime dependencies (cross-platform)
|
||||
set(MLX_INCLUDE_REGEXES cublas cublasLt cudart cufft nvrtc nvrtc-builtins cudnn nccl openblas gfortran)
|
||||
# On Windows, also include dl.dll (dlfcn-win32 POSIX emulation layer)
|
||||
if(WIN32)
|
||||
list(APPEND MLX_INCLUDE_REGEXES "^dl\\.dll$")
|
||||
endif()
|
||||
|
||||
install(TARGETS mlx mlxc
|
||||
RUNTIME_DEPENDENCIES
|
||||
DIRECTORIES ${MLX_RUNTIME_DIRS}
|
||||
PRE_INCLUDE_REGEXES ${MLX_INCLUDE_REGEXES}
|
||||
PRE_EXCLUDE_REGEXES ".*"
|
||||
RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT MLX
|
||||
LIBRARY DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT MLX
|
||||
FRAMEWORK DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT MLX
|
||||
)
|
||||
|
||||
# Install the Metal library for macOS arm64 (must be colocated with the binary)
|
||||
# Metal backend is only built for arm64, not x86_64
|
||||
if(APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
|
||||
install(FILES ${CMAKE_BINARY_DIR}/_deps/mlx-build/mlx/backend/metal/kernels/mlx.metallib
|
||||
DESTINATION ${OLLAMA_INSTALL_DIR}
|
||||
COMPONENT MLX)
|
||||
endif()
|
||||
|
||||
# Install headers for NVRTC JIT compilation at runtime.
|
||||
# MLX's own install rules use the default component so they get skipped by
|
||||
# --component MLX. Headers are installed alongside libmlx in OLLAMA_INSTALL_DIR.
|
||||
#
|
||||
# Layout:
|
||||
# ${OLLAMA_INSTALL_DIR}/include/cccl/{cuda,nv}/ — CCCL headers
|
||||
# ${OLLAMA_INSTALL_DIR}/include/*.h — CUDA toolkit headers
|
||||
#
|
||||
# MLX's jit_module.cpp resolves CCCL via
|
||||
# current_binary_dir()[.parent_path()] / "include" / "cccl"
|
||||
# On Linux, MLX's jit_module.cpp resolves CCCL via
|
||||
# current_binary_dir().parent_path() / "include" / "cccl", so we create a
|
||||
# symlink from lib/ollama/include -> ${OLLAMA_RUNNER_DIR}/include
|
||||
# This will need refinement if we add multiple CUDA versions for MLX in the future.
|
||||
# CUDA runtime headers are found via CUDA_PATH env var (set by mlxrunner).
|
||||
if(EXISTS ${CMAKE_BINARY_DIR}/_deps/cccl-src/include/cuda)
|
||||
install(DIRECTORY ${CMAKE_BINARY_DIR}/_deps/cccl-src/include/cuda
|
||||
DESTINATION ${OLLAMA_INSTALL_DIR}/include/cccl
|
||||
COMPONENT MLX)
|
||||
install(DIRECTORY ${CMAKE_BINARY_DIR}/_deps/cccl-src/include/nv
|
||||
DESTINATION ${OLLAMA_INSTALL_DIR}/include/cccl
|
||||
COMPONENT MLX)
|
||||
if(NOT WIN32 AND NOT APPLE)
|
||||
install(CODE "
|
||||
set(_link \"${CMAKE_INSTALL_PREFIX}/lib/ollama/include\")
|
||||
set(_target \"${OLLAMA_RUNNER_DIR}/include\")
|
||||
if(NOT EXISTS \${_link})
|
||||
execute_process(COMMAND \${CMAKE_COMMAND} -E create_symlink \${_target} \${_link})
|
||||
endif()
|
||||
" COMPONENT MLX)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Install minimal CUDA toolkit headers needed by MLX JIT kernels.
|
||||
# These are the transitive closure of includes from mlx/backend/cuda/device/*.cuh.
|
||||
# The Go mlxrunner sets CUDA_PATH to OLLAMA_INSTALL_DIR so MLX finds them at
|
||||
# $CUDA_PATH/include/*.h via NVRTC --include-path.
|
||||
if(CUDAToolkit_FOUND)
|
||||
# CUDAToolkit_INCLUDE_DIRS may be a semicolon-separated list
|
||||
# (e.g. ".../include;.../include/cccl"). Find the entry that
|
||||
# contains the CUDA runtime headers we need.
|
||||
set(_cuda_inc "")
|
||||
foreach(_dir ${CUDAToolkit_INCLUDE_DIRS})
|
||||
if(EXISTS "${_dir}/cuda_runtime_api.h")
|
||||
set(_cuda_inc "${_dir}")
|
||||
break()
|
||||
endif()
|
||||
endforeach()
|
||||
if(NOT _cuda_inc)
|
||||
message(WARNING "Could not find cuda_runtime_api.h in CUDAToolkit_INCLUDE_DIRS: ${CUDAToolkit_INCLUDE_DIRS}")
|
||||
else()
|
||||
set(_dst "${OLLAMA_INSTALL_DIR}/include")
|
||||
set(_MLX_JIT_CUDA_HEADERS
|
||||
builtin_types.h
|
||||
cooperative_groups.h
|
||||
cuda_bf16.h
|
||||
cuda_bf16.hpp
|
||||
cuda_device_runtime_api.h
|
||||
cuda_fp16.h
|
||||
cuda_fp16.hpp
|
||||
cuda_fp8.h
|
||||
cuda_fp8.hpp
|
||||
cuda_runtime_api.h
|
||||
device_types.h
|
||||
driver_types.h
|
||||
math_constants.h
|
||||
surface_types.h
|
||||
texture_types.h
|
||||
vector_functions.h
|
||||
vector_functions.hpp
|
||||
vector_types.h
|
||||
)
|
||||
foreach(_hdr ${_MLX_JIT_CUDA_HEADERS})
|
||||
install(FILES "${_cuda_inc}/${_hdr}"
|
||||
DESTINATION ${_dst}
|
||||
COMPONENT MLX)
|
||||
endforeach()
|
||||
# Subdirectory headers
|
||||
install(DIRECTORY "${_cuda_inc}/cooperative_groups"
|
||||
DESTINATION ${_dst}
|
||||
COMPONENT MLX
|
||||
FILES_MATCHING PATTERN "*.h")
|
||||
install(FILES "${_cuda_inc}/crt/host_defines.h"
|
||||
DESTINATION "${_dst}/crt"
|
||||
COMPONENT MLX)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# On Windows, explicitly install dl.dll (dlfcn-win32 POSIX dlopen emulation)
|
||||
# RUNTIME_DEPENDENCIES auto-excludes it via POST_EXCLUDE_FILES_STRICT because
|
||||
# dlfcn-win32 is a known CMake target with its own install rules (which install
|
||||
# to the wrong destination). We must install it explicitly here.
|
||||
if(WIN32)
|
||||
install(FILES ${OLLAMA_BUILD_DIR}/dl.dll
|
||||
DESTINATION ${OLLAMA_INSTALL_DIR}
|
||||
COMPONENT MLX)
|
||||
endif()
|
||||
|
||||
# Manually install CUDA runtime libraries that MLX loads via dlopen
|
||||
# (not detected by RUNTIME_DEPENDENCIES since they aren't link-time deps)
|
||||
if(CUDAToolkit_FOUND)
|
||||
file(GLOB MLX_CUDA_LIBS
|
||||
"${CUDAToolkit_LIBRARY_DIR}/libcudart.so*"
|
||||
"${CUDAToolkit_LIBRARY_DIR}/libcublas.so*"
|
||||
"${CUDAToolkit_LIBRARY_DIR}/libcublasLt.so*"
|
||||
"${CUDAToolkit_LIBRARY_DIR}/libnvrtc.so*"
|
||||
"${CUDAToolkit_LIBRARY_DIR}/libnvrtc-builtins.so*"
|
||||
"${CUDAToolkit_LIBRARY_DIR}/libcufft.so*"
|
||||
"${CUDAToolkit_LIBRARY_DIR}/libcudnn.so*")
|
||||
if(MLX_CUDA_LIBS)
|
||||
install(FILES ${MLX_CUDA_LIBS}
|
||||
DESTINATION ${OLLAMA_INSTALL_DIR}
|
||||
COMPONENT MLX)
|
||||
endif()
|
||||
endif()
|
||||
endif()

CMakePresets.json
@@ -41,7 +41,7 @@
"inherits": [ "CUDA" ],
"cacheVariables": {
"CMAKE_CUDA_ARCHITECTURES": "75-virtual;80-virtual;86-virtual;87-virtual;89-virtual;90-virtual;90a-virtual;100-virtual;103-virtual;110-virtual;120-virtual;121-virtual",
"CMAKE_CUDA_FLAGS": "-t 2",
"CMAKE_CUDA_FLAGS": "-t 4",
"OLLAMA_RUNNER_DIR": "cuda_v13"
}
},
@@ -77,12 +77,44 @@
"OLLAMA_RUNNER_DIR": "rocm"
}
},
{
"name": "ROCm 7",
"inherits": [ "ROCm" ],
"cacheVariables": {
"CMAKE_HIP_FLAGS": "-parallel-jobs=4",
"AMDGPU_TARGETS": "gfx942;gfx950;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102;gfx1103;gfx1150;gfx1151;gfx1200;gfx1201;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-",
"OLLAMA_RUNNER_DIR": "rocm"
}
},
{
"name": "Vulkan",
"inherits": [ "Default" ],
"cacheVariables": {
"OLLAMA_RUNNER_DIR": "vulkan"
}
},
{
"name": "MLX",
"inherits": [ "Default" ],
"cacheVariables": {
"MLX_ENGINE": "ON",
"OLLAMA_RUNNER_DIR": "mlx"
}
},
{
"name": "MLX CUDA 12",
"inherits": [ "MLX", "CUDA 12" ],
"cacheVariables": {
"OLLAMA_RUNNER_DIR": "mlx_cuda_v12"
}
},
{
"name": "MLX CUDA 13",
"inherits": [ "MLX", "CUDA 13" ],
"cacheVariables": {
"MLX_CUDA_ARCHITECTURES": "86;89;90;90a;100;103;75-virtual;80-virtual;110-virtual;120-virtual;121-virtual",
"OLLAMA_RUNNER_DIR": "mlx_cuda_v13"
}
}
],
"buildPresets": [
@@ -136,10 +168,30 @@
"inherits": [ "ROCm" ],
"configurePreset": "ROCm 6"
},
{
"name": "ROCm 7",
"inherits": [ "ROCm" ],
"configurePreset": "ROCm 7"
},
{
"name": "Vulkan",
"targets": [ "ggml-vulkan" ],
"configurePreset": "Vulkan"
},
{
"name": "MLX",
"targets": [ "mlx", "mlxc" ],
"configurePreset": "MLX"
},
{
"name": "MLX CUDA 12",
"targets": [ "mlx", "mlxc" ],
"configurePreset": "MLX CUDA 12"
},
{
"name": "MLX CUDA 13",
"targets": [ "mlx", "mlxc" ],
"configurePreset": "MLX CUDA 13"
}
]
}
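These configure and build presets are consumed through cmake's --preset flag, which is how the CI workflows in this comparison drive them. For example, building the new MLX CUDA 13 runner locally would look roughly like this (a sketch; it assumes the CUDA 13 toolkit and cuDNN prerequisites described in the workflows are already installed):

```sh
# Configure, then build, the "MLX CUDA 13" preset defined in CMakePresets.json.
cmake --preset "MLX CUDA 13"
cmake --build --preset "MLX CUDA 13" --parallel
```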

CONTRIBUTING.md
@@ -16,7 +16,7 @@ See the [development documentation](./docs/development.md) for instructions on h

* New features: new features (e.g. API fields, environment variables) add surface area to Ollama and make it harder to maintain in the long run as they cannot be removed without potentially breaking users in the future.
* Refactoring: large code improvements are important, but can be harder or take longer to review and merge.
* Documentation: small updates to fill in or correct missing documentation is helpful, however large documentation additions can be hard to maintain over time.
* Documentation: small updates to fill in or correct missing documentation are helpful, however large documentation additions can be hard to maintain over time.

### Issues that may not be accepted

@@ -43,7 +43,7 @@ Tips for proposals:
* Explain how the change will be tested.

Additionally, for bonus points: Provide draft documentation you would expect to
see if the change were accepted.
see if the changes were accepted.

## Pull requests

@@ -66,7 +66,6 @@ Examples:

llm/backend/mlx: support the llama architecture
CONTRIBUTING: provide clarity on good commit messages, and bad
docs: simplify manual installation with shorter curl commands

Bad Examples:

Dockerfile (187 changes)
@@ -1,128 +1,178 @@
# vim: filetype=dockerfile

ARG FLAVOR=${TARGETARCH}
ARG PARALLEL=8

ARG ROCMVERSION=6.3.3
ARG ROCMVERSION=7.2.1
ARG JETPACK5VERSION=r35.4.1
ARG JETPACK6VERSION=r36.4.0
ARG CMAKEVERSION=3.31.2
ARG NINJAVERSION=1.12.1
ARG VULKANVERSION=1.4.321.1

# We require gcc v10 minimum. v10.3 has regressions, so the rockylinux 8.5 AppStream has the latest compatible version
# Default empty stages for local MLX source overrides.
# Override with: docker build --build-context local-mlx=../mlx --build-context local-mlx-c=../mlx-c
FROM scratch AS local-mlx
FROM scratch AS local-mlx-c

FROM --platform=linux/amd64 rocm/dev-almalinux-8:${ROCMVERSION}-complete AS base-amd64
RUN yum install -y yum-utils \
&& yum-config-manager --add-repo https://dl.rockylinux.org/vault/rocky/8.5/AppStream/\$basearch/os/ \
&& rpm --import https://dl.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-8 \
&& dnf install -y yum-utils ccache gcc-toolset-10-gcc-10.2.1-8.2.el8 gcc-toolset-10-gcc-c++-10.2.1-8.2.el8 gcc-toolset-10-binutils-2.35-11.el8 \
&& dnf install -y ccache \
RUN dnf install -y yum-utils ccache gcc-toolset-11-gcc gcc-toolset-11-gcc-c++ gcc-toolset-11-binutils \
&& yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH
ARG VULKANVERSION
RUN wget https://sdk.lunarg.com/sdk/download/${VULKANVERSION}/linux/vulkansdk-linux-x86_64-${VULKANVERSION}.tar.xz -O /tmp/vulkansdk-linux-x86_64-${VULKANVERSION}.tar.xz \
&& tar xvf /tmp/vulkansdk-linux-x86_64-${VULKANVERSION}.tar.xz \
&& dnf -y install ninja-build \
&& ln -s /usr/bin/python3 /usr/bin/python \
&& /${VULKANVERSION}/vulkansdk -j 8 vulkan-headers \
&& /${VULKANVERSION}/vulkansdk -j 8 shaderc
RUN cp -r /${VULKANVERSION}/x86_64/include/* /usr/local/include/ \
&& cp -r /${VULKANVERSION}/x86_64/lib/* /usr/local/lib
ENV PATH=/${VULKANVERSION}/x86_64/bin:$PATH
ENV PATH=/opt/rh/gcc-toolset-11/root/usr/bin:$PATH

FROM --platform=linux/arm64 almalinux:8 AS base-arm64
# install epel-release for ccache
RUN yum install -y yum-utils epel-release \
&& dnf install -y clang ccache \
&& dnf install -y clang ccache git \
&& yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo
ENV CC=clang CXX=clang++

FROM base-${TARGETARCH} AS base
ARG CMAKEVERSION
ARG NINJAVERSION
RUN curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1
COPY CMakeLists.txt CMakePresets.json .
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
RUN dnf install -y unzip \
&& curl -fsSL -o /tmp/ninja.zip https://github.com/ninja-build/ninja/releases/download/v${NINJAVERSION}/ninja-linux$([ "$(uname -m)" = "aarch64" ] && echo "-aarch64").zip \
&& unzip /tmp/ninja.zip -d /usr/local/bin \
&& rm /tmp/ninja.zip
ENV CMAKE_GENERATOR=Ninja
ENV LDFLAGS=-s

FROM base AS cpu
RUN dnf install -y gcc-toolset-11-gcc gcc-toolset-11-gcc-c++
ENV PATH=/opt/rh/gcc-toolset-11/root/usr/bin:$PATH
ARG PARALLEL
COPY CMakeLists.txt CMakePresets.json .
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
RUN --mount=type=cache,target=/root/.ccache \
cmake --preset 'CPU' \
&& cmake --build --parallel ${PARALLEL} --preset 'CPU' \
&& cmake --install build --component CPU --strip --parallel ${PARALLEL}
&& cmake --build --preset 'CPU' -- -l $(nproc) \
&& cmake --install build --component CPU --strip

FROM base AS cuda-11
ARG CUDA11VERSION=11.8
RUN dnf install -y cuda-toolkit-${CUDA11VERSION//./-}
ENV PATH=/usr/local/cuda-11/bin:$PATH
ARG PARALLEL
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
cmake --preset 'CUDA 11' \
|
||||
&& cmake --build --parallel ${PARALLEL} --preset 'CUDA 11' \
|
||||
&& cmake --install build --component CUDA --strip --parallel ${PARALLEL}
|
||||
&& cmake --build --preset 'CUDA 11' -- -l $(nproc) \
|
||||
&& cmake --install build --component CUDA --strip
|
||||
|
||||
FROM base AS cuda-12
|
||||
ARG CUDA12VERSION=12.8
|
||||
RUN dnf install -y cuda-toolkit-${CUDA12VERSION//./-}
|
||||
ENV PATH=/usr/local/cuda-12/bin:$PATH
|
||||
ARG PARALLEL
|
||||
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
cmake --preset 'CUDA 12' \
|
||||
&& cmake --build --parallel ${PARALLEL} --preset 'CUDA 12' \
|
||||
&& cmake --install build --component CUDA --strip --parallel ${PARALLEL}
|
||||
&& cmake --build --preset 'CUDA 12' -- -l $(nproc) \
|
||||
&& cmake --install build --component CUDA --strip
|
||||
|
||||
|
||||
FROM base AS cuda-13
|
||||
ARG CUDA13VERSION=13.0
|
||||
RUN dnf install -y cuda-toolkit-${CUDA13VERSION//./-}
|
||||
ENV PATH=/usr/local/cuda-13/bin:$PATH
|
||||
ARG PARALLEL
|
||||
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
cmake --preset 'CUDA 13' \
|
||||
&& cmake --build --parallel ${PARALLEL} --preset 'CUDA 13' \
|
||||
&& cmake --install build --component CUDA --strip --parallel ${PARALLEL}
|
||||
&& cmake --build --preset 'CUDA 13' -- -l $(nproc) \
|
||||
&& cmake --install build --component CUDA --strip
|
||||
|
||||
|
||||
FROM base AS rocm-6
|
||||
FROM base AS rocm-7
|
||||
ENV PATH=/opt/rocm/hcc/bin:/opt/rocm/hip/bin:/opt/rocm/bin:/opt/rocm/hcc/bin:$PATH
|
||||
ARG PARALLEL
|
||||
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
cmake --preset 'ROCm 6' \
|
||||
&& cmake --build --parallel ${PARALLEL} --preset 'ROCm 6' \
|
||||
&& cmake --install build --component HIP --strip --parallel ${PARALLEL}
|
||||
cmake --preset 'ROCm 7' \
|
||||
&& cmake --build --preset 'ROCm 7' -- -l $(nproc) \
|
||||
&& cmake --install build --component HIP --strip
|
||||
RUN rm -f dist/lib/ollama/rocm/rocblas/library/*gfx90[06]*
|
||||
|
||||
FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK5VERSION} AS jetpack-5
|
||||
ARG CMAKEVERSION
|
||||
RUN apt-get update && apt-get install -y curl ccache \
|
||||
&& curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1
|
||||
ARG NINJAVERSION
|
||||
RUN apt-get update && apt-get install -y curl ccache unzip \
|
||||
&& curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1 \
|
||||
&& curl -fsSL -o /tmp/ninja.zip https://github.com/ninja-build/ninja/releases/download/v${NINJAVERSION}/ninja-linux-aarch64.zip \
|
||||
&& unzip /tmp/ninja.zip -d /usr/local/bin \
|
||||
&& rm /tmp/ninja.zip
|
||||
ENV CMAKE_GENERATOR=Ninja
|
||||
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
ARG PARALLEL
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
cmake --preset 'JetPack 5' \
|
||||
&& cmake --build --parallel ${PARALLEL} --preset 'JetPack 5' \
|
||||
&& cmake --install build --component CUDA --strip --parallel ${PARALLEL}
|
||||
&& cmake --build --preset 'JetPack 5' -- -l $(nproc) \
|
||||
&& cmake --install build --component CUDA --strip
|
||||
|
||||
FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK6VERSION} AS jetpack-6
|
||||
ARG CMAKEVERSION
|
||||
RUN apt-get update && apt-get install -y curl ccache \
|
||||
&& curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1
|
||||
ARG NINJAVERSION
|
||||
RUN apt-get update && apt-get install -y curl ccache unzip \
|
||||
&& curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1 \
|
||||
&& curl -fsSL -o /tmp/ninja.zip https://github.com/ninja-build/ninja/releases/download/v${NINJAVERSION}/ninja-linux-aarch64.zip \
|
||||
&& unzip /tmp/ninja.zip -d /usr/local/bin \
|
||||
&& rm /tmp/ninja.zip
|
||||
ENV CMAKE_GENERATOR=Ninja
|
||||
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
ARG PARALLEL
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
cmake --preset 'JetPack 6' \
|
||||
&& cmake --build --parallel ${PARALLEL} --preset 'JetPack 6' \
|
||||
&& cmake --install build --component CUDA --strip --parallel ${PARALLEL}
|
||||
&& cmake --build --preset 'JetPack 6' -- -l $(nproc) \
|
||||
&& cmake --install build --component CUDA --strip
|
||||
|
||||
FROM base AS vulkan
|
||||
ARG VULKANVERSION
|
||||
RUN ln -s /usr/bin/python3 /usr/bin/python \
|
||||
&& wget https://sdk.lunarg.com/sdk/download/${VULKANVERSION}/linux/vulkansdk-linux-x86_64-${VULKANVERSION}.tar.xz -O /tmp/vulkansdk.tar.xz \
|
||||
&& tar xvf /tmp/vulkansdk.tar.xz -C /tmp \
|
||||
&& /tmp/${VULKANVERSION}/vulkansdk -j 8 vulkan-headers \
|
||||
&& /tmp/${VULKANVERSION}/vulkansdk -j 8 shaderc \
|
||||
&& cp -r /tmp/${VULKANVERSION}/x86_64/include/* /usr/local/include/ \
|
||||
&& cp -r /tmp/${VULKANVERSION}/x86_64/lib/* /usr/local/lib \
|
||||
&& cp -r /tmp/${VULKANVERSION}/x86_64/bin/* /usr/local/bin/ \
|
||||
&& rm -rf /tmp/${VULKANVERSION} /tmp/vulkansdk.tar.xz
|
||||
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
cmake --preset 'Vulkan' \
|
||||
&& cmake --build --parallel --preset 'Vulkan' \
|
||||
&& cmake --install build --component Vulkan --strip --parallel 8
|
||||
&& cmake --build --preset 'Vulkan' -- -l $(nproc) \
|
||||
&& cmake --install build --component Vulkan --strip
|
||||
|
||||
FROM base AS mlx
|
||||
ARG CUDA13VERSION=13.0
|
||||
RUN dnf install -y cuda-toolkit-${CUDA13VERSION//./-} \
|
||||
&& dnf install -y openblas-devel lapack-devel \
|
||||
&& dnf install -y libcudnn9-cuda-13 libcudnn9-devel-cuda-13 \
|
||||
&& dnf install -y libnccl libnccl-devel
|
||||
ENV PATH=/usr/local/cuda-13/bin:$PATH
|
||||
ENV BLAS_INCLUDE_DIRS=/usr/include/openblas
|
||||
ENV LAPACK_INCLUDE_DIRS=/usr/include/openblas
|
||||
ENV CGO_LDFLAGS="-L/usr/local/cuda-13/lib64 -L/usr/local/cuda-13/targets/x86_64-linux/lib/stubs"
|
||||
WORKDIR /go/src/github.com/ollama/ollama
|
||||
COPY CMakeLists.txt CMakePresets.json .
|
||||
COPY ml/backend/ggml/ggml ml/backend/ggml/ggml
|
||||
COPY x/imagegen/mlx x/imagegen/mlx
|
||||
COPY go.mod go.sum .
|
||||
COPY MLX_VERSION MLX_C_VERSION .
|
||||
RUN curl -fsSL https://golang.org/dl/go$(awk '/^go/ { print $2 }' go.mod).linux-$(case $(uname -m) in x86_64) echo amd64 ;; aarch64) echo arm64 ;; esac).tar.gz | tar xz -C /usr/local
|
||||
ENV PATH=/usr/local/go/bin:$PATH
|
||||
RUN go mod download
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
--mount=type=bind,from=local-mlx,target=/tmp/local-mlx \
|
||||
--mount=type=bind,from=local-mlx-c,target=/tmp/local-mlx-c \
|
||||
if [ -f /tmp/local-mlx/CMakeLists.txt ]; then \
|
||||
export OLLAMA_MLX_SOURCE=/tmp/local-mlx; \
|
||||
fi \
|
||||
&& if [ -f /tmp/local-mlx-c/CMakeLists.txt ]; then \
|
||||
export OLLAMA_MLX_C_SOURCE=/tmp/local-mlx-c; \
|
||||
fi \
|
||||
&& cmake --preset 'MLX CUDA 13' -DBLAS_INCLUDE_DIRS=/usr/include/openblas -DLAPACK_INCLUDE_DIRS=/usr/include/openblas \
|
||||
&& cmake --build --preset 'MLX CUDA 13' -- -l $(nproc) \
|
||||
&& cmake --install build --component MLX --strip
|
||||
|
||||
FROM base AS build
|
||||
WORKDIR /go/src/github.com/ollama/ollama
|
||||
@@ -135,6 +185,8 @@ ARG GOFLAGS="'-ldflags=-w -s'"
|
||||
ENV CGO_ENABLED=1
|
||||
ARG CGO_CFLAGS
|
||||
ARG CGO_CXXFLAGS
|
||||
ENV CGO_CFLAGS="${CGO_CFLAGS}"
|
||||
ENV CGO_CXXFLAGS="${CGO_CXXFLAGS}"
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
go build -trimpath -buildmode=pie -o /bin/ollama .
|
||||
|
||||
@@ -143,6 +195,7 @@ FROM --platform=linux/amd64 scratch AS amd64
|
||||
COPY --from=cuda-12 dist/lib/ollama /lib/ollama/
|
||||
COPY --from=cuda-13 dist/lib/ollama /lib/ollama/
|
||||
COPY --from=vulkan dist/lib/ollama /lib/ollama/
|
||||
COPY --from=mlx /go/src/github.com/ollama/ollama/dist/lib/ollama /lib/ollama/
|
||||
|
||||
FROM --platform=linux/arm64 scratch AS arm64
|
||||
# COPY --from=cuda-11 dist/lib/ollama/ /lib/ollama/
|
||||
@@ -152,41 +205,15 @@ COPY --from=jetpack-5 dist/lib/ollama/ /lib/ollama/
|
||||
COPY --from=jetpack-6 dist/lib/ollama/ /lib/ollama/
|
||||
|
||||
FROM scratch AS rocm
|
||||
COPY --from=rocm-6 dist/lib/ollama /lib/ollama
|
||||
COPY --from=rocm-7 dist/lib/ollama /lib/ollama
|
||||
|
||||
FROM ${FLAVOR} AS archive
|
||||
ARG VULKANVERSION
|
||||
COPY --from=cpu dist/lib/ollama /lib/ollama
|
||||
COPY --from=build /bin/ollama /bin/ollama
|
||||
|
||||
# Temporary opt-out stages for Vulkan
|
||||
FROM --platform=linux/amd64 scratch AS amd64_novulkan
|
||||
# COPY --from=cuda-11 dist/lib/ollama/ /lib/ollama/
|
||||
COPY --from=cuda-12 dist/lib/ollama /lib/ollama/
|
||||
COPY --from=cuda-13 dist/lib/ollama /lib/ollama/
|
||||
FROM arm64 AS arm64_novulkan
|
||||
FROM ${FLAVOR}_novulkan AS archive_novulkan
|
||||
COPY --from=cpu dist/lib/ollama /lib/ollama
|
||||
COPY --from=build /bin/ollama /bin/ollama
|
||||
FROM ubuntu:24.04 AS novulkan
|
||||
FROM ubuntu:24.04
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y ca-certificates \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=archive_novulkan /bin /usr/bin
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
COPY --from=archive_novulkan /lib/ollama /usr/lib/ollama
|
||||
ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
|
||||
ENV NVIDIA_VISIBLE_DEVICES=all
|
||||
ENV OLLAMA_HOST=0.0.0.0:11434
|
||||
EXPOSE 11434
|
||||
ENTRYPOINT ["/bin/ollama"]
|
||||
CMD ["serve"]
|
||||
|
||||
FROM ubuntu:24.04 AS default
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y ca-certificates libvulkan1 \
|
||||
&& apt-get install -y ca-certificates libvulkan1 libopenblas0 \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=archive /bin /usr/bin
|
||||
|
||||
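The Dockerfile above adds empty `local-mlx` / `local-mlx-c` stages so local MLX checkouts can be substituted via BuildKit build contexts, and a `novulkan` stage as a temporary Vulkan opt-out image. A hedged sketch of invoking those options (the build-context flags are quoted from the Dockerfile comment; image tags and relative paths are examples, not part of this change):

```shell
# Build against local mlx / mlx-c checkouts via BuildKit build contexts
# (tag names and paths are illustrative):
docker build \
  --build-context local-mlx=../mlx \
  --build-context local-mlx-c=../mlx-c \
  -t ollama:local .

# Build the temporary Vulkan opt-out image using the novulkan stage:
docker build --target novulkan -t ollama:novulkan .
```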
1
MLX_C_VERSION
Normal file
@@ -0,0 +1 @@
|
||||
0726ca922fc902c4c61ef9c27d94132be418e945
|
||||
1
MLX_VERSION
Normal file
@@ -0,0 +1 @@
|
||||
38ad257088fb2193ad47e527cf6534a689f30943
|
||||
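`MLX_VERSION` and `MLX_C_VERSION` pin the mlx and mlx-c commits the build is expected to use. A hypothetical sketch of preparing matching local checkouts for the `local-mlx` / `local-mlx-c` build contexts shown above (the upstream ml-explore repository URLs are assumptions, not part of this change):

```shell
# Hypothetical: check out local mlx / mlx-c trees at the pinned commits
# (repository URLs are assumptions).
git clone https://github.com/ml-explore/mlx ../mlx
git -C ../mlx checkout "$(cat MLX_VERSION)"

git clone https://github.com/ml-explore/mlx-c ../mlx-c
git -C ../mlx-c checkout "$(cat MLX_C_VERSION)"
```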
@@ -1,6 +1,6 @@
|
||||
UPSTREAM=https://github.com/ggml-org/llama.cpp.git
|
||||
WORKDIR=llama/vendor
|
||||
FETCH_HEAD=3cfa9c3f125763305b4226bc032f1954f08990dc
|
||||
FETCH_HEAD=ec98e2002
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@@ -57,7 +57,7 @@ checkout: $(WORKDIR)
|
||||
$(WORKDIR):
|
||||
git clone $(UPSTREAM) $(WORKDIR)
|
||||
|
||||
.PHONE: format-patches
|
||||
.PHONY: format-patches
|
||||
format-patches: llama/patches
|
||||
git -C $(WORKDIR) format-patch \
|
||||
--no-signature \
|
||||
@@ -66,7 +66,11 @@ format-patches: llama/patches
|
||||
-o $(realpath $<) \
|
||||
$(FETCH_HEAD)
|
||||
|
||||
.PHONE: clean
|
||||
.PHONY: clean
|
||||
clean: checkout
|
||||
@git -C $(WORKDIR) am --abort || true
|
||||
$(RM) llama/patches/.*.patched
|
||||
|
||||
.PHONY: print-base
|
||||
print-base:
|
||||
@echo $(FETCH_HEAD)
|
||||
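The vendor-sync Makefile change bumps `FETCH_HEAD` to `ec98e2002` and corrects the misspelled `.PHONE:` declarations to `.PHONY:`. A minimal sketch of driving it (the `Makefile.sync` filename is an assumption; the targets and their behavior are taken from this change):

```shell
# Minimal sketch; the Makefile.sync filename is an assumption.
make -f Makefile.sync clean        # aborts any in-progress `git am` and removes .patched markers
make -f Makefile.sync checkout     # ensures llama/vendor is cloned from the upstream repo
make -f Makefile.sync print-base   # echoes the pinned upstream commit (ec98e2002 after this change)
```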
869
README.md
@@ -1,20 +1,30 @@
|
||||
<div align="center">
|
||||
<a href="https://ollama.com">
|
||||
<img alt="ollama" width="240" src="https://github.com/ollama/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
|
||||
<p align="center">
|
||||
<a href="https://ollama.com">
|
||||
<img src="https://github.com/ollama/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7" alt="ollama" width="200"/>
|
||||
</a>
|
||||
</div>
|
||||
</p>
|
||||
|
||||
# Ollama
|
||||
|
||||
Get up and running with large language models.
|
||||
Start building with open models.
|
||||
|
||||
## Download
|
||||
|
||||
### macOS
|
||||
|
||||
[Download](https://ollama.com/download/Ollama.dmg)
|
||||
```shell
|
||||
curl -fsSL https://ollama.com/install.sh | sh
|
||||
```
|
||||
|
||||
or [download manually](https://ollama.com/download/Ollama.dmg)
|
||||
|
||||
### Windows
|
||||
|
||||
[Download](https://ollama.com/download/OllamaSetup.exe)
|
||||
```shell
|
||||
irm https://ollama.com/install.ps1 | iex
|
||||
```
|
||||
|
||||
or [download manually](https://ollama.com/download/OllamaSetup.exe)
|
||||
|
||||
### Linux
|
||||
|
||||
@@ -36,606 +46,311 @@ The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `olla
|
||||
### Community
|
||||
|
||||
- [Discord](https://discord.gg/ollama)
|
||||
- [𝕏 (Twitter)](https://x.com/ollama)
|
||||
- [Reddit](https://reddit.com/r/ollama)
|
||||
|
||||
## Quickstart
|
||||
## Get started
|
||||
|
||||
To run and chat with [Gemma 3](https://ollama.com/library/gemma3):
|
||||
```
|
||||
ollama
|
||||
```
|
||||
|
||||
```shell
|
||||
You'll be prompted to run a model or connect Ollama to your existing agents or applications such as `Claude Code`, `OpenClaw`, `OpenCode`, `Codex`, `Copilot`, and more.
|
||||
|
||||
### Coding
|
||||
|
||||
To launch a specific integration:
|
||||
|
||||
```
|
||||
ollama launch claude
|
||||
```
|
||||
|
||||
Supported integrations include [Claude Code](https://docs.ollama.com/integrations/claude-code), [Codex](https://docs.ollama.com/integrations/codex), [Copilot CLI](https://docs.ollama.com/integrations/copilot-cli), [Droid](https://docs.ollama.com/integrations/droid), and [OpenCode](https://docs.ollama.com/integrations/opencode).
|
||||
|
||||
### AI assistant
|
||||
|
||||
Use [OpenClaw](https://docs.ollama.com/integrations/openclaw) to turn Ollama into a personal AI assistant across WhatsApp, Telegram, Slack, Discord, and more:
|
||||
|
||||
```
|
||||
ollama launch openclaw
|
||||
```
|
||||
|
||||
### Chat with a model
|
||||
|
||||
Run and chat with [Gemma 3](https://ollama.com/library/gemma3):
|
||||
|
||||
```
|
||||
ollama run gemma3
|
||||
```
|
||||
|
||||
## Model library
|
||||
See [ollama.com/library](https://ollama.com/library) for the full list.
|
||||
|
||||
Ollama supports a list of models available on [ollama.com/library](https://ollama.com/library 'ollama model library')
|
||||
|
||||
Here are some example models that can be downloaded:
|
||||
|
||||
| Model | Parameters | Size | Download |
|
||||
| ------------------ | ---------- | ----- | -------------------------------- |
|
||||
| Gemma 3 | 1B | 815MB | `ollama run gemma3:1b` |
|
||||
| Gemma 3 | 4B | 3.3GB | `ollama run gemma3` |
|
||||
| Gemma 3 | 12B | 8.1GB | `ollama run gemma3:12b` |
|
||||
| Gemma 3 | 27B | 17GB | `ollama run gemma3:27b` |
|
||||
| QwQ | 32B | 20GB | `ollama run qwq` |
|
||||
| DeepSeek-R1 | 7B | 4.7GB | `ollama run deepseek-r1` |
|
||||
| DeepSeek-R1 | 671B | 404GB | `ollama run deepseek-r1:671b` |
|
||||
| Llama 4 | 109B | 67GB | `ollama run llama4:scout` |
|
||||
| Llama 4 | 400B | 245GB | `ollama run llama4:maverick` |
|
||||
| Llama 3.3 | 70B | 43GB | `ollama run llama3.3` |
|
||||
| Llama 3.2 | 3B | 2.0GB | `ollama run llama3.2` |
|
||||
| Llama 3.2 | 1B | 1.3GB | `ollama run llama3.2:1b` |
|
||||
| Llama 3.2 Vision | 11B | 7.9GB | `ollama run llama3.2-vision` |
|
||||
| Llama 3.2 Vision | 90B | 55GB | `ollama run llama3.2-vision:90b` |
|
||||
| Llama 3.1 | 8B | 4.7GB | `ollama run llama3.1` |
|
||||
| Llama 3.1 | 405B | 231GB | `ollama run llama3.1:405b` |
|
||||
| Phi 4 | 14B | 9.1GB | `ollama run phi4` |
|
||||
| Phi 4 Mini | 3.8B | 2.5GB | `ollama run phi4-mini` |
|
||||
| Mistral | 7B | 4.1GB | `ollama run mistral` |
|
||||
| Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
|
||||
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
|
||||
| Starling | 7B | 4.1GB | `ollama run starling-lm` |
|
||||
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
|
||||
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
|
||||
| LLaVA | 7B | 4.5GB | `ollama run llava` |
|
||||
| Granite-3.3 | 8B | 4.9GB | `ollama run granite3.3` |
|
||||
|
||||
> [!NOTE]
|
||||
> You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
|
||||
|
||||
## Customize a model
|
||||
|
||||
### Import from GGUF
|
||||
|
||||
Ollama supports importing GGUF models in the Modelfile:
|
||||
|
||||
1. Create a file named `Modelfile`, with a `FROM` instruction with the local filepath to the model you want to import.
|
||||
|
||||
```
|
||||
FROM ./vicuna-33b.Q4_0.gguf
|
||||
```
|
||||
|
||||
2. Create the model in Ollama
|
||||
|
||||
```shell
|
||||
ollama create example -f Modelfile
|
||||
```
|
||||
|
||||
3. Run the model
|
||||
|
||||
```shell
|
||||
ollama run example
|
||||
```
|
||||
|
||||
### Import from Safetensors
|
||||
|
||||
See the [guide](https://docs.ollama.com/import) on importing models for more information.
|
||||
|
||||
### Customize a prompt
|
||||
|
||||
Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3.2` model:
|
||||
|
||||
```shell
|
||||
ollama pull llama3.2
|
||||
```
|
||||
|
||||
Create a `Modelfile`:
|
||||
|
||||
```
|
||||
FROM llama3.2
|
||||
|
||||
# set the temperature to 1 [higher is more creative, lower is more coherent]
|
||||
PARAMETER temperature 1
|
||||
|
||||
# set the system message
|
||||
SYSTEM """
|
||||
You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
|
||||
"""
|
||||
```
|
||||
|
||||
Next, create and run the model:
|
||||
|
||||
```
|
||||
ollama create mario -f ./Modelfile
|
||||
ollama run mario
|
||||
>>> hi
|
||||
Hello! It's your friend Mario.
|
||||
```
|
||||
|
||||
For more information on working with a Modelfile, see the [Modelfile](https://docs.ollama.com/modelfile) documentation.
|
||||
|
||||
## CLI Reference
|
||||
|
||||
### Create a model
|
||||
|
||||
`ollama create` is used to create a model from a Modelfile.
|
||||
|
||||
```shell
|
||||
ollama create mymodel -f ./Modelfile
|
||||
```
|
||||
|
||||
### Pull a model
|
||||
|
||||
```shell
|
||||
ollama pull llama3.2
|
||||
```
|
||||
|
||||
> This command can also be used to update a local model. Only the diff will be pulled.
|
||||
|
||||
### Remove a model
|
||||
|
||||
```shell
|
||||
ollama rm llama3.2
|
||||
```
|
||||
|
||||
### Copy a model
|
||||
|
||||
```shell
|
||||
ollama cp llama3.2 my-model
|
||||
```
|
||||
|
||||
### Multiline input
|
||||
|
||||
For multiline input, you can wrap text with `"""`:
|
||||
|
||||
```
|
||||
>>> """Hello,
|
||||
... world!
|
||||
... """
|
||||
I'm a basic program that prints the famous "Hello, world!" message to the console.
|
||||
```
|
||||
|
||||
### Multimodal models
|
||||
|
||||
```
|
||||
ollama run llava "What's in this image? /Users/jmorgan/Desktop/smile.png"
|
||||
```
|
||||
|
||||
> **Output**: The image features a yellow smiley face, which is likely the central focus of the picture.
|
||||
|
||||
### Pass the prompt as an argument
|
||||
|
||||
```shell
|
||||
ollama run llama3.2 "Summarize this file: $(cat README.md)"
|
||||
```
|
||||
|
||||
> **Output**: Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
|
||||
|
||||
### Show model information
|
||||
|
||||
```shell
|
||||
ollama show llama3.2
|
||||
```
|
||||
|
||||
### List models on your computer
|
||||
|
||||
```shell
|
||||
ollama list
|
||||
```
|
||||
|
||||
### List which models are currently loaded
|
||||
|
||||
```shell
|
||||
ollama ps
|
||||
```
|
||||
|
||||
### Stop a model which is currently running
|
||||
|
||||
```shell
|
||||
ollama stop llama3.2
|
||||
```
|
||||
|
||||
### Generate embeddings from the CLI
|
||||
|
||||
```shell
|
||||
ollama run embeddinggemma "Your text to embed"
|
||||
```
|
||||
|
||||
You can also pipe text for scripted workflows:
|
||||
|
||||
```shell
|
||||
echo "Your text to embed" | ollama run embeddinggemma
|
||||
```
|
||||
|
||||
### Start Ollama
|
||||
|
||||
`ollama serve` is used when you want to start ollama without running the desktop application.
|
||||
|
||||
## Building
|
||||
|
||||
See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
|
||||
|
||||
### Running local builds
|
||||
|
||||
Next, start the server:
|
||||
|
||||
```shell
|
||||
./ollama serve
|
||||
```
|
||||
|
||||
Finally, in a separate shell, run a model:
|
||||
|
||||
```shell
|
||||
./ollama run llama3.2
|
||||
```
|
||||
See the [quickstart guide](https://docs.ollama.com/quickstart) for more details.
|
||||
|
||||
## REST API
|
||||
|
||||
Ollama has a REST API for running and managing models.
|
||||
|
||||
### Generate a response
|
||||
|
||||
```shell
|
||||
curl http://localhost:11434/api/generate -d '{
|
||||
"model": "llama3.2",
|
||||
"prompt":"Why is the sky blue?"
|
||||
}'
|
||||
```
|
||||
|
||||
### Chat with a model
|
||||
|
||||
```shell
|
||||
curl http://localhost:11434/api/chat -d '{
|
||||
"model": "llama3.2",
|
||||
"messages": [
|
||||
{ "role": "user", "content": "why is the sky blue?" }
|
||||
]
|
||||
"model": "gemma3",
|
||||
"messages": [{
|
||||
"role": "user",
|
||||
"content": "Why is the sky blue?"
|
||||
}],
|
||||
"stream": false
|
||||
}'
|
||||
```
|
||||
|
||||
See the [API documentation](./docs/api.md) for all endpoints.
|
||||
See the [API documentation](https://docs.ollama.com/api) for all endpoints.
|
||||
|
||||
### Python
|
||||
|
||||
```
|
||||
pip install ollama
|
||||
```
|
||||
|
||||
```python
|
||||
from ollama import chat
|
||||
|
||||
response = chat(model='gemma3', messages=[
|
||||
{
|
||||
'role': 'user',
|
||||
'content': 'Why is the sky blue?',
|
||||
},
|
||||
])
|
||||
print(response.message.content)
|
||||
```
|
||||
|
||||
### JavaScript
|
||||
|
||||
```
|
||||
npm i ollama
|
||||
```
|
||||
|
||||
```javascript
|
||||
import ollama from "ollama";
|
||||
|
||||
const response = await ollama.chat({
|
||||
model: "gemma3",
|
||||
messages: [{ role: "user", content: "Why is the sky blue?" }],
|
||||
});
|
||||
console.log(response.message.content);
|
||||
```
|
||||
|
||||
## Supported backends
|
||||
|
||||
- [llama.cpp](https://github.com/ggml-org/llama.cpp) project founded by Georgi Gerganov.
|
||||
|
||||
## Documentation
|
||||
|
||||
- [CLI reference](https://docs.ollama.com/cli)
|
||||
- [REST API reference](https://docs.ollama.com/api)
|
||||
- [Importing models](https://docs.ollama.com/import)
|
||||
- [Modelfile reference](https://docs.ollama.com/modelfile)
|
||||
- [Building from source](https://github.com/ollama/ollama/blob/main/docs/development.md)
|
||||
|
||||
## Community Integrations
|
||||
|
||||
### Web & Desktop
|
||||
> Want to add your project? Open a pull request.
|
||||
|
||||
- [Open WebUI](https://github.com/open-webui/open-webui)
|
||||
- [SwiftChat (macOS with ReactNative)](https://github.com/aws-samples/swift-chat)
|
||||
- [Enchanted (macOS native)](https://github.com/AugustDev/enchanted)
|
||||
- [Hollama](https://github.com/fmaclen/hollama)
|
||||
- [Lollms WebUI (Single user)](https://github.com/ParisNeo/lollms-webui)
|
||||
- [Lollms (Multi users)](https://github.com/ParisNeo/lollms)
|
||||
- [LibreChat](https://github.com/danny-avila/LibreChat)
|
||||
- [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt)
|
||||
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
|
||||
- [Saddle](https://github.com/jikkuatwork/saddle)
|
||||
- [TagSpaces](https://www.tagspaces.org) (A platform for file-based apps, [utilizing Ollama](https://docs.tagspaces.org/ai/) for the generation of tags and descriptions)
|
||||
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
|
||||
- [Chatbot UI v2](https://github.com/mckaywrigley/chatbot-ui)
|
||||
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
|
||||
- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
|
||||
- [Ollamac](https://github.com/kevinhermawan/Ollamac)
|
||||
- [big-AGI](https://github.com/enricoros/big-AGI)
|
||||
- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
|
||||
- [Amica](https://github.com/semperai/amica)
|
||||
- [chatd](https://github.com/BruceMacD/chatd)
|
||||
- [Ollama-SwiftUI](https://github.com/kghandour/Ollama-SwiftUI)
|
||||
- [Dify.AI](https://github.com/langgenius/dify)
|
||||
- [MindMac](https://mindmac.app)
|
||||
- [NextJS Web Interface for Ollama](https://github.com/jakobhoeg/nextjs-ollama-llm-ui)
|
||||
- [Msty](https://msty.app)
|
||||
- [Chatbox](https://github.com/Bin-Huang/Chatbox)
|
||||
- [WinForm Ollama Copilot](https://github.com/tgraupmann/WinForm_Ollama_Copilot)
|
||||
- [NextChat](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web) with [Get Started Doc](https://docs.nextchat.dev/models/ollama)
|
||||
- [Alpaca WebUI](https://github.com/mmo80/alpaca-webui)
|
||||
- [OllamaGUI](https://github.com/enoch1118/ollamaGUI)
|
||||
- [OpenAOE](https://github.com/InternLM/OpenAOE)
|
||||
- [Odin Runes](https://github.com/leonid20000/OdinRunes)
|
||||
- [LLM-X](https://github.com/mrdjohnson/llm-x) (Progressive Web App)
|
||||
- [AnythingLLM (Docker + MacOs/Windows/Linux native app)](https://github.com/Mintplex-Labs/anything-llm)
|
||||
- [Ollama Basic Chat: Uses HyperDiv Reactive UI](https://github.com/rapidarchitect/ollama_basic_chat)
|
||||
- [Ollama-chats RPG](https://github.com/drazdra/ollama-chats)
|
||||
- [IntelliBar](https://intellibar.app/) (AI-powered assistant for macOS)
|
||||
- [Jirapt](https://github.com/AliAhmedNada/jirapt) (Jira Integration to generate issues, tasks, epics)
|
||||
- [ojira](https://github.com/AliAhmedNada/ojira) (Jira chrome plugin to easily generate descriptions for tasks)
|
||||
- [QA-Pilot](https://github.com/reid41/QA-Pilot) (Interactive chat tool that can leverage Ollama models for rapid understanding and navigation of GitHub code repositories)
|
||||
- [ChatOllama](https://github.com/sugarforever/chat-ollama) (Open Source Chatbot based on Ollama with Knowledge Bases)
|
||||
- [CRAG Ollama Chat](https://github.com/Nagi-ovo/CRAG-Ollama-Chat) (Simple Web Search with Corrective RAG)
|
||||
- [RAGFlow](https://github.com/infiniflow/ragflow) (Open-source Retrieval-Augmented Generation engine based on deep document understanding)
|
||||
- [StreamDeploy](https://github.com/StreamDeploy-DevRel/streamdeploy-llm-app-scaffold) (LLM Application Scaffold)
|
||||
- [chat](https://github.com/swuecho/chat) (chat web app for teams)
|
||||
- [Lobe Chat](https://github.com/lobehub/lobe-chat) with [Integrating Doc](https://lobehub.com/docs/self-hosting/examples/ollama)
|
||||
- [Ollama RAG Chatbot](https://github.com/datvodinh/rag-chatbot.git) (Local Chat with multiple PDFs using Ollama and RAG)
|
||||
- [BrainSoup](https://www.nurgo-software.com/products/brainsoup) (Flexible native client with RAG & multi-agent automation)
|
||||
- [macai](https://github.com/Renset/macai) (macOS client for Ollama, ChatGPT, and other compatible API back-ends)
|
||||
- [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) (RWKV offline LLM deployment tool, also usable as a client for ChatGPT and Ollama)
|
||||
- [Ollama Grid Search](https://github.com/dezoito/ollama-grid-search) (app to evaluate and compare models)
|
||||
- [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
|
||||
- [Casibase](https://casibase.org) (An open source AI knowledge base and dialogue system combining the latest RAG, SSO, ollama support, and multiple large language models.)
|
||||
- [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
|
||||
- [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama)
|
||||
- [Shinkai Desktop](https://github.com/dcSpark/shinkai-apps) (Two click install Local AI using Ollama + Files + RAG)
|
||||
- [AiLama](https://github.com/zeyoyt/ailama) (A Discord User App that allows you to interact with Ollama anywhere in Discord)
|
||||
- [Ollama with Google Mesop](https://github.com/rapidarchitect/ollama_mesop/) (Mesop Chat Client implementation with Ollama)
|
||||
- [R2R](https://github.com/SciPhi-AI/R2R) (Open-source RAG engine)
|
||||
- [Ollama-Kis](https://github.com/elearningshow/ollama-kis) (A simple easy-to-use GUI with sample custom LLM for Drivers Education)
|
||||
- [OpenGPA](https://opengpa.org) (Open-source offline-first Enterprise Agentic Application)
|
||||
- [Painting Droid](https://github.com/mateuszmigas/painting-droid) (Painting app with AI integrations)
|
||||
- [Kerlig AI](https://www.kerlig.com/) (AI writing assistant for macOS)
|
||||
- [AI Studio](https://github.com/MindWorkAI/AI-Studio)
|
||||
- [Sidellama](https://github.com/gyopak/sidellama) (browser-based LLM client)
|
||||
- [LLMStack](https://github.com/trypromptly/LLMStack) (No-code multi-agent framework to build LLM agents and workflows)
|
||||
- [BoltAI for Mac](https://boltai.com) (AI Chat Client for Mac)
|
||||
- [Harbor](https://github.com/av/harbor) (Containerized LLM Toolkit with Ollama as default backend)
|
||||
- [PyGPT](https://github.com/szczyglis-dev/py-gpt) (AI desktop assistant for Linux, Windows, and Mac)
|
||||
- [Alpaca](https://github.com/Jeffser/Alpaca) (An Ollama client application for Linux and macOS made with GTK4 and Adwaita)
|
||||
- [AutoGPT](https://github.com/Significant-Gravitas/AutoGPT/blob/master/docs/content/platform/ollama.md) (AutoGPT Ollama integration)
|
||||
- [Go-CREW](https://www.jonathanhecl.com/go-crew/) (Powerful Offline RAG in Golang)
|
||||
- [PartCAD](https://github.com/openvmp/partcad/) (CAD model generation with OpenSCAD and CadQuery)
|
||||
- [Ollama4j Web UI](https://github.com/ollama4j/ollama4j-web-ui) - Java-based Web UI for Ollama built with Vaadin, Spring Boot, and Ollama4j
|
||||
- [PyOllaMx](https://github.com/kspviswa/pyOllaMx) - macOS application capable of chatting with both Ollama and Apple MLX models.
|
||||
- [Cline](https://github.com/cline/cline) - Formerly known as Claude Dev is a VSCode extension for multi-file/whole-repo coding
|
||||
- [Cherry Studio](https://github.com/kangfenmao/cherry-studio) (Desktop client with Ollama support)
|
||||
- [ConfiChat](https://github.com/1runeberg/confichat) (Lightweight, standalone, multi-platform, and privacy-focused LLM chat interface with optional encryption)
|
||||
- [Archyve](https://github.com/nickthecook/archyve) (RAG-enabling document library)
|
||||
- [crewAI with Mesop](https://github.com/rapidarchitect/ollama-crew-mesop) (Mesop Web Interface to run crewAI with Ollama)
|
||||
- [Tkinter-based client](https://github.com/chyok/ollama-gui) (Python tkinter-based Client for Ollama)
|
||||
- [LLMChat](https://github.com/trendy-design/llmchat) (Privacy focused, 100% local, intuitive all-in-one chat interface)
|
||||
- [Local Multimodal AI Chat](https://github.com/Leon-Sander/Local-Multimodal-AI-Chat) (Ollama-based LLM Chat with support for multiple features, including PDF RAG, voice chat, image-based interactions, and integration with OpenAI.)
|
||||
- [ARGO](https://github.com/xark-argo/argo) (Locally download and run Ollama and Huggingface models with RAG and deep research on Mac/Windows/Linux)
|
||||
- [OrionChat](https://github.com/EliasPereirah/OrionChat) - OrionChat is a web interface for chatting with different AI providers
|
||||
- [G1](https://github.com/bklieger-groq/g1) (Prototype of using prompting strategies to improve the LLM's reasoning through o1-like reasoning chains.)
|
||||
- [Web management](https://github.com/lemonit-eric-mao/ollama-web-management) (Web management page)
|
||||
- [Promptery](https://github.com/promptery/promptery) (desktop client for Ollama.)
|
||||
- [Ollama App](https://github.com/JHubi1/ollama-app) (Modern and easy-to-use multi-platform client for Ollama)
|
||||
- [chat-ollama](https://github.com/annilq/chat-ollama) (a React Native client for Ollama)
|
||||
- [SpaceLlama](https://github.com/tcsenpai/spacellama) (Firefox and Chrome extension to quickly summarize web pages with ollama in a sidebar)
|
||||
- [YouLama](https://github.com/tcsenpai/youlama) (Webapp to quickly summarize any YouTube video, supporting Invidious as well)
|
||||
- [DualMind](https://github.com/tcsenpai/dualmind) (Experimental app allowing two models to talk to each other in the terminal or in a web interface)
|
||||
- [ollamarama-matrix](https://github.com/h1ddenpr0cess20/ollamarama-matrix) (Ollama chatbot for the Matrix chat protocol)
|
||||
- [ollama-chat-app](https://github.com/anan1213095357/ollama-chat-app) (Flutter-based chat app)
|
||||
- [Perfect Memory AI](https://www.perfectmemory.ai/) (Productivity AI assistant personalized by what you have seen on your screen, heard, and said in meetings)
|
||||
- [Hexabot](https://github.com/hexastack/hexabot) (A conversational AI builder)
|
||||
- [Reddit Rate](https://github.com/rapidarchitect/reddit_analyzer) (Search and Rate Reddit topics with a weighted summation)
|
||||
- [OpenTalkGpt](https://github.com/adarshM84/OpenTalkGpt) (Chrome Extension to manage open-source models supported by Ollama, create custom models, and chat with models from a user-friendly UI)
|
||||
- [VT](https://github.com/vinhnx/vt.ai) (A minimal multimodal AI chat app, with dynamic conversation routing. Supports local models via Ollama)
|
||||
- [Nosia](https://github.com/nosia-ai/nosia) (Easy to install and use RAG platform based on Ollama)
|
||||
- [Witsy](https://github.com/nbonamy/witsy) (An AI Desktop application available for Mac/Windows/Linux)
|
||||
- [Abbey](https://github.com/US-Artificial-Intelligence/abbey) (A configurable AI interface server with notebooks, document storage, and YouTube support)
|
||||
- [Minima](https://github.com/dmayboroda/minima) (RAG with on-premises or fully local workflow)
|
||||
- [aidful-ollama-model-delete](https://github.com/AidfulAI/aidful-ollama-model-delete) (User interface for simplified model cleanup)
|
||||
- [Perplexica](https://github.com/ItzCrazyKns/Perplexica) (An AI-powered search engine & an open-source alternative to Perplexity AI)
|
||||
- [Ollama Chat WebUI for Docker ](https://github.com/oslook/ollama-webui) (Support for local docker deployment, lightweight ollama webui)
|
||||
- [AI Toolkit for Visual Studio Code](https://aka.ms/ai-tooklit/ollama-docs) (Microsoft-official VSCode extension to chat, test, evaluate models with Ollama support, and use them in your AI applications.)
|
||||
- [MinimalNextOllamaChat](https://github.com/anilkay/MinimalNextOllamaChat) (Minimal Web UI for Chat and Model Control)
|
||||
- [Chipper](https://github.com/TilmanGriesel/chipper) AI interface for tinkerers (Ollama, Haystack RAG, Python)
|
||||
- [ChibiChat](https://github.com/CosmicEventHorizon/ChibiChat) (Kotlin-based Android app to chat with Ollama and Koboldcpp API endpoints)
|
||||
- [LocalLLM](https://github.com/qusaismael/localllm) (Minimal Web-App to run ollama models on it with a GUI)
|
||||
- [Ollamazing](https://github.com/buiducnhat/ollamazing) (Web extension to run Ollama models)
|
||||
- [OpenDeepResearcher-via-searxng](https://github.com/benhaotang/OpenDeepResearcher-via-searxng) (A Deep Research equivalent endpoint with Ollama support for running locally)
|
||||
- [AntSK](https://github.com/AIDotNet/AntSK) (Out-of-the-box & Adaptable RAG Chatbot)
|
||||
- [MaxKB](https://github.com/1Panel-dev/MaxKB/) (Ready-to-use & flexible RAG Chatbot)
|
||||
- [yla](https://github.com/danielekp/yla) (Web interface to freely interact with your customized models)
|
||||
- [LangBot](https://github.com/RockChinQ/LangBot) (LLM-based instant messaging bots platform, with Agents, RAG features, supports multiple platforms)
|
||||
- [1Panel](https://github.com/1Panel-dev/1Panel/) (Web-based Linux Server Management Tool)
|
||||
- [AstrBot](https://github.com/Soulter/AstrBot/) (User-friendly LLM-based multi-platform chatbot with a WebUI, supporting RAG, LLM agents, and plugins integration)
|
||||
- [Reins](https://github.com/ibrahimcetin/reins) (Easily tweak parameters, customize system prompts per chat, and enhance your AI experiments with reasoning model support.)
|
||||
- [Flufy](https://github.com/Aharon-Bensadoun/Flufy) (A beautiful chat interface for interacting with Ollama's API. Built with React, TypeScript, and Material-UI.)
|
||||
- [Ellama](https://github.com/zeozeozeo/ellama) (Friendly native app to chat with an Ollama instance)
|
||||
- [screenpipe](https://github.com/mediar-ai/screenpipe) Build agents powered by your screen history
|
||||
- [Ollamb](https://github.com/hengkysteen/ollamb) (Simple yet rich in features, cross-platform built with Flutter and designed for Ollama. Try the [web demo](https://hengkysteen.github.io/demo/ollamb/).)
|
||||
- [Writeopia](https://github.com/Writeopia/Writeopia) (Text editor with integration with Ollama)
|
||||
- [AppFlowy](https://github.com/AppFlowy-IO/AppFlowy) (AI collaborative workspace with Ollama, cross-platform and self-hostable)
|
||||
- [Lumina](https://github.com/cushydigit/lumina.git) (A lightweight, minimal React.js frontend for interacting with Ollama servers)
|
||||
- [Tiny Notepad](https://pypi.org/project/tiny-notepad) (A lightweight, notepad-like interface to chat with ollama available on PyPI)
|
||||
- [macLlama (macOS native)](https://github.com/hellotunamayo/macLlama) (A native macOS GUI application for interacting with Ollama models, featuring a chat interface.)
|
||||
- [GPTranslate](https://github.com/philberndt/GPTranslate) (A fast and lightweight, AI powered desktop translation application written with Rust and Tauri. Features real-time translation with OpenAI/Azure/Ollama.)
|
||||
- [ollama launcher](https://github.com/NGC13009/ollama-launcher) (A launcher for Ollama, aiming to provide users with convenient functions such as ollama server launching, management, or configuration.)
|
||||
- [ai-hub](https://github.com/Aj-Seven/ai-hub) (AI Hub supports multiple models via API keys and Chat support via Ollama API.)
|
||||
- [Mayan EDMS](https://gitlab.com/mayan-edms/mayan-edms) (Open source document management system to organize, tag, search, and automate your files with powerful Ollama driven workflows.)
|
||||
- [Serene Pub](https://github.com/doolijb/serene-pub) (Beginner friendly, open source AI Roleplaying App for Windows, Mac OS and Linux. Search, download and use models with Ollama all inside the app.)
|
||||
- [Andes](https://github.com/aqerd/andes) (A Visual Studio Code extension that provides a local UI interface for Ollama models)
|
||||
- [Clueless](https://github.com/KashyapTan/clueless) (Open source and local Cluely: a desktop LLM assistant that helps you talk to anything on your screen using locally served Ollama models, and stays undetectable during screen sharing)
|
||||
- [ollama-co2](https://github.com/carbonatedWaterOrg/ollama-co2) (FastAPI web interface for monitoring and managing local and remote Ollama servers with real-time model monitoring and concurrent downloads)
|
||||
- [Hillnote](https://hillnote.com) (A Markdown-first workspace designed to supercharge your AI workflow. Create documents ready to integrate with Claude, ChatGPT, Gemini, Cursor, and more - all while keeping your work on your device.)
|
||||
### Chat Interfaces
|
||||
|
||||
### Cloud
|
||||
#### Web
|
||||
|
||||
- [Open WebUI](https://github.com/open-webui/open-webui) - Extensible, self-hosted AI interface
|
||||
- [Onyx](https://github.com/onyx-dot-app/onyx) - Connected AI workspace
|
||||
- [LibreChat](https://github.com/danny-avila/LibreChat) - Enhanced ChatGPT clone with multi-provider support
|
||||
- [Lobe Chat](https://github.com/lobehub/lobe-chat) - Modern chat framework with plugin ecosystem ([docs](https://lobehub.com/docs/self-hosting/examples/ollama))
|
||||
- [NextChat](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web) - Cross-platform ChatGPT UI ([docs](https://docs.nextchat.dev/models/ollama))
|
||||
- [Perplexica](https://github.com/ItzCrazyKns/Perplexica) - AI-powered search engine, open-source Perplexity alternative
|
||||
- [big-AGI](https://github.com/enricoros/big-AGI) - AI suite for professionals
|
||||
- [Lollms WebUI](https://github.com/ParisNeo/lollms-webui) - Multi-model web interface
|
||||
- [ChatOllama](https://github.com/sugarforever/chat-ollama) - Chatbot with knowledge bases
|
||||
- [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt) - On-premise AI platform
|
||||
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama) - ChatGPT-style web interface
|
||||
- [Hollama](https://github.com/fmaclen/hollama) - Minimal web interface
|
||||
- [Chatbox](https://github.com/Bin-Huang/Chatbox) - Desktop and web AI client
|
||||
- [chat](https://github.com/swuecho/chat) - Chat web app for teams
|
||||
- [Ollama RAG Chatbot](https://github.com/datvodinh/rag-chatbot.git) - Chat with multiple PDFs using RAG
|
||||
- [Tkinter-based client](https://github.com/chyok/ollama-gui) - Python desktop client
|
||||
|
||||
#### Desktop
|
||||
|
||||
- [Dify.AI](https://github.com/langgenius/dify) - LLM app development platform
|
||||
- [AnythingLLM](https://github.com/Mintplex-Labs/anything-llm) - All-in-one AI app for Mac, Windows, and Linux
|
||||
- [Maid](https://github.com/Mobile-Artificial-Intelligence/maid) - Cross-platform mobile and desktop client
|
||||
- [Witsy](https://github.com/nbonamy/witsy) - AI desktop app for Mac, Windows, and Linux
|
||||
- [Cherry Studio](https://github.com/kangfenmao/cherry-studio) - Multi-provider desktop client
|
||||
- [Ollama App](https://github.com/JHubi1/ollama-app) - Multi-platform client for desktop and mobile
|
||||
- [PyGPT](https://github.com/szczyglis-dev/py-gpt) - AI desktop assistant for Linux, Windows, and Mac
|
||||
- [Alpaca](https://github.com/Jeffser/Alpaca) - GTK4 client for Linux and macOS
|
||||
- [SwiftChat](https://github.com/aws-samples/swift-chat) - Cross-platform including iOS, Android, and Apple Vision Pro
|
||||
- [Enchanted](https://github.com/AugustDev/enchanted) - Native macOS and iOS client
|
||||
- [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) - Multi-model desktop runner
|
||||
- [Ollama Grid Search](https://github.com/dezoito/ollama-grid-search) - Evaluate and compare models
|
||||
- [macai](https://github.com/Renset/macai) - macOS client for Ollama and ChatGPT
|
||||
- [AI Studio](https://github.com/MindWorkAI/AI-Studio) - Multi-provider desktop IDE
|
||||
- [Reins](https://github.com/ibrahimcetin/reins) - Parameter tuning and reasoning model support
|
||||
- [ConfiChat](https://github.com/1runeberg/confichat) - Privacy-focused with optional encryption
|
||||
- [LLocal.in](https://github.com/kartikm7/llocal) - Electron desktop client
|
||||
- [MindMac](https://mindmac.app) - AI chat client for Mac
|
||||
- [Msty](https://msty.app) - Multi-model desktop client
|
||||
- [BoltAI for Mac](https://boltai.com) - AI chat client for Mac
|
||||
- [IntelliBar](https://intellibar.app/) - AI-powered assistant for macOS
|
||||
- [Kerlig AI](https://www.kerlig.com/) - AI writing assistant for macOS
|
||||
- [Hillnote](https://hillnote.com) - Markdown-first AI workspace
|
||||
- [Perfect Memory AI](https://www.perfectmemory.ai/) - Productivity AI personalized by screen and meeting history
|
||||
|
||||
#### Mobile
|
||||
|
||||
- [Ollama Android Chat](https://github.com/sunshine0523/OllamaServer) - One-click Ollama on Android
|
||||
|
||||
> SwiftChat, Enchanted, Maid, Ollama App, Reins, and ConfiChat listed above also support mobile platforms.
|
||||
|
||||
### Code Editors & Development
|
||||
|
||||
- [Cline](https://github.com/cline/cline) - VS Code extension for multi-file/whole-repo coding
|
||||
- [Continue](https://github.com/continuedev/continue) - Open-source AI code assistant for any IDE
|
||||
- [Void](https://github.com/voideditor/void) - Open source AI code editor, Cursor alternative
|
||||
- [Copilot for Obsidian](https://github.com/logancyang/obsidian-copilot) - AI assistant for Obsidian
|
||||
- [twinny](https://github.com/rjmacarthy/twinny) - Copilot and Copilot chat alternative
|
||||
- [gptel Emacs client](https://github.com/karthink/gptel) - LLM client for Emacs
|
||||
- [Ollama Copilot](https://github.com/bernardo-bruning/ollama-copilot) - Use Ollama as GitHub Copilot
|
||||
- [Obsidian Local GPT](https://github.com/pfrankov/obsidian-local-gpt) - Local AI for Obsidian
|
||||
- [Ellama Emacs client](https://github.com/s-kostyaev/ellama) - LLM tool for Emacs
|
||||
- [orbiton](https://github.com/xyproto/orbiton) - Config-free text editor with Ollama tab completion
|
||||
- [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) - Sublime Text 4 AI assistant
|
||||
- [VT Code](https://github.com/vinhnx/vtcode) - Rust-based terminal coding agent with Tree-sitter
|
||||
- [QodeAssist](https://github.com/Palm1r/QodeAssist) - AI coding assistant for Qt Creator
|
||||
- [AI Toolkit for VS Code](https://aka.ms/ai-tooklit/ollama-docs) - Microsoft-official VS Code extension
|
||||
- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama) - Natural language interface for computers
|
||||
|
||||
### Libraries & SDKs
|
||||
|
||||
- [LiteLLM](https://github.com/BerriAI/litellm) - Unified API for 100+ LLM providers
|
||||
- [Semantic Kernel](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai/ollama) - Microsoft AI orchestration SDK
|
||||
- [LangChain4j](https://github.com/langchain4j/langchain4j) - Java LangChain ([example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java))
|
||||
- [LangChainGo](https://github.com/tmc/langchaingo/) - Go LangChain ([example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example))
|
||||
- [Spring AI](https://github.com/spring-projects/spring-ai) - Spring framework AI support ([docs](https://docs.spring.io/spring-ai/reference/api/chat/ollama-chat.html))
|
||||
- [LangChain](https://python.langchain.com/docs/integrations/chat/ollama/) and [LangChain.js](https://js.langchain.com/docs/integrations/chat/ollama/) with [example](https://js.langchain.com/docs/tutorials/local_rag/)
|
||||
- [Ollama for Ruby](https://github.com/crmne/ruby_llm) - Ruby LLM library
|
||||
- [any-llm](https://github.com/mozilla-ai/any-llm) - Unified LLM interface by Mozilla
|
||||
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp) - .NET SDK
|
||||
- [LangChainRust](https://github.com/Abraxas-365/langchain-rust) - Rust LangChain ([example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs))
|
||||
- [Agents-Flex for Java](https://github.com/agents-flex/agents-flex) - Java agent framework ([example](https://github.com/agents-flex/agents-flex/tree/main/agents-flex-llm/agents-flex-llm-ollama/src/test/java/com/agentsflex/llm/ollama))
|
||||
- [Elixir LangChain](https://github.com/brainlid/langchain) - Elixir LangChain
|
||||
- [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs) - Rust SDK
|
||||
- [LangChain for .NET](https://github.com/tryAGI/LangChain) - .NET LangChain ([example](https://github.com/tryAGI/LangChain/blob/main/examples/LangChain.Samples.OpenAI/Program.cs))
|
||||
- [chromem-go](https://github.com/philippgille/chromem-go) - Go vector database with Ollama embeddings ([example](https://github.com/philippgille/chromem-go/tree/v0.5.0/examples/rag-wikipedia-ollama))
|
||||
- [LangChainDart](https://github.com/davidmigloz/langchain_dart) - Dart LangChain
|
||||
- [LlmTornado](https://github.com/lofcz/llmtornado) - Unified C# interface for multiple inference APIs
|
||||
- [Ollama4j for Java](https://github.com/ollama4j/ollama4j) - Java SDK
|
||||
- [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel) - Laravel integration
|
||||
- [Ollama for Swift](https://github.com/mattt/ollama-swift) - Swift SDK
|
||||
- [LlamaIndex](https://docs.llamaindex.ai/en/stable/examples/llm/ollama/) and [LlamaIndexTS](https://ts.llamaindex.ai/modules/llms/available_llms/ollama) - Data framework for LLM apps
|
||||
- [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md) - AI pipeline framework
|
||||
- [Firebase Genkit](https://firebase.google.com/docs/genkit/plugins/ollama) - Google AI framework
|
||||
- [Ollama-hpp for C++](https://github.com/jmont-dev/ollama-hpp) - C++ SDK
|
||||
- [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) - Julia LLM toolkit ([example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama))
|
||||
- [Ollama for R - rollama](https://github.com/JBGruber/rollama) - R SDK
|
||||
- [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama) - AI gateway
|
||||
- [Testcontainers](https://testcontainers.com/modules/ollama/) - Container-based testing
|
||||
- [LLPhant](https://github.com/theodo-group/LLPhant?tab=readme-ov-file#ollama) - PHP AI framework
|
||||
|
||||
### Frameworks & Agents
|
||||
|
||||
- [AutoGPT](https://github.com/Significant-Gravitas/AutoGPT/blob/master/docs/content/platform/ollama.md) - Autonomous AI agent platform
- [crewAI](https://github.com/crewAIInc/crewAI) - Multi-agent orchestration framework
- [Strands Agents](https://github.com/strands-agents/sdk-python) - Model-driven agent building by AWS
- [Cheshire Cat](https://github.com/cheshire-cat-ai/core) - AI assistant framework
- [any-agent](https://github.com/mozilla-ai/any-agent) - Unified agent framework interface by Mozilla
- [Stakpak](https://github.com/stakpak/agent) - Open source DevOps agent
- [Hexabot](https://github.com/hexastack/hexabot) - Conversational AI builder
- [Neuro SAN](https://github.com/cognizant-ai-lab/neuro-san-studio) - Multi-agent orchestration ([docs](https://github.com/cognizant-ai-lab/neuro-san-studio/blob/main/docs/user_guide.md#ollama))

### RAG & Knowledge Bases

- [RAGFlow](https://github.com/infiniflow/ragflow) - RAG engine based on deep document understanding
- [R2R](https://github.com/SciPhi-AI/R2R) - Open-source RAG engine
- [MaxKB](https://github.com/1Panel-dev/MaxKB/) - Ready-to-use RAG chatbot
- [Minima](https://github.com/dmayboroda/minima) - On-premises or fully local RAG
- [Chipper](https://github.com/TilmanGriesel/chipper) - AI interface with Haystack RAG
- [ARGO](https://github.com/xark-argo/argo) - RAG and deep research on Mac/Windows/Linux
- [Archyve](https://github.com/nickthecook/archyve) - RAG-enabling document library
- [Casibase](https://casibase.org) - AI knowledge base with RAG and SSO
- [BrainSoup](https://www.nurgo-software.com/products/brainsoup) - Native client with RAG and multi-agent automation

### Bots & Messaging

- [LangBot](https://github.com/RockChinQ/LangBot) - Multi-platform messaging bots with agents and RAG
- [AstrBot](https://github.com/Soulter/AstrBot/) - Multi-platform chatbot with RAG and plugins
- [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) - TypeScript Discord bot
- [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram) - Telegram bot
- [LLM Telegram Bot](https://github.com/innightwolfsleep/llm_telegram_bot) - Telegram bot for roleplay

### Terminal & CLI

- [aichat](https://github.com/sigoden/aichat) - All-in-one LLM CLI with Shell Assistant, RAG, and AI tools
- [oterm](https://github.com/ggozad/oterm) - Terminal client for Ollama
- [gollama](https://github.com/sammcj/gollama) - Go-based model manager for Ollama
- [tlm](https://github.com/yusufcanb/tlm) - Local shell copilot
- [tenere](https://github.com/pythops/tenere) - TUI for LLMs
- [ParLlama](https://github.com/paulrobello/parllama) - TUI for Ollama
- [llm-ollama](https://github.com/taketwo/llm-ollama) - Plugin for [Datasette's LLM CLI](https://llm.datasette.io/en/stable/)
- [ShellOracle](https://github.com/djcopley/ShellOracle) - Shell command suggestions
- [LLM-X](https://github.com/mrdjohnson/llm-x) - Progressive web app for LLMs
- [cmdh](https://github.com/pgibler/cmdh) - Natural language to shell commands
- [VT](https://github.com/vinhnx/vt.ai) - Minimal multimodal AI chat app

### Productivity & Apps

- [AppFlowy](https://github.com/AppFlowy-IO/AppFlowy) - AI collaborative workspace, self-hostable Notion alternative
- [Screenpipe](https://github.com/mediar-ai/screenpipe) - 24/7 screen and mic recording with AI-powered search
- [Vibe](https://github.com/thewh1teagle/vibe) - Transcribe and analyze meetings
- [Page Assist](https://github.com/n4ze3m/page-assist) - Chrome extension for AI-powered browsing
- [NativeMind](https://github.com/NativeMindBrowser/NativeMindExtension) - Private, on-device browser AI assistant
- [Ollama Fortress](https://github.com/ParisNeo/ollama_proxy_server) - Security proxy for Ollama
- [1Panel](https://github.com/1Panel-dev/1Panel/) - Web-based Linux server management
- [Writeopia](https://github.com/Writeopia/Writeopia) - Text editor with Ollama integration
- [QA-Pilot](https://github.com/reid41/QA-Pilot) - GitHub code repository understanding
- [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama) - Ollama in Raycast
- [Painting Droid](https://github.com/mateuszmigas/painting-droid) - Painting app with AI integrations
- [Serene Pub](https://github.com/doolijb/serene-pub) - AI roleplaying app
- [Mayan EDMS](https://gitlab.com/mayan-edms/mayan-edms) - Document management with Ollama workflows
- [TagSpaces](https://www.tagspaces.org) - File management with [AI tagging](https://docs.tagspaces.org/ai/)

### Observability & Monitoring

- [Opik](https://www.comet.com/docs/opik/cookbook/ollama) - Debug, evaluate, and monitor LLM applications
- [OpenLIT](https://github.com/openlit/openlit) - OpenTelemetry-native monitoring for Ollama and GPUs
- [Lunary](https://lunary.ai/docs/integrations/ollama) - LLM observability with analytics and PII masking
- [Langfuse](https://langfuse.com/docs/integrations/ollama) - Open source LLM observability
- [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) - AI observability and evaluation for agents
- [MLflow Tracing](https://mlflow.org/docs/latest/llms/tracing/index.html#automatic-tracing) - Open source LLM observability

### Database & Embeddings

- [pgai](https://github.com/timescale/pgai) - PostgreSQL as a vector database ([guide](https://github.com/timescale/pgai/blob/main/docs/vectorizer-quick-start.md))
- [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md) - Connect Ollama with 200+ data platforms
- [chromem-go](https://github.com/philippgille/chromem-go/blob/v0.5.0/embed_ollama.go) - Embeddable vector database for Go ([example](https://github.com/philippgille/chromem-go/tree/v0.5.0/examples/rag-wikipedia-ollama))
- [Kangaroo](https://github.com/dbkangaroo/kangaroo) - AI-powered SQL client
### Infrastructure & Deployment

#### Cloud

- [Google Cloud](https://cloud.google.com/run/docs/tutorials/gpu-gemma2-with-ollama)
- [Fly.io](https://fly.io/docs/python/do-more/add-ollama/)
- [Koyeb](https://www.koyeb.com/deploy/ollama)
- [Harbor](https://github.com/av/harbor) - Containerized LLM toolkit with Ollama as default backend

### Tutorial

- [handy-ollama](https://github.com/datawhalechina/handy-ollama) - Chinese-language Ollama tutorial by [Datawhale](https://github.com/datawhalechina), China's largest open-source AI learning community
### Terminal

- [oterm](https://github.com/ggozad/oterm)
- [Ellama Emacs client](https://github.com/s-kostyaev/ellama)
- [Emacs client](https://github.com/zweifisch/ollama)
- [neollama](https://github.com/paradoxical-dev/neollama) UI client for interacting with models from within Neovim
- [gen.nvim](https://github.com/David-Kunz/gen.nvim)
- [ollama.nvim](https://github.com/nomnivore/ollama.nvim)
- [ollero.nvim](https://github.com/marco-souza/ollero.nvim)
- [ollama-chat.nvim](https://github.com/gerazov/ollama-chat.nvim)
- [ogpt.nvim](https://github.com/huynle/ogpt.nvim)
- [gptel Emacs client](https://github.com/karthink/gptel)
- [Oatmeal](https://github.com/dustinblackman/oatmeal)
- [cmdh](https://github.com/pgibler/cmdh)
- [ooo](https://github.com/npahlfer/ooo)
- [shell-pilot](https://github.com/reid41/shell-pilot) (Interact with models via pure shell scripts on Linux or macOS)
- [tenere](https://github.com/pythops/tenere)
- [llm-ollama](https://github.com/taketwo/llm-ollama) for [Datasette's LLM CLI](https://llm.datasette.io/en/stable/)
- [typechat-cli](https://github.com/anaisbetts/typechat-cli)
- [ShellOracle](https://github.com/djcopley/ShellOracle)
- [tlm](https://github.com/yusufcanb/tlm)
- [podman-ollama](https://github.com/ericcurtin/podman-ollama)
- [gollama](https://github.com/sammcj/gollama)
- [ParLlama](https://github.com/paulrobello/parllama)
- [Ollama eBook Summary](https://github.com/cognitivetech/ollama-ebook-summary/)
- [Ollama Mixture of Experts (MOE) in 50 lines of code](https://github.com/rapidarchitect/ollama_moe)
- [vim-intelligence-bridge](https://github.com/pepo-ec/vim-intelligence-bridge) (Simple interaction of Ollama with the Vim editor)
- [x-cmd ollama](https://x-cmd.com/mod/ollama)
- [bb7](https://github.com/drunkwcodes/bb7)
- [SwollamaCLI](https://github.com/marcusziade/Swollama) bundled with the Swollama Swift package. [Demo](https://github.com/marcusziade/Swollama?tab=readme-ov-file#cli-usage)
- [aichat](https://github.com/sigoden/aichat) All-in-one LLM CLI tool featuring Shell Assistant, Chat-REPL, RAG, AI tools and agents, with access to OpenAI, Claude, Gemini, Ollama, Groq, and more
- [PowershAI](https://github.com/rrg92/powershai) PowerShell module that brings AI to the terminal on Windows, including support for Ollama
- [DeepShell](https://github.com/Abyss-c0re/deepshell) Self-hosted AI assistant with an interactive shell and file and folder analysis
- [orbiton](https://github.com/xyproto/orbiton) Configuration-free text editor and IDE with Ollama-powered tab completion
- [orca-cli](https://github.com/molbal/orca-cli) Ollama Registry CLI application - browse, pull, and download models from the Ollama Registry in your terminal
- [GGUF-to-Ollama](https://github.com/jonathanhecl/gguf-to-ollama) - Importing GGUF to Ollama made easy (multiplatform)
- [AWS-Strands-With-Ollama](https://github.com/rapidarchitect/ollama_strands) - AWS Strands Agents with Ollama examples
- [ollama-multirun](https://github.com/attogram/ollama-multirun) - Bash script that runs a single prompt against any or all of your locally installed Ollama models, saving the output and performance statistics as easily navigable web pages ([Demo](https://attogram.github.io/ai_test_zone/))
- [ollama-bash-toolshed](https://github.com/attogram/ollama-bash-toolshed) - Bash scripts for chatting with tool-calling models; add new tools to your shed with ease. Runs on Ollama.
- [hle-eval-ollama](https://github.com/mags0ft/hle-eval-ollama) - Runs benchmarks such as "Humanity's Last Exam" (HLE) on your favorite local Ollama models and evaluates the quality of their responses
- [VT Code](https://github.com/vinhnx/vtcode) - Rust-based terminal coding agent with semantic code intelligence via Tree-sitter; integrates with Ollama to run local or cloud models with configurable endpoints
### Apple Vision Pro

- [SwiftChat](https://github.com/aws-samples/swift-chat) (Cross-platform AI chat app supporting Apple Vision Pro via "Designed for iPad")
- [Enchanted](https://github.com/AugustDev/enchanted)

### Database

- [pgai](https://github.com/timescale/pgai) - PostgreSQL as a vector database (Create and search embeddings from Ollama models using pgvector)
  - [Get started guide](https://github.com/timescale/pgai/blob/main/docs/vectorizer-quick-start.md)
- [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md) (Connects Ollama models with nearly 200 data platforms and apps)
- [chromem-go](https://github.com/philippgille/chromem-go/blob/v0.5.0/embed_ollama.go) with [example](https://github.com/philippgille/chromem-go/tree/v0.5.0/examples/rag-wikipedia-ollama)
- [Kangaroo](https://github.com/dbkangaroo/kangaroo) (AI-powered SQL client and admin tool for popular databases)

### Package managers

- [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
- [Gentoo](https://github.com/gentoo/guru/tree/master/app-misc/ollama)
- [Homebrew](https://formulae.brew.sh/formula/ollama)
- [Helm Chart](https://artifacthub.io/packages/helm/ollama-helm/ollama)
- [Guix channel](https://codeberg.org/tusharhero/ollama-guix)
- [Nix package](https://search.nixos.org/packages?show=ollama&from=0&size=50&sort=relevance&type=packages&query=ollama)
- [Flox](https://flox.dev/blog/ollama-part-one)
### Libraries

- [LangChain](https://python.langchain.com/docs/integrations/chat/ollama/) and [LangChain.js](https://js.langchain.com/docs/integrations/chat/ollama/) with [example](https://js.langchain.com/docs/tutorials/local_rag/)
- [Firebase Genkit](https://firebase.google.com/docs/genkit/plugins/ollama)
- [crewAI](https://github.com/crewAIInc/crewAI)
- [Yacana](https://remembersoftwares.github.io/yacana/) (User-friendly multi-agent framework for brainstorming and executing predetermined flows with built-in tool integration)
- [Strands Agents](https://github.com/strands-agents/sdk-python) (A model-driven approach to building AI agents in just a few lines of code)
- [Spring AI](https://github.com/spring-projects/spring-ai) with [reference](https://docs.spring.io/spring-ai/reference/api/chat/ollama-chat.html) and [example](https://github.com/tzolov/ollama-tools)
- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
- [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java)
- [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs)
- [LangChain for .NET](https://github.com/tryAGI/LangChain) with [example](https://github.com/tryAGI/LangChain/blob/main/examples/LangChain.Samples.OpenAI/Program.cs)
- [LLPhant](https://github.com/theodo-group/LLPhant?tab=readme-ov-file#ollama)
- [LlamaIndex](https://docs.llamaindex.ai/en/stable/examples/llm/ollama/) and [LlamaIndexTS](https://ts.llamaindex.ai/modules/llms/available_llms/ollama)
- [LiteLLM](https://github.com/BerriAI/litellm)
- [OllamaFarm for Go](https://github.com/presbrey/ollamafarm)
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
- [Ollama for Ruby](https://github.com/gbaptista/ollama-ai)
- [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
- [Ollama-hpp for C++](https://github.com/jmont-dev/ollama-hpp)
- [Ollama4j for Java](https://github.com/ollama4j/ollama4j)
- [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
- [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)
- [Ollama for Dart](https://github.com/breitburg/dart-ollama)
- [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel)
- [LangChainDart](https://github.com/davidmigloz/langchain_dart)
- [Semantic Kernel - Python](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai/ollama)
- [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md)
- [Elixir LangChain](https://github.com/brainlid/langchain)
- [Ollama for R - rollama](https://github.com/JBGruber/rollama)
- [Ollama for R - ollama-r](https://github.com/hauselin/ollama-r)
- [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex)
- [Ollama Connector for SAP ABAP](https://github.com/b-tocs/abap_btocs_ollama)
- [Testcontainers](https://testcontainers.com/modules/ollama/)
- [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama)
- [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) with an [example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama)
- [LlamaScript](https://github.com/Project-Llama/llamascript)
- [llm-axe](https://github.com/emirsahin1/llm-axe) (Python toolkit for building LLM-powered apps)
- [Gollm](https://docs.gollm.co/examples/ollama-example)
- [Gollama for Golang](https://github.com/jonathanhecl/gollama)
- [Ollamaclient for Golang](https://github.com/xyproto/ollamaclient)
- [High-level function abstraction in Go](https://gitlab.com/tozd/go/fun)
- [Ollama PHP](https://github.com/ArdaGnsrn/ollama-php)
- [Agents-Flex for Java](https://github.com/agents-flex/agents-flex) with [example](https://github.com/agents-flex/agents-flex/tree/main/agents-flex-llm/agents-flex-llm-ollama/src/test/java/com/agentsflex/llm/ollama)
- [Parakeet](https://github.com/parakeet-nest/parakeet) is a Go library that simplifies the development of small generative AI applications with Ollama
- [Haverscript](https://github.com/andygill/haverscript) with [examples](https://github.com/andygill/haverscript/tree/main/examples)
- [Ollama for Swift](https://github.com/mattt/ollama-swift)
- [Swollama for Swift](https://github.com/marcusziade/Swollama) with [DocC](https://marcusziade.github.io/Swollama/documentation/swollama/)
- [GoLamify](https://github.com/prasad89/golamify)
- [Ollama for Haskell](https://github.com/tusharad/ollama-haskell)
- [multi-llm-ts](https://github.com/nbonamy/multi-llm-ts) (A TypeScript/JavaScript library providing access to different LLMs through a unified API)
- [LlmTornado](https://github.com/lofcz/llmtornado) (C# library providing a unified interface for major FOSS and commercial inference APIs)
- [Ollama for Zig](https://github.com/dravenk/ollama-zig)
- [Abso](https://github.com/lunary-ai/abso) (OpenAI-compatible TypeScript SDK for any LLM provider)
- [Nichey](https://github.com/goodreasonai/nichey) is a Python package for generating custom wikis for your research topic
- [Ollama for D](https://github.com/kassane/ollama-d)
- [OllamaPlusPlus](https://github.com/HardCodeDev777/OllamaPlusPlus) (Very simple C++ library for Ollama)
- [any-llm](https://github.com/mozilla-ai/any-llm) (A single interface to use different LLM providers by [mozilla.ai](https://www.mozilla.ai/))
- [any-agent](https://github.com/mozilla-ai/any-agent) (A single interface to use and evaluate different agent frameworks by [mozilla.ai](https://www.mozilla.ai/))
- [Neuro SAN](https://github.com/cognizant-ai-lab/neuro-san-studio) (Data-driven multi-agent orchestration framework) with [example](https://github.com/cognizant-ai-lab/neuro-san-studio/blob/main/docs/user_guide.md#ollama)
- [achatbot-go](https://github.com/ai-bot-pro/achatbot-go) is a multimodal (text/audio/image) chatbot
- [Ollama Bash Lib](https://github.com/attogram/ollama-bash-lib) - A Bash library for Ollama. Run LLM prompts straight from your shell, and more
### Mobile

- [SwiftChat](https://github.com/aws-samples/swift-chat) (Lightning-fast cross-platform AI chat app with native UI for Android, iOS, and iPad)
- [Enchanted](https://github.com/AugustDev/enchanted)
- [Maid](https://github.com/Mobile-Artificial-Intelligence/maid)
- [Ollama App](https://github.com/JHubi1/ollama-app) (Modern and easy-to-use multi-platform client for Ollama)
- [ConfiChat](https://github.com/1runeberg/confichat) (Lightweight, standalone, multi-platform, and privacy-focused LLM chat interface with optional encryption)
- [Ollama Android Chat](https://github.com/sunshine0523/OllamaServer) (No need for Termux; start the Ollama service with one click on an Android device)
- [Reins](https://github.com/ibrahimcetin/reins) (Easily tweak parameters, customize system prompts per chat, and enhance your AI experiments with reasoning model support)
### Extensions & Plugins

- [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama)
- [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel)
- [Continue](https://github.com/continuedev/continue)
- [Vibe](https://github.com/thewh1teagle/vibe) (Transcribe and analyze meetings with Ollama)
- [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)
- [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq)
- [NotesOllama](https://github.com/andersrex/notesollama) (Apple Notes Ollama plugin)
- [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)
- [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram)
- [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)
- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
- [Cliobot](https://github.com/herval/cliobot) (Telegram bot with Ollama support)
- [Copilot for Obsidian plugin](https://github.com/logancyang/obsidian-copilot)
- [Obsidian Local GPT plugin](https://github.com/pfrankov/obsidian-local-gpt)
- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama)
- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
- [Ollama Copilot](https://github.com/bernardo-bruning/ollama-copilot) (Proxy that lets you use Ollama as a copilot, like GitHub Copilot)
- [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama)
- [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and Hugging Face)
- [Page Assist](https://github.com/n4ze3m/page-assist) (Chrome Extension)
- [Plasmoid Ollama Control](https://github.com/imoize/plasmoid-ollamacontrol) (KDE Plasma extension for quickly managing and controlling Ollama models)
- [AI Telegram Bot](https://github.com/tusharhero/aitelegrambot) (Telegram bot using Ollama as the backend)
- [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
- [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord bot with tuning documentation)
- [ChatGPTBox: All in one browser extension](https://github.com/josStorer/chatGPTBox) with [integration tutorial](https://github.com/josStorer/chatGPTBox/issues/616#issuecomment-1975186467)
- [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in Python. Uses Ollama to create personalities.
- [Headless Ollama](https://github.com/nischalj10/headless-ollama) (Scripts to automatically install the Ollama client and models on any OS for apps that depend on the Ollama server)
- [Terraform AWS Ollama & Open WebUI](https://github.com/xuyangbocn/terraform-aws-self-host-llm) (A Terraform module to deploy a ready-to-use Ollama service on AWS, together with its Open WebUI front end)
- [node-red-contrib-ollama](https://github.com/jakubburkiewicz/node-red-contrib-ollama)
- [Local AI Helper](https://github.com/ivostoykov/localAI) (Chrome and Firefox extensions that enable interactions with the active tab and customisable API endpoints. Includes secure storage for user prompts.)
- [LSP-AI](https://github.com/SilasMarvin/lsp-ai) (Open-source language server for AI-powered functionality)
- [QodeAssist](https://github.com/Palm1r/QodeAssist) (AI-powered coding assistant plugin for Qt Creator)
- [Obsidian Quiz Generator plugin](https://github.com/ECuiDev/obsidian-quiz-generator)
- [AI Summary Helper plugin](https://github.com/philffm/ai-summary-helper)
- [TextCraft](https://github.com/suncloudsmoon/TextCraft) (Copilot-in-Word alternative using Ollama)
- [Alfred Ollama](https://github.com/zeitlings/alfred-ollama) (Alfred Workflow)
- [TextLLaMA](https://github.com/adarshM84/TextLLaMA) (A Chrome extension that helps you write emails, correct grammar, and translate into any language)
- [Simple-Discord-AI](https://github.com/zyphixor/simple-discord-ai)
- [LLM Telegram Bot](https://github.com/innightwolfsleep/llm_telegram_bot) (Telegram bot, primarily for roleplay, with Oobabooga-like buttons, [A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) API integration, etc.)
- [mcp-llm](https://github.com/sammcj/mcp-llm) (MCP server that allows LLMs to call other LLMs)
- [SimpleOllamaUnity](https://github.com/HardCodeDev777/SimpleOllamaUnity) (Unity Engine extension for communicating with Ollama in a few lines of code; also works at runtime)
- [UnityCodeLama](https://github.com/HardCodeDev777/UnityCodeLama) (Unity Editor tool to analyze scripts via Ollama)
- [NativeMind](https://github.com/NativeMindBrowser/NativeMindExtension) (Private, on-device AI assistant, no cloud dependencies)
- [GMAI - Gradle Managed AI](https://gmai.premex.se/) (Gradle plugin for automated Ollama lifecycle management during build phases)
- [NOMYO Router](https://github.com/nomyo-ai/nomyo-router) (A transparent Ollama proxy with model-deployment-aware routing that auto-manages multiple Ollama instances on a given network)
### Supported backends

- [llama.cpp](https://github.com/ggml-org/llama.cpp) project founded by Georgi Gerganov.

### Observability

- [Opik](https://www.comet.com/docs/opik/cookbook/ollama) is an open-source platform to debug, evaluate, and monitor your LLM applications, RAG systems, and agentic workflows with comprehensive tracing, automated evaluations, and production-ready dashboards. Opik provides native integration with Ollama.
- [Lunary](https://lunary.ai/docs/integrations/ollama) is an open-source LLM observability platform. It provides a variety of enterprise-grade features such as real-time analytics, prompt template management, PII masking, and comprehensive agent tracing.
- [OpenLIT](https://github.com/openlit/openlit) is an OpenTelemetry-native tool for monitoring Ollama applications and GPUs using traces and metrics.
- [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) is an AI observability and evaluation platform for AI agents. Use HoneyHive to evaluate agent performance, interrogate failures, and monitor quality in production.
- [Langfuse](https://langfuse.com/docs/integrations/ollama) is an open source LLM observability platform that enables teams to collaboratively monitor, evaluate, and debug AI applications.
- [MLflow Tracing](https://mlflow.org/docs/latest/llms/tracing/index.html#automatic-tracing) is an open source LLM observability tool with a convenient API to log and visualize traces, making it easy to debug and evaluate GenAI applications.
- [Guix channel](https://codeberg.org/tusharhero/ollama-guix)
@@ -14,7 +14,7 @@ Please include the following details in your report:

## Security best practices

While the maintainer team does their best to secure Ollama, users are encouraged to implement their own security best practices, such as:
While the maintainer team does its best to secure Ollama, users are encouraged to implement their own security best practices, such as:

- Regularly updating to the latest version of Ollama
- Securing access to hosted instances of Ollama
1189 anthropic/anthropic.go (executable file)
1672 anthropic/anthropic_test.go (executable file)
352 anthropic/trace.go (new file)
@@ -0,0 +1,352 @@
package anthropic

import (
	"encoding/json"
	"fmt"
	"sort"

	"github.com/ollama/ollama/api"
)

// Trace truncation limits.
const (
	TraceMaxStringRunes = 240
	TraceMaxSliceItems  = 8
	TraceMaxMapEntries  = 16
	TraceMaxDepth       = 4
)

// TraceTruncateString shortens s to TraceMaxStringRunes, appending a count of
// omitted characters when truncated.
func TraceTruncateString(s string) string {
	if len(s) == 0 {
		return s
	}
	runes := []rune(s)
	if len(runes) <= TraceMaxStringRunes {
		return s
	}
	return fmt.Sprintf("%s...(+%d chars)", string(runes[:TraceMaxStringRunes]), len(runes)-TraceMaxStringRunes)
}

// TraceJSON round-trips v through JSON and returns a compacted representation.
func TraceJSON(v any) any {
	if v == nil {
		return nil
	}
	data, err := json.Marshal(v)
	if err != nil {
		return map[string]any{"marshal_error": err.Error(), "type": fmt.Sprintf("%T", v)}
	}
	var out any
	if err := json.Unmarshal(data, &out); err != nil {
		return TraceTruncateString(string(data))
	}
	return TraceCompactValue(out, 0)
}

// TraceCompactValue recursively truncates strings, slices, and maps for trace
// output. depth tracks recursion to enforce TraceMaxDepth.
func TraceCompactValue(v any, depth int) any {
	if v == nil {
		return nil
	}
	if depth >= TraceMaxDepth {
		switch t := v.(type) {
		case string:
			return TraceTruncateString(t)
		case []any:
			return fmt.Sprintf("<array len=%d>", len(t))
		case map[string]any:
			return fmt.Sprintf("<object keys=%d>", len(t))
		default:
			return fmt.Sprintf("<%T>", v)
		}
	}
	switch t := v.(type) {
	case string:
		return TraceTruncateString(t)
	case []any:
		limit := min(len(t), TraceMaxSliceItems)
		out := make([]any, 0, limit+1)
		for i := range limit {
			out = append(out, TraceCompactValue(t[i], depth+1))
		}
		if len(t) > limit {
			out = append(out, fmt.Sprintf("... +%d more items", len(t)-limit))
		}
		return out
	case map[string]any:
		keys := make([]string, 0, len(t))
		for k := range t {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		limit := min(len(keys), TraceMaxMapEntries)
		out := make(map[string]any, limit+1)
		for i := range limit {
			out[keys[i]] = TraceCompactValue(t[keys[i]], depth+1)
		}
		if len(keys) > limit {
			out["__truncated_keys"] = len(keys) - limit
		}
		return out
	default:
		return t
	}
}

// ---------------------------------------------------------------------------
// Anthropic request/response tracing
// ---------------------------------------------------------------------------

// TraceMessagesRequest returns a compact trace representation of a MessagesRequest.
func TraceMessagesRequest(r MessagesRequest) map[string]any {
	return map[string]any{
		"model":          r.Model,
		"max_tokens":     r.MaxTokens,
		"messages":       traceMessageParams(r.Messages),
		"system":         traceAnthropicContent(r.System),
		"stream":         r.Stream,
		"tools":          traceTools(r.Tools),
		"tool_choice":    TraceJSON(r.ToolChoice),
		"thinking":       TraceJSON(r.Thinking),
		"stop_sequences": r.StopSequences,
		"temperature":    ptrVal(r.Temperature),
		"top_p":          ptrVal(r.TopP),
		"top_k":          ptrVal(r.TopK),
	}
}

// TraceMessagesResponse returns a compact trace representation of a MessagesResponse.
func TraceMessagesResponse(r MessagesResponse) map[string]any {
	return map[string]any{
		"id":          r.ID,
		"model":       r.Model,
		"content":     TraceJSON(r.Content),
		"stop_reason": r.StopReason,
		"usage":       r.Usage,
	}
}

func traceMessageParams(msgs []MessageParam) []map[string]any {
	out := make([]map[string]any, 0, len(msgs))
	for _, m := range msgs {
		out = append(out, map[string]any{
			"role":    m.Role,
			"content": traceAnthropicContent(m.Content),
		})
	}
	return out
}

func traceAnthropicContent(content any) any {
	switch c := content.(type) {
	case nil:
		return nil
	case string:
		return TraceTruncateString(c)
	case []any:
		blocks := make([]any, 0, len(c))
		for _, block := range c {
			blockMap, ok := block.(map[string]any)
			if !ok {
				blocks = append(blocks, TraceCompactValue(block, 0))
				continue
			}
			blocks = append(blocks, traceAnthropicBlock(blockMap))
		}
		return blocks
	default:
		return TraceJSON(c)
	}
}

func traceAnthropicBlock(block map[string]any) map[string]any {
	blockType, _ := block["type"].(string)
	out := map[string]any{"type": blockType}
	switch blockType {
	case "text":
		if text, ok := block["text"].(string); ok {
			out["text"] = TraceTruncateString(text)
		} else {
			out["text"] = TraceCompactValue(block["text"], 0)
		}
	case "thinking":
		if thinking, ok := block["thinking"].(string); ok {
			out["thinking"] = TraceTruncateString(thinking)
		} else {
			out["thinking"] = TraceCompactValue(block["thinking"], 0)
		}
	case "tool_use", "server_tool_use":
		out["id"] = block["id"]
		out["name"] = block["name"]
		out["input"] = TraceCompactValue(block["input"], 0)
	case "tool_result", "web_search_tool_result":
		out["tool_use_id"] = block["tool_use_id"]
		out["content"] = TraceCompactValue(block["content"], 0)
	case "image":
		if source, ok := block["source"].(map[string]any); ok {
			out["source"] = map[string]any{
				"type":       source["type"],
				"media_type": source["media_type"],
				"url":        source["url"],
				"data_len":   len(fmt.Sprint(source["data"])),
			}
		}
	default:
		out["block"] = TraceCompactValue(block, 0)
	}
	return out
}

func traceTools(tools []Tool) []map[string]any {
	out := make([]map[string]any, 0, len(tools))
	for _, t := range tools {
		out = append(out, TraceTool(t))
	}
	return out
}

// TraceTool returns a compact trace representation of an Anthropic Tool.
func TraceTool(t Tool) map[string]any {
	return map[string]any{
		"type":         t.Type,
		"name":         t.Name,
		"description":  TraceTruncateString(t.Description),
		"input_schema": TraceJSON(t.InputSchema),
		"max_uses":     t.MaxUses,
	}
}

// ContentBlockTypes returns the type strings from content (when it's []any blocks).
func ContentBlockTypes(content any) []string {
	blocks, ok := content.([]any)
	if !ok {
		return nil
	}
	types := make([]string, 0, len(blocks))
	for _, block := range blocks {
		blockMap, ok := block.(map[string]any)
		if !ok {
			types = append(types, fmt.Sprintf("%T", block))
			continue
		}
		t, _ := blockMap["type"].(string)
		types = append(types, t)
	}
	return types
}

func ptrVal[T any](v *T) any {
	if v == nil {
		return nil
	}
	return *v
}

// ---------------------------------------------------------------------------
// Ollama api.* tracing (shared between anthropic and middleware packages)
// ---------------------------------------------------------------------------

// TraceChatRequest returns a compact trace representation of an Ollama ChatRequest.
func TraceChatRequest(req *api.ChatRequest) map[string]any {
	if req == nil {
		return nil
	}
	stream := false
	if req.Stream != nil {
		stream = *req.Stream
	}
	return map[string]any{
		"model":    req.Model,
		"messages": TraceAPIMessages(req.Messages),
		"tools":    TraceAPITools(req.Tools),
		"stream":   stream,
		"options":  req.Options,
		"think":    TraceJSON(req.Think),
	}
}

// TraceChatResponse returns a compact trace representation of an Ollama ChatResponse.
func TraceChatResponse(resp api.ChatResponse) map[string]any {
	return map[string]any{
		"model":       resp.Model,
		"done":        resp.Done,
		"done_reason": resp.DoneReason,
		"message":     TraceAPIMessage(resp.Message),
		"metrics":     TraceJSON(resp.Metrics),
	}
}

// TraceAPIMessages returns compact trace representations for a slice of api.Message.
func TraceAPIMessages(msgs []api.Message) []map[string]any {
	out := make([]map[string]any, 0, len(msgs))
	for _, m := range msgs {
		out = append(out, TraceAPIMessage(m))
	}
	return out
}

// TraceAPIMessage returns a compact trace representation of a single api.Message.
func TraceAPIMessage(m api.Message) map[string]any {
	return map[string]any{
		"role":         m.Role,
		"content":      TraceTruncateString(m.Content),
		"thinking":     TraceTruncateString(m.Thinking),
		"images":       traceImageSizes(m.Images),
		"tool_calls":   traceToolCalls(m.ToolCalls),
		"tool_name":    m.ToolName,
		"tool_call_id": m.ToolCallID,
	}
}

func traceImageSizes(images []api.ImageData) []int {
	if len(images) == 0 {
		return nil
	}
	sizes := make([]int, 0, len(images))
	for _, img := range images {
		sizes = append(sizes, len(img))
	}
	return sizes
}

// TraceAPITools returns compact trace representations for a slice of api.Tool.
func TraceAPITools(tools api.Tools) []map[string]any {
	out := make([]map[string]any, 0, len(tools))
	for _, t := range tools {
		out = append(out, TraceAPITool(t))
	}
	return out
}

// TraceAPITool returns a compact trace representation of a single api.Tool.
func TraceAPITool(t api.Tool) map[string]any {
	return map[string]any{
		"type":        t.Type,
		"name":        t.Function.Name,
		"description": TraceTruncateString(t.Function.Description),
		"parameters":  TraceJSON(t.Function.Parameters),
	}
}

// TraceToolCall returns a compact trace representation of an api.ToolCall.
func TraceToolCall(tc api.ToolCall) map[string]any {
	return map[string]any{
		"id":   tc.ID,
		"name": tc.Function.Name,
		"args": TraceJSON(tc.Function.Arguments),
	}
}

func traceToolCalls(tcs []api.ToolCall) []map[string]any {
	if len(tcs) == 0 {
		return nil
	}
	out := make([]map[string]any, 0, len(tcs))
	for _, tc := range tcs {
		out = append(out, TraceToolCall(tc))
	}
	return out
}
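A minimal sketch of what these helpers produce, using only the functions and limits defined above (TraceMaxStringRunes=240, TraceMaxSliceItems=8); the example function name and the strings import are illustrative and not part of the file:

func exampleTraceCompaction() {
	long := strings.Repeat("a", 300)
	// 240 runes are kept, followed by "...(+60 chars)".
	fmt.Println(TraceTruncateString(long))

	payload := map[string]any{
		"note":  "short strings pass through unchanged",
		"items": make([]any, 20), // compacted to 8 entries plus "... +12 more items"
	}
	// Round-trips through JSON, then truncates nested strings, slices, and maps.
	fmt.Printf("%v\n", TraceJSON(payload))
}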
@@ -165,7 +165,7 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
	return nil
}

const maxBufferSize = 512 * format.KiloByte
const maxBufferSize = 8 * format.MegaByte

func (c *Client) stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error {
	var buf io.Reader
@@ -226,7 +226,14 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f

		bts := scanner.Bytes()
		if err := json.Unmarshal(bts, &errorResponse); err != nil {
			return fmt.Errorf("unmarshal: %w", err)
			if response.StatusCode >= http.StatusBadRequest {
				return StatusError{
					StatusCode:   response.StatusCode,
					Status:       response.Status,
					ErrorMessage: string(bts),
				}
			}
			return errors.New(string(bts))
		}

		if response.StatusCode == http.StatusUnauthorized {
@@ -340,7 +347,7 @@ type CreateProgressFunc func(ProgressResponse) error
// Create creates a model from a [Modelfile]. fn is a progress function that
// behaves similarly to other methods (see [Client.Pull]).
//
// [Modelfile]: https://github.com/ollama/ollama/blob/main/docs/modelfile.md
// [Modelfile]: https://github.com/ollama/ollama/blob/main/docs/modelfile.mdx
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
	return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
		var resp ProgressResponse
@@ -442,6 +449,16 @@ func (c *Client) Version(ctx context.Context) (string, error) {
	return version.Version, nil
}

// CloudStatusExperimental returns whether cloud features are disabled on the server.
func (c *Client) CloudStatusExperimental(ctx context.Context) (*StatusResponse, error) {
	var status StatusResponse
	if err := c.do(ctx, http.MethodGet, "/api/status", nil, &status); err != nil {
		return nil, err
	}

	return &status, nil
}

// Signout will signout a client for a local ollama server.
func (c *Client) Signout(ctx context.Context) error {
	return c.do(ctx, http.MethodPost, "/api/signout", nil, nil)
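With this change, non-JSON error bodies (plain text or HTML) from the streaming path are surfaced as a StatusError that keeps the HTTP status, instead of failing JSON unmarshalling. A hedged sketch of how calling code might inspect that error; the helper name and the errors/log imports are illustrative, not part of the diff:

func handleStreamError(err error) {
	var statusErr StatusError
	if errors.As(err, &statusErr) {
		// Raw (non-JSON) bodies arrive in ErrorMessage with the status preserved.
		log.Printf("server returned %d: %s", statusErr.StatusCode, statusErr.ErrorMessage)
		return
	}
	log.Printf("request failed: %v", err)
}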
@@ -55,6 +55,7 @@ func TestClientFromEnvironment(t *testing.T) {
type testError struct {
	message    string
	statusCode int
	raw        bool // if true, write message as-is instead of JSON encoding
}

func (e testError) Error() string {
@@ -111,6 +112,20 @@ func TestClientStream(t *testing.T) {
			},
		},
	},
	{
		name: "plain text error response",
		responses: []any{
			"internal server error",
		},
		wantErr: "internal server error",
	},
	{
		name: "HTML error page",
		responses: []any{
			"<html><body>404 Not Found</body></html>",
		},
		wantErr: "404 Not Found",
	},
}

for _, tc := range testCases {
@@ -135,6 +150,12 @@ func TestClientStream(t *testing.T) {
				return
			}

			if str, ok := resp.(string); ok {
				fmt.Fprintln(w, str)
				flusher.Flush()
				continue
			}

			if err := json.NewEncoder(w).Encode(resp); err != nil {
				t.Fatalf("failed to encode response: %v", err)
			}
@@ -173,9 +194,10 @@ func TestClientStream(t *testing.T) {

func TestClientDo(t *testing.T) {
	testCases := []struct {
		name     string
		response any
		wantErr  string
		name           string
		response       any
		wantErr        string
		wantStatusCode int
	}{
		{
			name: "immediate error response",
@@ -183,7 +205,8 @@ func TestClientDo(t *testing.T) {
				message:    "test error message",
				statusCode: http.StatusBadRequest,
			},
			wantErr: "test error message",
			wantErr:        "test error message",
			wantStatusCode: http.StatusBadRequest,
		},
		{
			name: "server error response",
@@ -191,7 +214,8 @@ func TestClientDo(t *testing.T) {
				message:    "internal error",
				statusCode: http.StatusInternalServerError,
			},
			wantErr: "internal error",
			wantErr:        "internal error",
			wantStatusCode: http.StatusInternalServerError,
		},
		{
			name: "successful response",
@@ -203,6 +227,26 @@ func TestClientDo(t *testing.T) {
				Success: true,
			},
		},
		{
			name: "plain text error response",
			response: testError{
				message:    "internal server error",
				statusCode: http.StatusInternalServerError,
				raw:        true,
			},
			wantErr:        "internal server error",
			wantStatusCode: http.StatusInternalServerError,
		},
		{
			name: "HTML error page",
			response: testError{
				message:    "<html><body>404 Not Found</body></html>",
				statusCode: http.StatusNotFound,
				raw:        true,
			},
			wantErr:        "<html><body>404 Not Found</body></html>",
			wantStatusCode: http.StatusNotFound,
		},
	}

	for _, tc := range testCases {
@@ -210,11 +254,16 @@ func TestClientDo(t *testing.T) {
		ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if errResp, ok := tc.response.(testError); ok {
				w.WriteHeader(errResp.statusCode)
				err := json.NewEncoder(w).Encode(map[string]string{
					"error": errResp.message,
				})
				if err != nil {
					t.Fatal("failed to encode error response:", err)
				if !errResp.raw {
					err := json.NewEncoder(w).Encode(map[string]string{
						"error": errResp.message,
					})
					if err != nil {
						t.Fatal("failed to encode error response:", err)
					}
				} else {
					// Write raw message (simulates non-JSON error responses)
					fmt.Fprint(w, errResp.message)
				}
				return
			}
@@ -241,6 +290,15 @@ func TestClientDo(t *testing.T) {
			if err.Error() != tc.wantErr {
				t.Errorf("error message mismatch: got %q, want %q", err.Error(), tc.wantErr)
			}
			if tc.wantStatusCode != 0 {
				if statusErr, ok := err.(StatusError); ok {
					if statusErr.StatusCode != tc.wantStatusCode {
						t.Errorf("status code mismatch: got %d, want %d", statusErr.StatusCode, tc.wantStatusCode)
					}
				} else {
					t.Errorf("expected StatusError, got %T", err)
				}
			}
			return
		}
@@ -15,19 +15,19 @@ func main() {
	}

	messages := []api.Message{
		api.Message{
		{
			Role:    "system",
			Content: "Provide very brief, concise responses",
		},
		api.Message{
		{
			Role:    "user",
			Content: "Name some unusual animals",
		},
		api.Message{
		{
			Role:    "assistant",
			Content: "Monotreme, platypus, echidna",
		},
		api.Message{
		{
			Role:    "user",
			Content: "which of these is the most dangerous?",
		},
248 api/types.go
@@ -3,6 +3,7 @@ package api
import (
	"encoding/json"
	"fmt"
	"iter"
	"log/slog"
	"math"
	"os"
@@ -14,6 +15,7 @@ import (
	"github.com/google/uuid"

	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/internal/orderedmap"
	"github.com/ollama/ollama/types/model"
)

@@ -117,6 +119,28 @@ type GenerateRequest struct {
	// DebugRenderOnly is a debug option that, when set to true, returns the rendered
	// template instead of calling the model.
	DebugRenderOnly bool `json:"_debug_render_only,omitempty"`

	// Logprobs specifies whether to return log probabilities of the output tokens.
	Logprobs bool `json:"logprobs,omitempty"`

	// TopLogprobs is the number of most likely tokens to return at each token position,
	// each with an associated log probability. Only applies when Logprobs is true.
	// Valid values are 0-20. Default is 0 (only return the selected token's logprob).
	TopLogprobs int `json:"top_logprobs,omitempty"`

	// Experimental: Image generation fields (may change or be removed)

	// Width is the width of the generated image in pixels.
	// Only used for image generation models.
	Width int32 `json:"width,omitempty"`

	// Height is the height of the generated image in pixels.
	// Only used for image generation models.
	Height int32 `json:"height,omitempty"`

	// Steps is the number of diffusion steps for image generation.
	// Only used for image generation models.
	Steps int32 `json:"steps,omitempty"`
}

// ChatRequest describes a request sent by [Client.Chat].
@@ -159,6 +183,14 @@ type ChatRequest struct {
	// DebugRenderOnly is a debug option that, when set to true, returns the rendered
	// template instead of calling the model.
	DebugRenderOnly bool `json:"_debug_render_only,omitempty"`

	// Logprobs specifies whether to return log probabilities of the output tokens.
	Logprobs bool `json:"logprobs,omitempty"`

	// TopLogprobs is the number of most likely tokens to return at each token position,
	// each with an associated log probability. Only applies when Logprobs is true.
	// Valid values are 0-20. Default is 0 (only return the selected token's logprob).
	TopLogprobs int `json:"top_logprobs,omitempty"`
}

type Tools []Tool
@@ -211,13 +243,79 @@ type ToolCallFunction struct {
	Arguments ToolCallFunctionArguments `json:"arguments"`
}

type ToolCallFunctionArguments map[string]any
// ToolCallFunctionArguments holds tool call arguments in insertion order.
type ToolCallFunctionArguments struct {
	om *orderedmap.Map[string, any]
}

// NewToolCallFunctionArguments creates a new empty ToolCallFunctionArguments.
func NewToolCallFunctionArguments() ToolCallFunctionArguments {
	return ToolCallFunctionArguments{om: orderedmap.New[string, any]()}
}

// Get retrieves a value by key.
func (t *ToolCallFunctionArguments) Get(key string) (any, bool) {
	if t == nil || t.om == nil {
		return nil, false
	}
	return t.om.Get(key)
}

// Set sets a key-value pair, preserving insertion order.
func (t *ToolCallFunctionArguments) Set(key string, value any) {
	if t == nil {
		return
	}
	if t.om == nil {
		t.om = orderedmap.New[string, any]()
	}
	t.om.Set(key, value)
}

// Len returns the number of arguments.
func (t *ToolCallFunctionArguments) Len() int {
	if t == nil || t.om == nil {
		return 0
	}
	return t.om.Len()
}

// All returns an iterator over all key-value pairs in insertion order.
func (t *ToolCallFunctionArguments) All() iter.Seq2[string, any] {
	if t == nil || t.om == nil {
		return func(yield func(string, any) bool) {}
	}
	return t.om.All()
}

// ToMap returns a regular map (order not preserved).
func (t *ToolCallFunctionArguments) ToMap() map[string]any {
	if t == nil || t.om == nil {
		return nil
	}
	return t.om.ToMap()
}

func (t *ToolCallFunctionArguments) String() string {
	bts, _ := json.Marshal(t)
	if t == nil || t.om == nil {
		return "{}"
	}
	bts, _ := json.Marshal(t.om)
	return string(bts)
}

func (t *ToolCallFunctionArguments) UnmarshalJSON(data []byte) error {
	t.om = orderedmap.New[string, any]()
	return json.Unmarshal(data, t.om)
}

func (t ToolCallFunctionArguments) MarshalJSON() ([]byte, error) {
	if t.om == nil {
		return []byte("{}"), nil
	}
	return json.Marshal(t.om)
}
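A short sketch of the new ordered-arguments API in use, assuming the api package exactly as defined in this file (the example function name is illustrative):

func exampleOrderedArguments() {
	args := NewToolCallFunctionArguments()
	args.Set("city", "Paris")
	args.Set("unit", "celsius")

	// Iteration and JSON marshalling both follow insertion order.
	for k, v := range args.All() {
		fmt.Println(k, v)
	}
	data, _ := json.Marshal(args)
	fmt.Println(string(data)) // {"city":"Paris","unit":"celsius"}
}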
type Tool struct {
	Type  string `json:"type"`
	Items any    `json:"items,omitempty"`
@@ -266,12 +364,79 @@ func (pt PropertyType) String() string {
	return fmt.Sprintf("%v", []string(pt))
}

// ToolPropertiesMap holds tool properties in insertion order.
type ToolPropertiesMap struct {
	om *orderedmap.Map[string, ToolProperty]
}

// NewToolPropertiesMap creates a new empty ToolPropertiesMap.
func NewToolPropertiesMap() *ToolPropertiesMap {
	return &ToolPropertiesMap{om: orderedmap.New[string, ToolProperty]()}
}

// Get retrieves a property by name.
func (t *ToolPropertiesMap) Get(key string) (ToolProperty, bool) {
	if t == nil || t.om == nil {
		return ToolProperty{}, false
	}
	return t.om.Get(key)
}

// Set sets a property, preserving insertion order.
func (t *ToolPropertiesMap) Set(key string, value ToolProperty) {
	if t == nil {
		return
	}
	if t.om == nil {
		t.om = orderedmap.New[string, ToolProperty]()
	}
	t.om.Set(key, value)
}

// Len returns the number of properties.
func (t *ToolPropertiesMap) Len() int {
	if t == nil || t.om == nil {
		return 0
	}
	return t.om.Len()
}

// All returns an iterator over all properties in insertion order.
func (t *ToolPropertiesMap) All() iter.Seq2[string, ToolProperty] {
	if t == nil || t.om == nil {
		return func(yield func(string, ToolProperty) bool) {}
	}
	return t.om.All()
}

// ToMap returns a regular map (order not preserved).
func (t *ToolPropertiesMap) ToMap() map[string]ToolProperty {
	if t == nil || t.om == nil {
		return nil
	}
	return t.om.ToMap()
}

func (t ToolPropertiesMap) MarshalJSON() ([]byte, error) {
	if t.om == nil {
		return []byte("null"), nil
	}
	return json.Marshal(t.om)
}

func (t *ToolPropertiesMap) UnmarshalJSON(data []byte) error {
	t.om = orderedmap.New[string, ToolProperty]()
	return json.Unmarshal(data, t.om)
}

type ToolProperty struct {
	AnyOf       []ToolProperty `json:"anyOf,omitempty"`
	Type        PropertyType   `json:"type,omitempty"`
	Items       any            `json:"items,omitempty"`
	Description string         `json:"description,omitempty"`
	Enum        []any          `json:"enum,omitempty"`
	AnyOf       []ToolProperty     `json:"anyOf,omitempty"`
	Type        PropertyType       `json:"type,omitempty"`
	Items       any                `json:"items,omitempty"`
	Description string             `json:"description,omitempty"`
	Enum        []any              `json:"enum,omitempty"`
	Properties  *ToolPropertiesMap `json:"properties,omitempty"`
	Required    []string           `json:"required,omitempty"`
}

// ToTypeScriptType converts a ToolProperty to a TypeScript type string
@@ -320,11 +485,11 @@ func mapToTypeScriptType(jsonType string) string {
}

type ToolFunctionParameters struct {
	Type       string                  `json:"type"`
	Defs       any                     `json:"$defs,omitempty"`
	Items      any                     `json:"items,omitempty"`
	Required   []string                `json:"required"`
	Properties map[string]ToolProperty `json:"properties"`
	Type       string             `json:"type"`
	Defs       any                `json:"$defs,omitempty"`
	Items      any                `json:"items,omitempty"`
	Required   []string           `json:"required,omitempty"`
	Properties *ToolPropertiesMap `json:"properties"`
}

func (t *ToolFunctionParameters) String() string {
@@ -343,6 +508,27 @@ func (t *ToolFunction) String() string {
	return string(bts)
}

// TokenLogprob represents log probability information for a single token alternative.
type TokenLogprob struct {
	// Token is the text representation of the token.
	Token string `json:"token"`

	// Logprob is the log probability of this token.
	Logprob float64 `json:"logprob"`

	// Bytes contains the raw byte representation of the token
	Bytes []int `json:"bytes,omitempty"`
}

// Logprob contains log probability information for a generated token.
type Logprob struct {
	TokenLogprob

	// TopLogprobs contains the most likely tokens and their log probabilities
	// at this position, if requested via TopLogprobs parameter.
	TopLogprobs []TokenLogprob `json:"top_logprobs,omitempty"`
}
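A hedged sketch of requesting these log probabilities through [Client.Chat]; the model name, prompt, and helper name are placeholders, and the context import plus the Chat/ClientFromEnvironment signatures are assumed from the existing api package rather than shown in this diff:

func exampleLogprobs() error {
	client, err := ClientFromEnvironment()
	if err != nil {
		return err
	}
	stream := false
	req := &ChatRequest{
		Model:       "llama3.2", // placeholder model name
		Messages:    []Message{{Role: "user", Content: "Say hello"}},
		Stream:      &stream,
		Logprobs:    true,
		TopLogprobs: 3, // 0-20 per the field documentation above
	}
	return client.Chat(context.Background(), req, func(resp ChatResponse) error {
		for _, lp := range resp.Logprobs {
			fmt.Println(lp.Token, lp.Logprob, len(lp.TopLogprobs))
		}
		return nil
	})
}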
// ChatResponse is the response returned by [Client.Chat]. Its fields are
// similar to [GenerateResponse].
type ChatResponse struct {
@@ -369,6 +555,10 @@ type ChatResponse struct {

	DebugInfo *DebugInfo `json:"_debug_info,omitempty"`

	// Logprobs contains log probability information for the generated tokens,
	// if requested via the Logprobs parameter.
	Logprobs []Logprob `json:"logprobs,omitempty"`

	Metrics
}

@@ -512,6 +702,9 @@ type CreateRequest struct {
	Renderer string `json:"renderer,omitempty"`
	Parser   string `json:"parser,omitempty"`

	// Requires is the minimum version of Ollama required by the model.
	Requires string `json:"requires,omitempty"`

	// Info is a map of additional information for the model
	Info map[string]any `json:"info,omitempty"`

@@ -557,11 +750,12 @@ type ShowResponse struct {
	Messages      []Message          `json:"messages,omitempty"`
	RemoteModel   string             `json:"remote_model,omitempty"`
	RemoteHost    string             `json:"remote_host,omitempty"`
	ModelInfo     map[string]any     `json:"model_info,omitempty"`
	ModelInfo     map[string]any     `json:"model_info"`
	ProjectorInfo map[string]any     `json:"projector_info,omitempty"`
	Tensors       []Tensor           `json:"tensors,omitempty"`
	Capabilities  []model.Capability `json:"capabilities,omitempty"`
	ModifiedAt    time.Time          `json:"modified_at,omitempty"`
	Requires      string             `json:"requires,omitempty"`
}

// CopyRequest is the request passed to [Client.Copy].
@@ -641,6 +835,16 @@ type TokenResponse struct {
	Token string `json:"token"`
}

type CloudStatus struct {
	Disabled bool   `json:"disabled"`
	Source   string `json:"source"`
}

// StatusResponse is the response from [Client.CloudStatusExperimental].
type StatusResponse struct {
	Cloud CloudStatus `json:"cloud"`
}

// GenerateResponse is the response passed into [GenerateResponseFunc].
type GenerateResponse struct {
	// Model is the model name that generated the response.
@@ -677,6 +881,24 @@ type GenerateResponse struct {
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`

	DebugInfo *DebugInfo `json:"_debug_info,omitempty"`

	// Logprobs contains log probability information for the generated tokens,
	// if requested via the Logprobs parameter.
	Logprobs []Logprob `json:"logprobs,omitempty"`

	// Experimental: Image generation fields (may change or be removed)

	// Image contains a base64-encoded generated image.
	// Only present for image generation models.
	Image string `json:"image,omitempty"`

	// Completed is the number of completed steps in image generation.
	// Only present for image generation models during streaming.
	Completed int64 `json:"completed,omitempty"`

	// Total is the total number of steps for image generation.
	// Only present for image generation models during streaming.
	Total int64 `json:"total,omitempty"`
}

// ModelDetails provides details about a model.
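A brief sketch wiring the new status types to the CloudStatusExperimental client method added in api/client.go above (helper name illustrative; context and fmt imports assumed):

func exampleCloudStatus() {
	client, err := ClientFromEnvironment()
	if err != nil {
		return
	}
	status, err := client.CloudStatusExperimental(context.Background())
	if err != nil {
		return
	}
	fmt.Println("cloud disabled:", status.Cloud.Disabled, "source:", status.Cloud.Source)
}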
@@ -11,6 +11,24 @@ import (
"github.com/stretchr/testify/require"
)

// testPropsMap creates a ToolPropertiesMap from a map (convenience function for tests, order not preserved)
func testPropsMap(m map[string]ToolProperty) *ToolPropertiesMap {
props := NewToolPropertiesMap()
for k, v := range m {
props.Set(k, v)
}
return props
}

// testArgs creates ToolCallFunctionArguments from a map (convenience function for tests, order not preserved)
func testArgs(m map[string]any) ToolCallFunctionArguments {
args := NewToolCallFunctionArguments()
for k, v := range m {
args.Set(k, v)
}
return args
}

func TestKeepAliveParsingFromJSON(t *testing.T) {
tests := []struct {
name string
@@ -298,10 +316,48 @@ func TestToolFunction_UnmarshalJSON(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolFunctionParameters_MarshalJSON(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input ToolFunctionParameters
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "simple object with string property",
|
||||
input: ToolFunctionParameters{
|
||||
Type: "object",
|
||||
Required: []string{"name"},
|
||||
Properties: testPropsMap(map[string]ToolProperty{
|
||||
"name": {Type: PropertyType{"string"}},
|
||||
}),
|
||||
},
|
||||
expected: `{"type":"object","required":["name"],"properties":{"name":{"type":"string"}}}`,
|
||||
},
|
||||
{
|
||||
name: "no required",
|
||||
input: ToolFunctionParameters{
|
||||
Type: "object",
|
||||
Properties: testPropsMap(map[string]ToolProperty{
|
||||
"name": {Type: PropertyType{"string"}},
|
||||
}),
|
||||
},
|
||||
expected: `{"type":"object","properties":{"name":{"type":"string"}}}`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
data, err := json.Marshal(test.input)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.expected, string(data))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolCallFunction_IndexAlwaysMarshals(t *testing.T) {
|
||||
fn := ToolCallFunction{
|
||||
Name: "echo",
|
||||
Arguments: ToolCallFunctionArguments{"message": "hi"},
|
||||
Arguments: testArgs(map[string]any{"message": "hi"}),
|
||||
}
|
||||
|
||||
data, err := json.Marshal(fn)
|
||||
@@ -466,6 +522,116 @@ func TestThinking_UnmarshalJSON(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolPropertyNestedProperties(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected ToolProperty
|
||||
}{
|
||||
{
|
||||
name: "nested object properties",
|
||||
input: `{
|
||||
"type": "object",
|
||||
"description": "Location details",
|
||||
"properties": {
|
||||
"address": {
|
||||
"type": "string",
|
||||
"description": "Street address"
|
||||
},
|
||||
"city": {
|
||||
"type": "string",
|
||||
"description": "City name"
|
||||
}
|
||||
}
|
||||
}`,
|
||||
expected: ToolProperty{
|
||||
Type: PropertyType{"object"},
|
||||
Description: "Location details",
|
||||
Properties: testPropsMap(map[string]ToolProperty{
|
||||
"address": {
|
||||
Type: PropertyType{"string"},
|
||||
Description: "Street address",
|
||||
},
|
||||
"city": {
|
||||
Type: PropertyType{"string"},
|
||||
Description: "City name",
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deeply nested properties",
|
||||
input: `{
|
||||
"type": "object",
|
||||
"description": "Event",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "object",
|
||||
"description": "Location",
|
||||
"properties": {
|
||||
"coordinates": {
|
||||
"type": "object",
|
||||
"description": "GPS coordinates",
|
||||
"properties": {
|
||||
"lat": {"type": "number", "description": "Latitude"},
|
||||
"lng": {"type": "number", "description": "Longitude"}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`,
|
||||
expected: ToolProperty{
|
||||
Type: PropertyType{"object"},
|
||||
Description: "Event",
|
||||
Properties: testPropsMap(map[string]ToolProperty{
|
||||
"location": {
|
||||
Type: PropertyType{"object"},
|
||||
Description: "Location",
|
||||
Properties: testPropsMap(map[string]ToolProperty{
|
||||
"coordinates": {
|
||||
Type: PropertyType{"object"},
|
||||
Description: "GPS coordinates",
|
||||
Properties: testPropsMap(map[string]ToolProperty{
|
||||
"lat": {Type: PropertyType{"number"}, Description: "Latitude"},
|
||||
"lng": {Type: PropertyType{"number"}, Description: "Longitude"},
|
||||
}),
|
||||
},
|
||||
}),
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var prop ToolProperty
|
||||
err := json.Unmarshal([]byte(tt.input), &prop)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compare JSON representations since pointer comparison doesn't work
|
||||
expectedJSON, err := json.Marshal(tt.expected)
|
||||
require.NoError(t, err)
|
||||
actualJSON, err := json.Marshal(prop)
|
||||
require.NoError(t, err)
|
||||
assert.JSONEq(t, string(expectedJSON), string(actualJSON))
|
||||
|
||||
// Round-trip test: marshal and unmarshal again
|
||||
data, err := json.Marshal(prop)
|
||||
require.NoError(t, err)
|
||||
|
||||
var prop2 ToolProperty
|
||||
err = json.Unmarshal(data, &prop2)
|
||||
require.NoError(t, err)
|
||||
|
||||
prop2JSON, err := json.Marshal(prop2)
|
||||
require.NoError(t, err)
|
||||
assert.JSONEq(t, string(expectedJSON), string(prop2JSON))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolFunctionParameters_String(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -477,12 +643,12 @@ func TestToolFunctionParameters_String(t *testing.T) {
|
||||
params: ToolFunctionParameters{
|
||||
Type: "object",
|
||||
Required: []string{"name"},
|
||||
Properties: map[string]ToolProperty{
|
||||
Properties: testPropsMap(map[string]ToolProperty{
|
||||
"name": {
|
||||
Type: PropertyType{"string"},
|
||||
Description: "The name of the person",
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
expected: `{"type":"object","required":["name"],"properties":{"name":{"type":"string","description":"The name of the person"}}}`,
|
||||
},
|
||||
@@ -499,7 +665,7 @@ func TestToolFunctionParameters_String(t *testing.T) {
|
||||
s.Self = s
|
||||
return s
|
||||
}(),
|
||||
Properties: map[string]ToolProperty{},
|
||||
Properties: testPropsMap(map[string]ToolProperty{}),
|
||||
},
|
||||
expected: "",
|
||||
},
|
||||
@@ -512,3 +678,235 @@ func TestToolFunctionParameters_String(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolCallFunctionArguments_OrderPreservation(t *testing.T) {
|
||||
t.Run("marshal preserves insertion order", func(t *testing.T) {
|
||||
args := NewToolCallFunctionArguments()
|
||||
args.Set("zebra", "z")
|
||||
args.Set("apple", "a")
|
||||
args.Set("mango", "m")
|
||||
|
||||
data, err := json.Marshal(args)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should preserve insertion order, not alphabetical
|
||||
assert.Equal(t, `{"zebra":"z","apple":"a","mango":"m"}`, string(data))
|
||||
})
|
||||
|
||||
t.Run("unmarshal preserves JSON order", func(t *testing.T) {
|
||||
jsonData := `{"zebra":"z","apple":"a","mango":"m"}`
|
||||
|
||||
var args ToolCallFunctionArguments
|
||||
err := json.Unmarshal([]byte(jsonData), &args)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify iteration order matches JSON order
|
||||
var keys []string
|
||||
for k := range args.All() {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
assert.Equal(t, []string{"zebra", "apple", "mango"}, keys)
|
||||
})
|
||||
|
||||
t.Run("round trip preserves order", func(t *testing.T) {
|
||||
original := `{"z":1,"a":2,"m":3,"b":4}`
|
||||
|
||||
var args ToolCallFunctionArguments
|
||||
err := json.Unmarshal([]byte(original), &args)
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := json.Marshal(args)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, original, string(data))
|
||||
})
|
||||
|
||||
t.Run("String method returns ordered JSON", func(t *testing.T) {
|
||||
args := NewToolCallFunctionArguments()
|
||||
args.Set("c", 3)
|
||||
args.Set("a", 1)
|
||||
args.Set("b", 2)
|
||||
|
||||
assert.Equal(t, `{"c":3,"a":1,"b":2}`, args.String())
|
||||
})
|
||||
|
||||
t.Run("Get retrieves correct values", func(t *testing.T) {
|
||||
args := NewToolCallFunctionArguments()
|
||||
args.Set("key1", "value1")
|
||||
args.Set("key2", 42)
|
||||
|
||||
v, ok := args.Get("key1")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "value1", v)
|
||||
|
||||
v, ok = args.Get("key2")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 42, v)
|
||||
|
||||
_, ok = args.Get("nonexistent")
|
||||
assert.False(t, ok)
|
||||
})
|
||||
|
||||
t.Run("Len returns correct count", func(t *testing.T) {
|
||||
args := NewToolCallFunctionArguments()
|
||||
assert.Equal(t, 0, args.Len())
|
||||
|
||||
args.Set("a", 1)
|
||||
assert.Equal(t, 1, args.Len())
|
||||
|
||||
args.Set("b", 2)
|
||||
assert.Equal(t, 2, args.Len())
|
||||
})
|
||||
|
||||
t.Run("empty args marshal to empty object", func(t *testing.T) {
|
||||
args := NewToolCallFunctionArguments()
|
||||
data, err := json.Marshal(args)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, `{}`, string(data))
|
||||
})
|
||||
|
||||
t.Run("zero value args marshal to empty object", func(t *testing.T) {
|
||||
var args ToolCallFunctionArguments
|
||||
assert.Equal(t, "{}", args.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestToolPropertiesMap_OrderPreservation(t *testing.T) {
|
||||
t.Run("marshal preserves insertion order", func(t *testing.T) {
|
||||
props := NewToolPropertiesMap()
|
||||
props.Set("zebra", ToolProperty{Type: PropertyType{"string"}})
|
||||
props.Set("apple", ToolProperty{Type: PropertyType{"number"}})
|
||||
props.Set("mango", ToolProperty{Type: PropertyType{"boolean"}})
|
||||
|
||||
data, err := json.Marshal(props)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should preserve insertion order, not alphabetical
|
||||
expected := `{"zebra":{"type":"string"},"apple":{"type":"number"},"mango":{"type":"boolean"}}`
|
||||
assert.Equal(t, expected, string(data))
|
||||
})
|
||||
|
||||
t.Run("unmarshal preserves JSON order", func(t *testing.T) {
|
||||
jsonData := `{"zebra":{"type":"string"},"apple":{"type":"number"},"mango":{"type":"boolean"}}`
|
||||
|
||||
var props ToolPropertiesMap
|
||||
err := json.Unmarshal([]byte(jsonData), &props)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify iteration order matches JSON order
|
||||
var keys []string
|
||||
for k := range props.All() {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
assert.Equal(t, []string{"zebra", "apple", "mango"}, keys)
|
||||
})
|
||||
|
||||
t.Run("round trip preserves order", func(t *testing.T) {
|
||||
original := `{"z":{"type":"string"},"a":{"type":"number"},"m":{"type":"boolean"}}`
|
||||
|
||||
var props ToolPropertiesMap
|
||||
err := json.Unmarshal([]byte(original), &props)
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := json.Marshal(props)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, original, string(data))
|
||||
})
|
||||
|
||||
t.Run("Get retrieves correct values", func(t *testing.T) {
|
||||
props := NewToolPropertiesMap()
|
||||
props.Set("name", ToolProperty{Type: PropertyType{"string"}, Description: "The name"})
|
||||
props.Set("age", ToolProperty{Type: PropertyType{"integer"}, Description: "The age"})
|
||||
|
||||
v, ok := props.Get("name")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "The name", v.Description)
|
||||
|
||||
v, ok = props.Get("age")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "The age", v.Description)
|
||||
|
||||
_, ok = props.Get("nonexistent")
|
||||
assert.False(t, ok)
|
||||
})
|
||||
|
||||
t.Run("Len returns correct count", func(t *testing.T) {
|
||||
props := NewToolPropertiesMap()
|
||||
assert.Equal(t, 0, props.Len())
|
||||
|
||||
props.Set("a", ToolProperty{})
|
||||
assert.Equal(t, 1, props.Len())
|
||||
|
||||
props.Set("b", ToolProperty{})
|
||||
assert.Equal(t, 2, props.Len())
|
||||
})
|
||||
|
||||
t.Run("nil props marshal to null", func(t *testing.T) {
|
||||
var props *ToolPropertiesMap
|
||||
data, err := json.Marshal(props)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, `null`, string(data))
|
||||
})
|
||||
|
||||
t.Run("ToMap returns regular map", func(t *testing.T) {
|
||||
props := NewToolPropertiesMap()
|
||||
props.Set("a", ToolProperty{Type: PropertyType{"string"}})
|
||||
props.Set("b", ToolProperty{Type: PropertyType{"number"}})
|
||||
|
||||
m := props.ToMap()
|
||||
assert.Equal(t, 2, len(m))
|
||||
assert.Equal(t, PropertyType{"string"}, m["a"].Type)
|
||||
assert.Equal(t, PropertyType{"number"}, m["b"].Type)
|
||||
})
|
||||
}
|
||||
|
||||
func TestToolCallFunctionArguments_ComplexValues(t *testing.T) {
|
||||
t.Run("nested objects preserve order", func(t *testing.T) {
|
||||
jsonData := `{"outer":{"z":1,"a":2},"simple":"value"}`
|
||||
|
||||
var args ToolCallFunctionArguments
|
||||
err := json.Unmarshal([]byte(jsonData), &args)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Outer keys should be in order
|
||||
var keys []string
|
||||
for k := range args.All() {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
assert.Equal(t, []string{"outer", "simple"}, keys)
|
||||
})
|
||||
|
||||
t.Run("arrays as values", func(t *testing.T) {
|
||||
args := NewToolCallFunctionArguments()
|
||||
args.Set("items", []string{"a", "b", "c"})
|
||||
args.Set("numbers", []int{1, 2, 3})
|
||||
|
||||
data, err := json.Marshal(args)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, `{"items":["a","b","c"],"numbers":[1,2,3]}`, string(data))
|
||||
})
|
||||
}
|
||||
|
||||
func TestToolPropertiesMap_NestedProperties(t *testing.T) {
|
||||
t.Run("nested properties preserve order", func(t *testing.T) {
|
||||
props := NewToolPropertiesMap()
|
||||
|
||||
nestedProps := NewToolPropertiesMap()
|
||||
nestedProps.Set("z_field", ToolProperty{Type: PropertyType{"string"}})
|
||||
nestedProps.Set("a_field", ToolProperty{Type: PropertyType{"number"}})
|
||||
|
||||
props.Set("outer", ToolProperty{
|
||||
Type: PropertyType{"object"},
|
||||
Properties: nestedProps,
|
||||
})
|
||||
|
||||
data, err := json.Marshal(props)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Both outer and inner should preserve order
|
||||
expected := `{"outer":{"type":"object","properties":{"z_field":{"type":"string"},"a_field":{"type":"number"}}}}`
|
||||
assert.Equal(t, expected, string(data))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -48,16 +48,6 @@ The `-dev` flag enables:
- CORS headers for cross-origin requests
- Hot-reload support for UI development

#### Run Storybook

Inside the `ui/app` directory, run:

```bash
npm run storybook
```

For now we're writing stories as siblings of the component they're testing. So for example, `src/components/Message.stories.tsx` is the story for `src/components/Message.tsx`.

## Build

@@ -85,9 +75,9 @@ For now we're writing stories as siblings of the component they're testing. So f
CI builds with Xcode 14.1 for OS compatibility prior to v13. If you want to manually build v11+ support, you can download the older Xcode [here](https://developer.apple.com/services-account/download?path=/Developer_Tools/Xcode_14.1/Xcode_14.1.xip), extract it, move it with `mv ./Xcode.app /Applications/Xcode_14.1.0.app`, then activate it with:

```
export CGO_CFLAGS=-mmacosx-version-min=12.0
export CGO_CXXFLAGS=-mmacosx-version-min=12.0
export CGO_LDFLAGS=-mmacosx-version-min=12.0
export CGO_CFLAGS="-O3 -mmacosx-version-min=12.0"
export CGO_CXXFLAGS="-O3 -mmacosx-version-min=12.0"
export CGO_LDFLAGS="-mmacosx-version-min=12.0"
export SDKROOT=/Applications/Xcode_14.1.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
export DEVELOPER_DIR=/Applications/Xcode_14.1.0.app/Contents/Developer
```

@@ -35,6 +35,7 @@ import (
|
||||
var (
|
||||
wv = &Webview{}
|
||||
uiServerPort int
|
||||
appStore *store.Store
|
||||
)
|
||||
|
||||
var debug = strings.EqualFold(os.Getenv("OLLAMA_DEBUG"), "true") || os.Getenv("OLLAMA_DEBUG") == "1"
|
||||
@@ -208,6 +209,7 @@ func main() {
|
||||
uiServerPort = port
|
||||
|
||||
st := &store.Store{}
|
||||
appStore = st
|
||||
|
||||
// Enable CORS in development mode
|
||||
if devMode {
|
||||
@@ -253,6 +255,8 @@ func main() {
|
||||
done <- osrv.Run(octx)
|
||||
}()
|
||||
|
||||
upd := &updater.Updater{Store: st}
|
||||
|
||||
uiServer := ui.Server{
|
||||
Token: token,
|
||||
Restart: func() {
|
||||
@@ -267,16 +271,16 @@ func main() {
|
||||
ToolRegistry: toolRegistry,
|
||||
Dev: devMode,
|
||||
Logger: slog.Default(),
|
||||
Updater: upd,
|
||||
UpdateAvailableFunc: func() {
|
||||
UpdateAvailable("")
|
||||
},
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: uiServer.Handler(),
|
||||
}
|
||||
|
||||
if _, err := uiServer.UserData(ctx); err != nil {
|
||||
slog.Warn("failed to load user data", "error", err)
|
||||
}
|
||||
|
||||
// Start the UI server
|
||||
slog.Info("starting ui server", "port", port)
|
||||
go func() {
|
||||
@@ -288,8 +292,20 @@ func main() {
|
||||
slog.Debug("background desktop server done")
|
||||
}()
|
||||
|
||||
updater := &updater.Updater{Store: st}
|
||||
updater.StartBackgroundUpdaterChecker(ctx, UpdateAvailable)
|
||||
upd.StartBackgroundUpdaterChecker(ctx, UpdateAvailable)
|
||||
|
||||
// Check for pending updates on startup (show tray notification if update is ready)
|
||||
if updater.IsUpdatePending() {
|
||||
// On Windows, the tray is initialized in osRun(). Calling UpdateAvailable
|
||||
// before that would dereference a nil tray callback.
|
||||
// TODO: refactor so the update check runs after platform init on all platforms.
|
||||
if runtime.GOOS == "windows" {
|
||||
slog.Debug("update pending on startup, deferring tray notification until tray initialization")
|
||||
} else {
|
||||
slog.Debug("update pending on startup, showing tray notification")
|
||||
UpdateAvailable("")
|
||||
}
|
||||
}
|
||||
|
||||
hasCompletedFirstRun, err := st.HasCompletedFirstRun()
|
||||
if err != nil {
|
||||
@@ -320,6 +336,17 @@ func main() {
|
||||
slog.Debug("no URL scheme request to handle")
|
||||
}
|
||||
|
||||
go func() {
|
||||
slog.Debug("waiting for ollama server to be ready")
|
||||
if err := ui.WaitForServer(ctx, 10*time.Second); err != nil {
|
||||
slog.Warn("ollama server not ready, continuing anyway", "error", err)
|
||||
}
|
||||
|
||||
if _, err := uiServer.UserData(ctx); err != nil {
|
||||
slog.Warn("failed to load user data", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
osRun(cancel, hasCompletedFirstRun, startHidden)
|
||||
|
||||
slog.Info("shutting down desktop server")
|
||||
@@ -341,6 +368,17 @@ func startHiddenTasks() {
|
||||
// CLI triggered app startup use-case
|
||||
slog.Info("deferring pending update for fast startup")
|
||||
} else {
|
||||
// Check if auto-update is enabled before automatically upgrading
|
||||
settings, err := appStore.Settings()
|
||||
if err != nil {
|
||||
slog.Warn("failed to load settings for upgrade check", "error", err)
|
||||
} else if !settings.AutoUpdateEnabled {
|
||||
slog.Info("auto-update disabled, skipping automatic upgrade at startup")
|
||||
// Still show tray notification so user knows update is ready
|
||||
UpdateAvailable("")
|
||||
return
|
||||
}
|
||||
|
||||
if err := updater.DoUpgradeAtStartup(); err != nil {
|
||||
slog.Info("unable to perform upgrade at startup", "error", err)
|
||||
// Make sure the restart to upgrade menu shows so we can attempt an interactive upgrade to get authorization
|
||||
@@ -361,7 +399,7 @@ func checkUserLoggedIn(uiServerPort int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/api/v1/me", uiServerPort))
|
||||
resp, err := http.Post(fmt.Sprintf("http://127.0.0.1:%d/api/me", uiServerPort), "application/json", nil)
|
||||
if err != nil {
|
||||
slog.Debug("failed to call local auth endpoint", "error", err)
|
||||
return false
|
||||
@@ -397,8 +435,8 @@ func checkUserLoggedIn(uiServerPort int) bool {
|
||||
// handleConnectURLScheme fetches the connect URL and opens it in the browser
|
||||
func handleConnectURLScheme() {
|
||||
if checkUserLoggedIn(uiServerPort) {
|
||||
slog.Info("user is already logged in, opening settings instead")
|
||||
sendUIRequestMessage("/")
|
||||
slog.Info("user is already logged in, opening app instead")
|
||||
showWindow(wv.webview.Window())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -434,37 +472,30 @@ func openInBrowser(url string) {
|
||||
}
|
||||
}
|
||||
|
||||
// parseURLScheme parses an ollama:// URL and returns whether it's a connect URL and the UI path
|
||||
func parseURLScheme(urlSchemeRequest string) (isConnect bool, uiPath string, err error) {
|
||||
// parseURLScheme parses an ollama:// URL and validates it
|
||||
// Supports: ollama:// (open app) and ollama://connect (OAuth)
|
||||
func parseURLScheme(urlSchemeRequest string) (isConnect bool, err error) {
|
||||
parsedURL, err := url.Parse(urlSchemeRequest)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
return false, fmt.Errorf("invalid URL: %w", err)
|
||||
}
|
||||
|
||||
// Check if this is a connect URL
|
||||
if parsedURL.Host == "connect" || strings.TrimPrefix(parsedURL.Path, "/") == "connect" {
|
||||
return true, "", nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Extract the UI path
|
||||
path := "/"
|
||||
if parsedURL.Path != "" && parsedURL.Path != "/" {
|
||||
// For URLs like ollama:///settings, use the path directly
|
||||
path = parsedURL.Path
|
||||
} else if parsedURL.Host != "" {
|
||||
// For URLs like ollama://settings (without triple slash),
|
||||
// the "settings" part is parsed as the host, not the path.
|
||||
// We need to convert it to a path by prepending "/"
|
||||
// This also handles ollama://settings/ where Windows adds a trailing slash
|
||||
path = "/" + parsedURL.Host
|
||||
// Allow bare ollama:// or ollama:/// to open the app
|
||||
if (parsedURL.Host == "" && parsedURL.Path == "") || parsedURL.Path == "/" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, path, nil
|
||||
return false, fmt.Errorf("unsupported ollama:// URL path: %s", urlSchemeRequest)
|
||||
}
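The rewritten parseURLScheme leans on how net/url splits ollama:// URLs between Host and Path. The following standalone sketch (not the app's code) shows why both ollama://connect and ollama:///connect are treated as connect requests, while a bare ollama:// is the plain open-the-app case.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, raw := range []string{"ollama://", "ollama://connect", "ollama:///connect"} {
		u, err := url.Parse(raw)
		if err != nil {
			fmt.Println(raw, "->", err)
			continue
		}
		// ollama://connect puts "connect" in Host; ollama:///connect puts it in Path.
		fmt.Printf("%-18s host=%q path=%q\n", raw, u.Host, u.Path)
	}
}
```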
|
||||
|
||||
// handleURLSchemeInCurrentInstance processes URL scheme requests in the current instance
|
||||
func handleURLSchemeInCurrentInstance(urlSchemeRequest string) {
|
||||
isConnect, uiPath, err := parseURLScheme(urlSchemeRequest)
|
||||
isConnect, err := parseURLScheme(urlSchemeRequest)
|
||||
if err != nil {
|
||||
slog.Error("failed to parse URL scheme request", "url", urlSchemeRequest, "error", err)
|
||||
return
|
||||
@@ -473,6 +504,8 @@ func handleURLSchemeInCurrentInstance(urlSchemeRequest string) {
|
||||
if isConnect {
|
||||
handleConnectURLScheme()
|
||||
} else {
|
||||
sendUIRequestMessage(uiPath)
|
||||
if wv.webview != nil {
|
||||
showWindow(wv.webview.Window())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,13 +191,6 @@ func LaunchNewApp() {
|
||||
C.launchApp(appName)
|
||||
}
|
||||
|
||||
// Send a request to the main app thread to load a UI page
|
||||
func sendUIRequestMessage(path string) {
|
||||
p := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(p))
|
||||
C.uiRequest(p)
|
||||
}
|
||||
|
||||
func registerLaunchAgent(hasCompletedFirstRun bool) {
|
||||
// Remove any stale Login Item registrations
|
||||
C.unregisterSelfFromLoginItem()
|
||||
|
||||
@@ -14,6 +14,7 @@ extern NSString *SystemWidePath;
|
||||
@interface AppDelegate () <NSWindowDelegate, WKNavigationDelegate, WKUIDelegate>
|
||||
@property(strong, nonatomic) NSStatusItem *statusItem;
|
||||
@property(assign, nonatomic) BOOL updateAvailable;
|
||||
@property(assign, nonatomic) BOOL systemShutdownInProgress;
|
||||
@end
|
||||
|
||||
@implementation AppDelegate
|
||||
@@ -24,27 +25,14 @@ bool firstTimeRun,startHidden; // Set in run before initialization
|
||||
for (NSURL *url in urls) {
|
||||
if ([url.scheme isEqualToString:@"ollama"]) {
|
||||
NSString *path = url.path;
|
||||
if (!path || [path isEqualToString:@""]) {
|
||||
// For URLs like ollama://settings (without triple slash),
|
||||
// the "settings" part is parsed as the host, not the path.
|
||||
// We need to convert it to a path by prepending "/"
|
||||
if (url.host && ![url.host isEqualToString:@""]) {
|
||||
path = [@"/" stringByAppendingString:url.host];
|
||||
} else {
|
||||
path = @"/";
|
||||
}
|
||||
}
|
||||
|
||||
if ([path isEqualToString:@"/connect"] || [url.host isEqualToString:@"connect"]) {
|
||||
|
||||
if (path && ([path isEqualToString:@"/connect"] || [url.host isEqualToString:@"connect"])) {
|
||||
// Special case: handle connect by opening browser instead of app
|
||||
handleConnectURL();
|
||||
} else {
|
||||
// Set app to be active and visible
|
||||
[NSApp setActivationPolicy:NSApplicationActivationPolicyRegular];
|
||||
[NSApp activateIgnoringOtherApps:YES];
|
||||
|
||||
// Open the path with the UI
|
||||
[self uiRequest:path];
|
||||
}
|
||||
|
||||
break;
|
||||
@@ -53,6 +41,13 @@ bool firstTimeRun,startHidden; // Set in run before initialization
|
||||
}
|
||||
|
||||
- (void)applicationDidFinishLaunching:(NSNotification *)aNotification {
|
||||
// Register for system shutdown/restart notification so we can allow termination
|
||||
[[[NSWorkspace sharedWorkspace] notificationCenter]
|
||||
addObserver:self
|
||||
selector:@selector(systemWillPowerOff:)
|
||||
name:NSWorkspaceWillPowerOffNotification
|
||||
object:nil];
|
||||
|
||||
// if we're in development mode, set the app icon
|
||||
NSString *bundlePath = [[NSBundle mainBundle] bundlePath];
|
||||
if (![bundlePath hasSuffix:@".app"]) {
|
||||
@@ -260,7 +255,7 @@ bool firstTimeRun,startHidden; // Set in run before initialization
|
||||
}
|
||||
|
||||
- (void)openHelp:(id)sender {
|
||||
NSURL *url = [NSURL URLWithString:@"https://github.com/ollama/ollama/tree/main/docs"];
|
||||
NSURL *url = [NSURL URLWithString:@"https://docs.ollama.com/"];
|
||||
[[NSWorkspace sharedWorkspace] openURL:url];
|
||||
}
|
||||
|
||||
@@ -291,7 +286,18 @@ bool firstTimeRun,startHidden; // Set in run before initialization
|
||||
[NSApp activateIgnoringOtherApps:YES];
|
||||
}
|
||||
|
||||
- (void)systemWillPowerOff:(NSNotification *)notification {
|
||||
// Set flag so applicationShouldTerminate: knows to allow termination.
|
||||
// The system will call applicationShouldTerminate: after posting this notification.
|
||||
self.systemShutdownInProgress = YES;
|
||||
}
|
||||
|
||||
- (NSApplicationTerminateReply)applicationShouldTerminate:(NSApplication *)sender {
|
||||
// Allow termination if the system is shutting down or restarting
|
||||
if (self.systemShutdownInProgress) {
|
||||
return NSTerminateNow;
|
||||
}
|
||||
// Otherwise just hide the app (for Cmd+Q, close button, etc.)
|
||||
[NSApp hide:nil];
|
||||
[NSApp setActivationPolicy:NSApplicationActivationPolicyAccessory];
|
||||
return NSTerminateCancel;
|
||||
|
||||
@@ -138,7 +138,7 @@ func (app *appCallbacks) HandleURLScheme(urlScheme string) {
|
||||
|
||||
// handleURLSchemeRequest processes URL scheme requests from other instances
|
||||
func handleURLSchemeRequest(urlScheme string) {
|
||||
isConnect, uiPath, err := parseURLScheme(urlScheme)
|
||||
isConnect, err := parseURLScheme(urlScheme)
|
||||
if err != nil {
|
||||
slog.Error("failed to parse URL scheme request", "url", urlScheme, "error", err)
|
||||
return
|
||||
@@ -147,11 +147,17 @@ func handleURLSchemeRequest(urlScheme string) {
|
||||
if isConnect {
|
||||
handleConnectURLScheme()
|
||||
} else {
|
||||
sendUIRequestMessage(uiPath)
|
||||
if wv.webview != nil {
|
||||
showWindow(wv.webview.Window())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func UpdateAvailable(ver string) error {
|
||||
if app.t == nil {
|
||||
slog.Debug("tray not yet initialized, skipping update notification")
|
||||
return nil
|
||||
}
|
||||
return app.t.UpdateAvailable(ver)
|
||||
}
|
||||
|
||||
@@ -163,6 +169,14 @@ func osRun(shutdown func(), hasCompletedFirstRun, startHidden bool) {
|
||||
log.Fatalf("Failed to start: %s", err)
|
||||
}
|
||||
|
||||
// Check for pending updates now that the tray is initialized.
|
||||
// The platform-independent check in app.go fires before osRun,
|
||||
// when app.t is still nil, so we must re-check here.
|
||||
if updater.IsUpdatePending() {
|
||||
slog.Debug("update pending on startup, showing tray notification")
|
||||
UpdateAvailable("")
|
||||
}
|
||||
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
@@ -261,11 +275,6 @@ func createLoginShortcut() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a request to the main app thread to load a UI page
|
||||
func sendUIRequestMessage(path string) {
|
||||
wintray.SendUIRequestMessage(path)
|
||||
}
|
||||
|
||||
func LaunchNewApp() {
|
||||
}
|
||||
|
||||
|
||||
@@ -282,7 +282,7 @@ func (w *Webview) Run(path string) unsafe.Pointer {
|
||||
"go", "rs", "swift", "kt", "scala", "sh", "bat", "yaml", "yml", "toml", "ini",
|
||||
"cfg", "conf", "log", "rtf",
|
||||
}
|
||||
imageExts := []string{"png", "jpg", "jpeg"}
|
||||
imageExts := []string{"png", "jpg", "jpeg", "webp"}
|
||||
allowedExts := append(textExts, imageExts...)
|
||||
|
||||
// Use native multiple file selection with extension filtering
|
||||
|
||||
@@ -169,37 +169,47 @@ DlgResult fileDlg(FileDlgParams* params) {
|
||||
}
|
||||
|
||||
NSArray* urls = [panel URLs];
|
||||
if(self->params->allowMultiple && [urls count] >= 1) {
|
||||
if([urls count] == 0) {
|
||||
return DLG_CANCEL;
|
||||
}
|
||||
|
||||
if(self->params->allowMultiple) {
|
||||
// For multiple files, we need to return all paths separated by null bytes
|
||||
char* bufPtr = self->params->buf;
|
||||
int remainingBuf = self->params->nbuf;
|
||||
|
||||
// Calculate total required buffer size first
|
||||
int totalSize = 0;
|
||||
for(NSURL* url in urls) {
|
||||
char tempBuf[PATH_MAX];
|
||||
if(![url getFileSystemRepresentation:tempBuf maxLength:PATH_MAX]) {
|
||||
return DLG_URLFAIL;
|
||||
}
|
||||
totalSize += strlen(tempBuf) + 1; // +1 for null terminator
|
||||
}
|
||||
totalSize += 1; // Final null terminator
|
||||
// Calculate total required buffer size first
|
||||
int totalSize = 0;
|
||||
for(NSURL* url in urls) {
|
||||
char tempBuf[PATH_MAX];
|
||||
if(![url getFileSystemRepresentation:tempBuf maxLength:PATH_MAX]) {
|
||||
return DLG_URLFAIL;
|
||||
}
|
||||
totalSize += strlen(tempBuf) + 1; // +1 for null terminator
|
||||
}
|
||||
totalSize += 1; // Final null terminator
|
||||
|
||||
if(totalSize > self->params->nbuf) {
|
||||
// Not enough buffer space
|
||||
return DLG_URLFAIL;
|
||||
}
|
||||
if(totalSize > self->params->nbuf) {
|
||||
// Not enough buffer space
|
||||
return DLG_URLFAIL;
|
||||
}
|
||||
|
||||
// Now actually copy the paths (we know we have space)
|
||||
bufPtr = self->params->buf;
|
||||
for(NSURL* url in urls) {
|
||||
char tempBuf[PATH_MAX];
|
||||
[url getFileSystemRepresentation:tempBuf maxLength:PATH_MAX];
|
||||
int pathLen = strlen(tempBuf);
|
||||
strcpy(bufPtr, tempBuf);
|
||||
bufPtr += pathLen + 1;
|
||||
}
|
||||
*bufPtr = '\0'; // Final null terminator
|
||||
// Now actually copy the paths (we know we have space)
|
||||
bufPtr = self->params->buf;
|
||||
for(NSURL* url in urls) {
|
||||
char tempBuf[PATH_MAX];
|
||||
[url getFileSystemRepresentation:tempBuf maxLength:PATH_MAX];
|
||||
int pathLen = strlen(tempBuf);
|
||||
strcpy(bufPtr, tempBuf);
|
||||
bufPtr += pathLen + 1;
|
||||
}
|
||||
*bufPtr = '\0'; // Final null terminator
|
||||
} else {
|
||||
// Single file/directory selection - write path to buffer
|
||||
NSURL* url = [urls firstObject];
|
||||
if(![url getFileSystemRepresentation:self->params->buf maxLength:self->params->nbuf]) {
|
||||
return DLG_URLFAIL;
|
||||
}
|
||||
}
|
||||
|
||||
return DLG_OK;
|
||||
|
||||
@@ -15,7 +15,7 @@ const multiFileBufferSize = w32.MAX_PATH * 10
|
||||
type WinDlgError int
|
||||
|
||||
func (e WinDlgError) Error() string {
|
||||
return fmt.Sprintf("CommDlgExtendedError: %#x", e)
|
||||
return fmt.Sprintf("CommDlgExtendedError: %#x", int(e))
|
||||
}
|
||||
|
||||
func err() error {
|
||||
|
||||
@@ -41,6 +41,11 @@ type InferenceCompute struct {
|
||||
VRAM string
|
||||
}
|
||||
|
||||
type InferenceInfo struct {
|
||||
Computes []InferenceCompute
|
||||
DefaultContextLength int
|
||||
}
|
||||
|
||||
func New(s *store.Store, devMode bool) *Server {
|
||||
p := resolvePath("ollama")
|
||||
return &Server{store: s, bin: p, dev: devMode}
|
||||
@@ -205,6 +210,11 @@ func (s *Server) cmd(ctx context.Context) (*exec.Cmd, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cloudDisabled, err := s.store.CloudDisabled()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmd := commandContext(ctx, s.bin, "serve")
|
||||
cmd.Stdout, cmd.Stderr = s.log, s.log
|
||||
|
||||
@@ -224,14 +234,17 @@ func (s *Server) cmd(ctx context.Context) (*exec.Cmd, error) {
|
||||
if _, err := os.Stat(settings.Models); err == nil {
|
||||
env["OLLAMA_MODELS"] = settings.Models
|
||||
} else {
|
||||
slog.Warn("models path not accessible, clearing models setting", "path", settings.Models, "err", err)
|
||||
settings.Models = ""
|
||||
s.store.SetSettings(settings)
|
||||
slog.Warn("models path not accessible, using default", "path", settings.Models, "err", err)
|
||||
}
|
||||
}
|
||||
if settings.ContextLength > 0 {
|
||||
env["OLLAMA_CONTEXT_LENGTH"] = strconv.Itoa(settings.ContextLength)
|
||||
}
|
||||
if cloudDisabled {
|
||||
env["OLLAMA_NO_CLOUD"] = "1"
|
||||
} else {
|
||||
env["OLLAMA_NO_CLOUD"] = "0"
|
||||
}
|
||||
cmd.Env = []string{}
|
||||
for k, v := range env {
|
||||
cmd.Env = append(cmd.Env, k+"="+v)
|
||||
@@ -264,9 +277,12 @@ func openRotatingLog() (io.WriteCloser, error) {
|
||||
|
||||
// Attempt to retrieve inference compute information from the server
|
||||
// log. Set ctx to timeout to control how long to wait for the logs to appear
|
||||
func GetInferenceComputer(ctx context.Context) ([]InferenceCompute, error) {
|
||||
inference := []InferenceCompute{}
|
||||
marker := regexp.MustCompile(`inference compute.*library=`)
|
||||
func GetInferenceInfo(ctx context.Context) (*InferenceInfo, error) {
|
||||
info := &InferenceInfo{}
|
||||
computeMarker := regexp.MustCompile(`inference compute.*library=`)
|
||||
defaultCtxMarker := regexp.MustCompile(`vram-based default context`)
|
||||
defaultCtxRegex := regexp.MustCompile(`default_num_ctx=(\d+)`)
|
||||
|
||||
q := `inference compute.*%s=["]([^"]*)["]`
|
||||
nq := `inference compute.*%s=(\S+)\s`
|
||||
type regex struct {
|
||||
@@ -332,8 +348,8 @@ func GetInferenceComputer(ctx context.Context) ([]InferenceCompute, error) {
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
match := marker.FindStringSubmatch(line)
|
||||
if len(match) > 0 {
|
||||
// Check for inference compute lines
|
||||
if computeMarker.MatchString(line) {
|
||||
ic := InferenceCompute{
|
||||
Library: get("library", line),
|
||||
Variant: get("variant", line),
|
||||
@@ -344,12 +360,25 @@ func GetInferenceComputer(ctx context.Context) ([]InferenceCompute, error) {
|
||||
}
|
||||
|
||||
slog.Info("Matched", "inference compute", ic)
|
||||
inference = append(inference, ic)
|
||||
} else {
|
||||
// Break out on first non matching line after we start matching
|
||||
if len(inference) > 0 {
|
||||
return inference, nil
|
||||
info.Computes = append(info.Computes, ic)
|
||||
continue
|
||||
}
|
||||
// Check for default context length line
|
||||
if defaultCtxMarker.MatchString(line) {
|
||||
match := defaultCtxRegex.FindStringSubmatch(line)
|
||||
if len(match) > 1 {
|
||||
numCtx, err := strconv.Atoi(match[1])
|
||||
if err == nil {
|
||||
info.DefaultContextLength = numCtx
|
||||
slog.Info("Matched default context length", "default_num_ctx", numCtx)
|
||||
}
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
// If we've found compute info but hit a non-matching line, return what we have
|
||||
// This handles older server versions that don't log the default context line
|
||||
if len(info.Computes) > 0 {
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
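The default-context parsing added to GetInferenceInfo boils down to running `default_num_ctx=(\d+)` against the server log. Here is a tiny standalone sketch using one of the log lines from the tests below; the surrounding log scanning and timeout handling are omitted.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Sample log line taken from the test fixtures in this change.
	line := `time=2025-06-30T09:23:07.417-07:00 level=INFO source=routes.go:1721 msg="vram-based default context" total_vram="96.0 GiB" default_num_ctx=262144`

	defaultCtxRegex := regexp.MustCompile(`default_num_ctx=(\d+)`)
	if m := defaultCtxRegex.FindStringSubmatch(line); len(m) > 1 {
		if n, err := strconv.Atoi(m[1]); err == nil {
			fmt.Println("default context length:", n) // 262144
		}
	}
}
```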
|
||||
|
||||
@@ -111,7 +111,7 @@ func TestServerCmd(t *testing.T) {
|
||||
for _, want := range tt.want {
|
||||
found := false
|
||||
for _, env := range cmd.Env {
|
||||
if strings.Contains(env, want) {
|
||||
if strings.HasPrefix(env, want) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
@@ -123,7 +123,7 @@ func TestServerCmd(t *testing.T) {
|
||||
|
||||
for _, dont := range tt.dont {
|
||||
for _, env := range cmd.Env {
|
||||
if strings.Contains(env, dont) {
|
||||
if strings.HasPrefix(env, dont) {
|
||||
t.Errorf("unexpected environment variable: %s", env)
|
||||
}
|
||||
}
|
||||
@@ -136,44 +136,119 @@ func TestServerCmd(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInferenceComputer(t *testing.T) {
|
||||
func TestServerCmdCloudSettingEnv(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
log string
|
||||
exp []InferenceCompute
|
||||
name string
|
||||
envValue string
|
||||
configContent string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "default cloud enabled",
|
||||
want: "OLLAMA_NO_CLOUD=0",
|
||||
},
|
||||
{
|
||||
name: "env disables cloud",
|
||||
envValue: "1",
|
||||
want: "OLLAMA_NO_CLOUD=1",
|
||||
},
|
||||
{
|
||||
name: "config disables cloud",
|
||||
configContent: `{"disable_ollama_cloud": true}`,
|
||||
want: "OLLAMA_NO_CLOUD=1",
|
||||
},
|
||||
{
|
||||
name: "invalid env disables cloud",
|
||||
envValue: "invalid",
|
||||
want: "OLLAMA_NO_CLOUD=1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpHome := t.TempDir()
|
||||
t.Setenv("HOME", tmpHome)
|
||||
t.Setenv("USERPROFILE", tmpHome)
|
||||
t.Setenv("OLLAMA_NO_CLOUD", tt.envValue)
|
||||
|
||||
if tt.configContent != "" {
|
||||
configDir := filepath.Join(tmpHome, ".ollama")
|
||||
if err := os.MkdirAll(configDir, 0o755); err != nil {
|
||||
t.Fatalf("mkdir config dir: %v", err)
|
||||
}
|
||||
configPath := filepath.Join(configDir, "server.json")
|
||||
if err := os.WriteFile(configPath, []byte(tt.configContent), 0o644); err != nil {
|
||||
t.Fatalf("write config: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
st := &store.Store{DBPath: filepath.Join(t.TempDir(), "db.sqlite")}
|
||||
defer st.Close()
|
||||
|
||||
s := &Server{store: st}
|
||||
cmd, err := s.cmd(t.Context())
|
||||
if err != nil {
|
||||
t.Fatalf("s.cmd() error = %v", err)
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, env := range cmd.Env {
|
||||
if env == tt.want {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("expected environment variable %q in command env", tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInferenceInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
log string
|
||||
expComputes []InferenceCompute
|
||||
expDefaultCtxLen int
|
||||
}{
|
||||
{
|
||||
name: "metal",
|
||||
log: `time=2025-06-30T09:23:07.374-07:00 level=DEBUG source=sched.go:108 msg="starting llm scheduler"
|
||||
time=2025-06-30T09:23:07.416-07:00 level=INFO source=types.go:130 msg="inference compute" id=0 library=metal variant="" compute="" driver=0.0 name="" total="96.0 GiB" available="96.0 GiB"
|
||||
time=2025-06-30T09:23:07.417-07:00 level=INFO source=routes.go:1721 msg="vram-based default context" total_vram="96.0 GiB" default_num_ctx=262144
|
||||
time=2025-06-30T09:25:56.197-07:00 level=DEBUG source=ggml.go:155 msg="key not found" key=general.alignment default=32
|
||||
`,
|
||||
exp: []InferenceCompute{{
|
||||
expComputes: []InferenceCompute{{
|
||||
Library: "metal",
|
||||
Driver: "0.0",
|
||||
VRAM: "96.0 GiB",
|
||||
}},
|
||||
expDefaultCtxLen: 262144,
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
log: `time=2025-07-01T17:59:51.470Z level=INFO source=gpu.go:377 msg="no compatible GPUs were discovered"
|
||||
time=2025-07-01T17:59:51.470Z level=INFO source=types.go:130 msg="inference compute" id=0 library=cpu variant="" compute="" driver=0.0 name="" total="31.3 GiB" available="30.4 GiB"
|
||||
time=2025-07-01T17:59:51.471Z level=INFO source=routes.go:1721 msg="vram-based default context" total_vram="31.3 GiB" default_num_ctx=32768
|
||||
[GIN] 2025/07/01 - 18:00:09 | 200 | 50.263µs | 100.126.204.152 | HEAD "/"
|
||||
`,
|
||||
exp: []InferenceCompute{{
|
||||
expComputes: []InferenceCompute{{
|
||||
Library: "cpu",
|
||||
Driver: "0.0",
|
||||
VRAM: "31.3 GiB",
|
||||
}},
|
||||
expDefaultCtxLen: 32768,
|
||||
},
|
||||
{
|
||||
name: "cuda1",
|
||||
log: `time=2025-07-01T19:33:43.162Z level=DEBUG source=amd_linux.go:419 msg="amdgpu driver not detected /sys/module/amdgpu"
|
||||
releasing cuda driver library
|
||||
time=2025-07-01T19:33:43.162Z level=INFO source=types.go:130 msg="inference compute" id=GPU-452cac9f-6960-839c-4fb3-0cec83699196 library=cuda variant=v12 compute=6.1 driver=12.7 name="NVIDIA GeForce GT 1030" total="3.9 GiB" available="3.9 GiB"
|
||||
time=2025-07-01T19:33:43.163Z level=INFO source=routes.go:1721 msg="vram-based default context" total_vram="3.9 GiB" default_num_ctx=4096
|
||||
[GIN] 2025/07/01 - 18:00:09 | 200 | 50.263µs | 100.126.204.152 | HEAD "/"
|
||||
`,
|
||||
exp: []InferenceCompute{{
|
||||
expComputes: []InferenceCompute{{
|
||||
Library: "cuda",
|
||||
Variant: "v12",
|
||||
Compute: "6.1",
|
||||
@@ -181,6 +256,7 @@ time=2025-07-01T19:33:43.162Z level=INFO source=types.go:130 msg="inference comp
|
||||
Name: "NVIDIA GeForce GT 1030",
|
||||
VRAM: "3.9 GiB",
|
||||
}},
|
||||
expDefaultCtxLen: 4096,
|
||||
},
|
||||
{
|
||||
name: "frank",
|
||||
@@ -188,9 +264,10 @@ time=2025-07-01T19:33:43.162Z level=INFO source=types.go:130 msg="inference comp
|
||||
releasing cuda driver library
|
||||
time=2025-07-01T19:36:13.315Z level=INFO source=types.go:130 msg="inference compute" id=GPU-d6de3398-9932-6902-11ec-fee8e424c8a2 library=cuda variant=v12 compute=7.5 driver=12.8 name="NVIDIA GeForce RTX 2080 Ti" total="10.6 GiB" available="10.4 GiB"
|
||||
time=2025-07-01T19:36:13.315Z level=INFO source=types.go:130 msg="inference compute" id=GPU-9abb57639fa80c50 library=rocm variant="" compute=gfx1030 driver=6.3 name=1002:73bf total="16.0 GiB" available="1.3 GiB"
|
||||
time=2025-07-01T19:36:13.316Z level=INFO source=routes.go:1721 msg="vram-based default context" total_vram="26.6 GiB" default_num_ctx=32768
|
||||
[GIN] 2025/07/01 - 18:00:09 | 200 | 50.263µs | 100.126.204.152 | HEAD "/"
|
||||
`,
|
||||
exp: []InferenceCompute{
|
||||
expComputes: []InferenceCompute{
|
||||
{
|
||||
Library: "cuda",
|
||||
Variant: "v12",
|
||||
@@ -207,6 +284,20 @@ time=2025-07-01T19:33:43.162Z level=INFO source=types.go:130 msg="inference comp
|
||||
VRAM: "16.0 GiB",
|
||||
},
|
||||
},
|
||||
expDefaultCtxLen: 32768,
|
||||
},
|
||||
{
|
||||
name: "missing_default_context",
|
||||
log: `time=2025-06-30T09:23:07.374-07:00 level=DEBUG source=sched.go:108 msg="starting llm scheduler"
|
||||
time=2025-06-30T09:23:07.416-07:00 level=INFO source=types.go:130 msg="inference compute" id=0 library=metal variant="" compute="" driver=0.0 name="" total="96.0 GiB" available="96.0 GiB"
|
||||
time=2025-06-30T09:25:56.197-07:00 level=DEBUG source=ggml.go:155 msg="key not found" key=general.alignment default=32
|
||||
`,
|
||||
expComputes: []InferenceCompute{{
|
||||
Library: "metal",
|
||||
Driver: "0.0",
|
||||
VRAM: "96.0 GiB",
|
||||
}},
|
||||
expDefaultCtxLen: 0, // No default context line, should return 0
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -219,18 +310,21 @@ time=2025-07-01T19:33:43.162Z level=INFO source=types.go:130 msg="inference comp
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 10*time.Millisecond)
|
||||
defer cancel()
|
||||
ics, err := GetInferenceComputer(ctx)
|
||||
info, err := GetInferenceInfo(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf(" failed to get inference compute: %v", err)
|
||||
t.Fatalf("failed to get inference info: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(ics, tt.exp) {
|
||||
t.Fatalf("got:\n%#v\nwant:\n%#v", ics, tt.exp)
|
||||
if !reflect.DeepEqual(info.Computes, tt.expComputes) {
|
||||
t.Fatalf("computes mismatch\ngot:\n%#v\nwant:\n%#v", info.Computes, tt.expComputes)
|
||||
}
|
||||
if info.DefaultContextLength != tt.expDefaultCtxLen {
|
||||
t.Fatalf("default context length mismatch: got %d, want %d", info.DefaultContextLength, tt.expDefaultCtxLen)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInferenceComputerTimeout(t *testing.T) {
|
||||
func TestGetInferenceInfoTimeout(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 10*time.Millisecond)
|
||||
defer cancel()
|
||||
tmpDir := t.TempDir()
|
||||
@@ -239,7 +333,7 @@ func TestGetInferenceComputerTimeout(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("failed to write log file %s: %s", serverLogPath, err)
|
||||
}
|
||||
_, err = GetInferenceComputer(ctx)
|
||||
_, err = GetInferenceInfo(ctx)
|
||||
if err == nil {
|
||||
t.Fatal("expected timeout")
|
||||
}
|
||||
|
||||
app/store/cloud_config.go (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
//go:build windows || darwin
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
)
|
||||
|
||||
const serverConfigFilename = "server.json"
|
||||
|
||||
type serverConfig struct {
|
||||
DisableOllamaCloud bool `json:"disable_ollama_cloud,omitempty"`
|
||||
}
|
||||
|
||||
// CloudDisabled returns whether cloud features should be disabled.
|
||||
// The source of truth is: OLLAMA_NO_CLOUD OR ~/.ollama/server.json:disable_ollama_cloud.
|
||||
func (s *Store) CloudDisabled() (bool, error) {
|
||||
disabled, _, err := s.CloudStatus()
|
||||
return disabled, err
|
||||
}
|
||||
|
||||
// CloudStatus returns whether cloud is disabled and the source of that decision.
|
||||
// Source is one of: "none", "env", "config", "both".
|
||||
func (s *Store) CloudStatus() (bool, string, error) {
|
||||
if err := s.ensureDB(); err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
configDisabled, err := readServerConfigCloudDisabled()
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
envDisabled := envconfig.NoCloudEnv()
|
||||
return envDisabled || configDisabled, cloudStatusSource(envDisabled, configDisabled), nil
|
||||
}
|
||||
|
||||
// SetCloudEnabled writes the cloud setting to ~/.ollama/server.json.
|
||||
func (s *Store) SetCloudEnabled(enabled bool) error {
|
||||
if err := s.ensureDB(); err != nil {
|
||||
return err
|
||||
}
|
||||
return setCloudEnabled(enabled)
|
||||
}
|
||||
|
||||
func setCloudEnabled(enabled bool) error {
|
||||
configPath, err := serverConfigPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(configPath), 0o755); err != nil {
|
||||
return fmt.Errorf("create server config directory: %w", err)
|
||||
}
|
||||
|
||||
configMap := map[string]any{}
|
||||
if data, err := os.ReadFile(configPath); err == nil {
|
||||
if err := json.Unmarshal(data, &configMap); err != nil {
|
||||
// If the existing file is invalid JSON, overwrite with a fresh object.
|
||||
configMap = map[string]any{}
|
||||
}
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return fmt.Errorf("read server config: %w", err)
|
||||
}
|
||||
|
||||
configMap["disable_ollama_cloud"] = !enabled
|
||||
|
||||
data, err := json.MarshalIndent(configMap, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal server config: %w", err)
|
||||
}
|
||||
data = append(data, '\n')
|
||||
|
||||
if err := os.WriteFile(configPath, data, 0o644); err != nil {
|
||||
return fmt.Errorf("write server config: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readServerConfigCloudDisabled() (bool, error) {
|
||||
configPath, err := serverConfigPath()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("read server config: %w", err)
|
||||
}
|
||||
|
||||
var cfg serverConfig
|
||||
// Invalid or unexpected JSON should not block startup; treat as default.
|
||||
if json.Unmarshal(data, &cfg) == nil {
|
||||
return cfg.DisableOllamaCloud, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func serverConfigPath() (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("resolve home directory: %w", err)
|
||||
}
|
||||
return filepath.Join(home, ".ollama", serverConfigFilename), nil
|
||||
}
|
||||
|
||||
func cloudStatusSource(envDisabled bool, configDisabled bool) string {
|
||||
switch {
|
||||
case envDisabled && configDisabled:
|
||||
return "both"
|
||||
case envDisabled:
|
||||
return "env"
|
||||
case configDisabled:
|
||||
return "config"
|
||||
default:
|
||||
return "none"
|
||||
}
|
||||
}
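For reference, setCloudEnabled merges into any existing server.json rather than replacing it. This small sketch reproduces that merge step in isolation; the file path handling and error wrapping from the real function are omitted, and the sample existing config is made up.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Pretend this was read from ~/.ollama/server.json.
	existing := []byte(`{"another_key":"value","disable_ollama_cloud":true}`)

	configMap := map[string]any{}
	if err := json.Unmarshal(existing, &configMap); err != nil {
		configMap = map[string]any{}
	}

	// Enabling cloud clears the disable flag while keeping unrelated keys intact.
	configMap["disable_ollama_cloud"] = false

	data, err := json.MarshalIndent(configMap, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {
	//   "another_key": "value",
	//   "disable_ollama_cloud": false
	// }
}
```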
|
||||
app/store/cloud_config_test.go (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
//go:build windows || darwin
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCloudDisabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
envValue string
|
||||
configContent string
|
||||
wantDisabled bool
|
||||
wantSource string
|
||||
}{
|
||||
{
|
||||
name: "default enabled",
|
||||
wantDisabled: false,
|
||||
wantSource: "none",
|
||||
},
|
||||
{
|
||||
name: "env disables cloud",
|
||||
envValue: "1",
|
||||
wantDisabled: true,
|
||||
wantSource: "env",
|
||||
},
|
||||
{
|
||||
name: "config disables cloud",
|
||||
configContent: `{"disable_ollama_cloud": true}`,
|
||||
wantDisabled: true,
|
||||
wantSource: "config",
|
||||
},
|
||||
{
|
||||
name: "env and config",
|
||||
envValue: "1",
|
||||
configContent: `{"disable_ollama_cloud": false}`,
|
||||
wantDisabled: true,
|
||||
wantSource: "env",
|
||||
},
|
||||
{
|
||||
name: "invalid config is ignored",
|
||||
configContent: `{bad`,
|
||||
wantDisabled: false,
|
||||
wantSource: "none",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpHome := t.TempDir()
|
||||
setTestHome(t, tmpHome)
|
||||
t.Setenv("OLLAMA_NO_CLOUD", tt.envValue)
|
||||
|
||||
if tt.configContent != "" {
|
||||
configDir := filepath.Join(tmpHome, ".ollama")
|
||||
if err := os.MkdirAll(configDir, 0o755); err != nil {
|
||||
t.Fatalf("mkdir config dir: %v", err)
|
||||
}
|
||||
configPath := filepath.Join(configDir, serverConfigFilename)
|
||||
if err := os.WriteFile(configPath, []byte(tt.configContent), 0o644); err != nil {
|
||||
t.Fatalf("write config: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
s := &Store{DBPath: filepath.Join(tmpHome, "db.sqlite")}
|
||||
defer s.Close()
|
||||
|
||||
disabled, err := s.CloudDisabled()
|
||||
if err != nil {
|
||||
t.Fatalf("CloudDisabled() error = %v", err)
|
||||
}
|
||||
if disabled != tt.wantDisabled {
|
||||
t.Fatalf("CloudDisabled() = %v, want %v", disabled, tt.wantDisabled)
|
||||
}
|
||||
|
||||
statusDisabled, source, err := s.CloudStatus()
|
||||
if err != nil {
|
||||
t.Fatalf("CloudStatus() error = %v", err)
|
||||
}
|
||||
if statusDisabled != tt.wantDisabled {
|
||||
t.Fatalf("CloudStatus() disabled = %v, want %v", statusDisabled, tt.wantDisabled)
|
||||
}
|
||||
if source != tt.wantSource {
|
||||
t.Fatalf("CloudStatus() source = %v, want %v", source, tt.wantSource)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetCloudEnabled(t *testing.T) {
|
||||
tmpHome := t.TempDir()
|
||||
setTestHome(t, tmpHome)
|
||||
|
||||
configDir := filepath.Join(tmpHome, ".ollama")
|
||||
if err := os.MkdirAll(configDir, 0o755); err != nil {
|
||||
t.Fatalf("mkdir config dir: %v", err)
|
||||
}
|
||||
configPath := filepath.Join(configDir, serverConfigFilename)
|
||||
if err := os.WriteFile(configPath, []byte(`{"another_key":"value","disable_ollama_cloud":true}`), 0o644); err != nil {
|
||||
t.Fatalf("seed config: %v", err)
|
||||
}
|
||||
|
||||
s := &Store{DBPath: filepath.Join(tmpHome, "db.sqlite")}
|
||||
defer s.Close()
|
||||
|
||||
if err := s.SetCloudEnabled(true); err != nil {
|
||||
t.Fatalf("SetCloudEnabled(true) error = %v", err)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read config: %v", err)
|
||||
}
|
||||
|
||||
var got map[string]any
|
||||
if err := json.Unmarshal(data, &got); err != nil {
|
||||
t.Fatalf("unmarshal config: %v", err)
|
||||
}
|
||||
|
||||
if got["disable_ollama_cloud"] != false {
|
||||
t.Fatalf("disable_ollama_cloud = %v, want false", got["disable_ollama_cloud"])
|
||||
}
|
||||
if got["another_key"] != "value" {
|
||||
t.Fatalf("another_key = %v, want value", got["another_key"])
|
||||
}
|
||||
}
|
||||
@@ -9,12 +9,12 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
sqlite3 "github.com/mattn/go-sqlite3"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// currentSchemaVersion defines the current database schema version.
|
||||
// Increment this when making schema changes that require migrations.
|
||||
const currentSchemaVersion = 12
|
||||
const currentSchemaVersion = 16
|
||||
|
||||
// database wraps the SQLite connection.
|
||||
// SQLite handles its own locking for concurrent access:
|
||||
@@ -73,7 +73,7 @@ func (db *database) init() error {
|
||||
agent BOOLEAN NOT NULL DEFAULT 0,
|
||||
tools BOOLEAN NOT NULL DEFAULT 0,
|
||||
working_dir TEXT NOT NULL DEFAULT '',
|
||||
context_length INTEGER NOT NULL DEFAULT 4096,
|
||||
context_length INTEGER NOT NULL DEFAULT 0,
|
||||
window_width INTEGER NOT NULL DEFAULT 0,
|
||||
window_height INTEGER NOT NULL DEFAULT 0,
|
||||
config_migrated BOOLEAN NOT NULL DEFAULT 0,
|
||||
@@ -82,9 +82,12 @@ func (db *database) init() error {
|
||||
websearch_enabled BOOLEAN NOT NULL DEFAULT 0,
|
||||
selected_model TEXT NOT NULL DEFAULT '',
|
||||
sidebar_open BOOLEAN NOT NULL DEFAULT 0,
|
||||
last_home_view TEXT NOT NULL DEFAULT 'launch',
|
||||
think_enabled BOOLEAN NOT NULL DEFAULT 0,
|
||||
think_level TEXT NOT NULL DEFAULT '',
|
||||
cloud_setting_migrated BOOLEAN NOT NULL DEFAULT 0,
|
||||
remote TEXT NOT NULL DEFAULT '', -- deprecated
|
||||
auto_update_enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
schema_version INTEGER NOT NULL DEFAULT %d
|
||||
);
|
||||
|
||||
@@ -244,6 +247,30 @@ func (db *database) migrate() error {
|
||||
return fmt.Errorf("migrate v11 to v12: %w", err)
|
||||
}
|
||||
version = 12
|
||||
case 12:
|
||||
// add cloud_setting_migrated column to settings table
|
||||
if err := db.migrateV12ToV13(); err != nil {
|
||||
return fmt.Errorf("migrate v12 to v13: %w", err)
|
||||
}
|
||||
version = 13
|
||||
case 13:
|
||||
// change default context_length from 4096 to 0 (VRAM-based tiered defaults)
|
||||
if err := db.migrateV13ToV14(); err != nil {
|
||||
return fmt.Errorf("migrate v13 to v14: %w", err)
|
||||
}
|
||||
version = 14
|
||||
case 14:
|
||||
// add auto_update_enabled column to settings table
|
||||
if err := db.migrateV14ToV15(); err != nil {
|
||||
return fmt.Errorf("migrate v14 to v15: %w", err)
|
||||
}
|
||||
version = 15
|
||||
case 15:
|
||||
// add last_home_view column to settings table
|
||||
if err := db.migrateV15ToV16(); err != nil {
|
||||
return fmt.Errorf("migrate v15 to v16: %w", err)
|
||||
}
|
||||
version = 16
|
||||
default:
|
||||
// If we have a version we don't recognize, just set it to current
|
||||
// This might happen during development
|
||||
@@ -452,6 +479,67 @@ func (db *database) migrateV11ToV12() error {
    return nil
}

// migrateV12ToV13 adds cloud_setting_migrated to settings.
func (db *database) migrateV12ToV13() error {
    _, err := db.conn.Exec(`ALTER TABLE settings ADD COLUMN cloud_setting_migrated BOOLEAN NOT NULL DEFAULT 0`)
    if err != nil && !duplicateColumnError(err) {
        return fmt.Errorf("add cloud_setting_migrated column: %w", err)
    }

    _, err = db.conn.Exec(`UPDATE settings SET schema_version = 13`)
    if err != nil {
        return fmt.Errorf("update schema version: %w", err)
    }

    return nil
}

// migrateV13ToV14 changes the default context_length from 4096 to 0.
// When context_length is 0, the ollama server uses VRAM-based tiered defaults.
func (db *database) migrateV13ToV14() error {
    _, err := db.conn.Exec(`UPDATE settings SET context_length = 0 WHERE context_length = 4096`)
    if err != nil {
        return fmt.Errorf("update context_length default: %w", err)
    }

    _, err = db.conn.Exec(`UPDATE settings SET schema_version = 14`)
    if err != nil {
        return fmt.Errorf("update schema version: %w", err)
    }

    return nil
}

// migrateV14ToV15 adds the auto_update_enabled column to the settings table
func (db *database) migrateV14ToV15() error {
    _, err := db.conn.Exec(`ALTER TABLE settings ADD COLUMN auto_update_enabled BOOLEAN NOT NULL DEFAULT 1`)
    if err != nil && !duplicateColumnError(err) {
        return fmt.Errorf("add auto_update_enabled column: %w", err)
    }

    _, err = db.conn.Exec(`UPDATE settings SET schema_version = 15`)
    if err != nil {
        return fmt.Errorf("update schema version: %w", err)
    }

    return nil
}

// migrateV15ToV16 adds the last_home_view column to the settings table
func (db *database) migrateV15ToV16() error {
    _, err := db.conn.Exec(`ALTER TABLE settings ADD COLUMN last_home_view TEXT NOT NULL DEFAULT 'launch'`)
    if err != nil && !duplicateColumnError(err) {
        return fmt.Errorf("add last_home_view column: %w", err)
    }

    _, err = db.conn.Exec(`UPDATE settings SET schema_version = 16`)
    if err != nil {
        return fmt.Errorf("update schema version: %w", err)
    }

    return nil
}

// cleanupOrphanedData removes orphaned records that may exist due to the foreign key bug
func (db *database) cleanupOrphanedData() error {
    _, err := db.conn.Exec(`
@@ -482,19 +570,11 @@ func (db *database) cleanupOrphanedData() error {
}

func duplicateColumnError(err error) bool {
-   if sqlite3Err, ok := err.(sqlite3.Error); ok {
-       return sqlite3Err.Code == sqlite3.ErrError &&
-           strings.Contains(sqlite3Err.Error(), "duplicate column name")
-   }
-   return false
+   return err != nil && strings.Contains(err.Error(), "duplicate column name")
}

func columnNotExists(err error) bool {
-   if sqlite3Err, ok := err.(sqlite3.Error); ok {
-       return sqlite3Err.Code == sqlite3.ErrError &&
-           strings.Contains(sqlite3Err.Error(), "no such column")
-   }
-   return false
+   return err != nil && strings.Contains(err.Error(), "no such column")
}

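The error helpers are simplified here as well: instead of type-asserting to `sqlite3.Error`, they now match on the error text, which keeps the `ALTER TABLE` migrations idempotent regardless of which concrete error type the driver surfaces. A small, self-contained sketch of the resulting pattern; the `addColumn` stub stands in for `db.conn.Exec` and fabricates SQLite's duplicate-column message.

```go
package main

import (
    "errors"
    "fmt"
    "strings"
)

// Same string-matching helper as in the diff.
func duplicateColumnError(err error) bool {
    return err != nil && strings.Contains(err.Error(), "duplicate column name")
}

// addColumn is a stand-in for db.conn.Exec(`ALTER TABLE ... ADD COLUMN ...`);
// it fabricates the error SQLite reports when the column already exists.
func addColumn(alreadyExists bool) error {
    if alreadyExists {
        return errors.New("duplicate column name: last_home_view")
    }
    return nil
}

func main() {
    for _, exists := range []bool{false, true} {
        err := addColumn(exists)
        if err != nil && !duplicateColumnError(err) {
            fmt.Println("fatal:", err)
            continue
        }
        // Column added or already present: either way the step is safe to re-run.
        fmt.Printf("migration step ok (column already existed: %v)\n", exists)
    }
}
```
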
func (db *database) getAllChats() ([]Chat, error) {
@@ -1108,9 +1188,9 @@ func (db *database) getSettings() (Settings, error) {
    var s Settings

    err := db.conn.QueryRow(`
-       SELECT expose, survey, browser, models, agent, tools, working_dir, context_length, airplane_mode, turbo_enabled, websearch_enabled, selected_model, sidebar_open, think_enabled, think_level
+       SELECT expose, survey, browser, models, agent, tools, working_dir, context_length, turbo_enabled, websearch_enabled, selected_model, sidebar_open, last_home_view, think_enabled, think_level, auto_update_enabled
        FROM settings
-   `).Scan(&s.Expose, &s.Survey, &s.Browser, &s.Models, &s.Agent, &s.Tools, &s.WorkingDir, &s.ContextLength, &s.AirplaneMode, &s.TurboEnabled, &s.WebSearchEnabled, &s.SelectedModel, &s.SidebarOpen, &s.ThinkEnabled, &s.ThinkLevel)
+   `).Scan(&s.Expose, &s.Survey, &s.Browser, &s.Models, &s.Agent, &s.Tools, &s.WorkingDir, &s.ContextLength, &s.TurboEnabled, &s.WebSearchEnabled, &s.SelectedModel, &s.SidebarOpen, &s.LastHomeView, &s.ThinkEnabled, &s.ThinkLevel, &s.AutoUpdateEnabled)
    if err != nil {
        return Settings{}, fmt.Errorf("get settings: %w", err)
    }
@@ -1119,16 +1199,58 @@ func (db *database) getSettings() (Settings, error) {
}

func (db *database) setSettings(s Settings) error {
    lastHomeView := strings.ToLower(strings.TrimSpace(s.LastHomeView))
    validLaunchView := map[string]struct{}{
        "launch":   {},
        "openclaw": {},
        "claude":   {},
        "codex":    {},
        "opencode": {},
        "droid":    {},
        "pi":       {},
    }
    if lastHomeView != "chat" {
        if _, ok := validLaunchView[lastHomeView]; !ok {
            lastHomeView = "launch"
        }
    }

    _, err := db.conn.Exec(`
-       UPDATE settings
-       SET expose = ?, survey = ?, browser = ?, models = ?, agent = ?, tools = ?, working_dir = ?, context_length = ?, airplane_mode = ?, turbo_enabled = ?, websearch_enabled = ?, selected_model = ?, sidebar_open = ?, think_enabled = ?, think_level = ?
-   `, s.Expose, s.Survey, s.Browser, s.Models, s.Agent, s.Tools, s.WorkingDir, s.ContextLength, s.AirplaneMode, s.TurboEnabled, s.WebSearchEnabled, s.SelectedModel, s.SidebarOpen, s.ThinkEnabled, s.ThinkLevel)
+       UPDATE settings
+       SET expose = ?, survey = ?, browser = ?, models = ?, agent = ?, tools = ?, working_dir = ?, context_length = ?, turbo_enabled = ?, websearch_enabled = ?, selected_model = ?, sidebar_open = ?, last_home_view = ?, think_enabled = ?, think_level = ?, auto_update_enabled = ?
+   `, s.Expose, s.Survey, s.Browser, s.Models, s.Agent, s.Tools, s.WorkingDir, s.ContextLength, s.TurboEnabled, s.WebSearchEnabled, s.SelectedModel, s.SidebarOpen, lastHomeView, s.ThinkEnabled, s.ThinkLevel, s.AutoUpdateEnabled)
    if err != nil {
        return fmt.Errorf("set settings: %w", err)
    }
    return nil
}
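
`setSettings` now normalizes `last_home_view` before persisting it: the value is lower-cased and trimmed, "chat" is always accepted, and anything outside the launch-view allowlist falls back to "launch". The same logic restated as a standalone sketch, with a few example inputs:

```go
package main

import (
    "fmt"
    "strings"
)

// normalizeHomeView restates the validation in setSettings above. "chat" is
// always kept; otherwise the value must be one of the launch views or it
// falls back to "launch".
func normalizeHomeView(v string) string {
    v = strings.ToLower(strings.TrimSpace(v))
    if v == "chat" {
        return v
    }
    valid := map[string]struct{}{
        "launch": {}, "openclaw": {}, "claude": {}, "codex": {},
        "opencode": {}, "droid": {}, "pi": {},
    }
    if _, ok := valid[v]; !ok {
        return "launch"
    }
    return v
}

func main() {
    for _, in := range []string{"Chat", "  Codex ", "vim", ""} {
        fmt.Printf("%q -> %q\n", in, normalizeHomeView(in))
    }
}
```
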
func (db *database) isCloudSettingMigrated() (bool, error) {
    var migrated bool
    err := db.conn.QueryRow("SELECT cloud_setting_migrated FROM settings").Scan(&migrated)
    if err != nil {
        return false, fmt.Errorf("get cloud setting migration status: %w", err)
    }
    return migrated, nil
}

func (db *database) setCloudSettingMigrated(migrated bool) error {
    _, err := db.conn.Exec("UPDATE settings SET cloud_setting_migrated = ?", migrated)
    if err != nil {
        return fmt.Errorf("set cloud setting migration status: %w", err)
    }
    return nil
}

func (db *database) getAirplaneMode() (bool, error) {
    var airplaneMode bool
    err := db.conn.QueryRow("SELECT airplane_mode FROM settings").Scan(&airplaneMode)
    if err != nil {
        return false, fmt.Errorf("get airplane_mode: %w", err)
    }
    return airplaneMode, nil
}

func (db *database) getWindowSize() (int, int, error) {
    var width, height int
    err := db.conn.QueryRow("SELECT window_width, window_height FROM settings").Scan(&width, &height)

@@ -98,6 +98,82 @@ func TestSchemaMigrations(t *testing.T) {
    })
}

func TestMigrationV13ToV14ContextLength(t *testing.T) {
    tmpDir := t.TempDir()
    dbPath := filepath.Join(tmpDir, "test.db")

    db, err := newDatabase(dbPath)
    if err != nil {
        t.Fatalf("failed to create database: %v", err)
    }
    defer db.Close()

    _, err = db.conn.Exec("UPDATE settings SET context_length = 4096, schema_version = 13")
    if err != nil {
        t.Fatalf("failed to seed v13 settings row: %v", err)
    }

    if err := db.migrate(); err != nil {
        t.Fatalf("migration from v13 to v14 failed: %v", err)
    }

    var contextLength int
    if err := db.conn.QueryRow("SELECT context_length FROM settings").Scan(&contextLength); err != nil {
        t.Fatalf("failed to read context_length: %v", err)
    }

    if contextLength != 0 {
        t.Fatalf("expected context_length to migrate to 0, got %d", contextLength)
    }

    version, err := db.getSchemaVersion()
    if err != nil {
        t.Fatalf("failed to get schema version: %v", err)
    }
    if version != currentSchemaVersion {
        t.Fatalf("expected schema version %d, got %d", currentSchemaVersion, version)
    }
}

func TestMigrationV15ToV16LastHomeViewDefaultsToLaunch(t *testing.T) {
    tmpDir := t.TempDir()
    dbPath := filepath.Join(tmpDir, "test.db")

    db, err := newDatabase(dbPath)
    if err != nil {
        t.Fatalf("failed to create database: %v", err)
    }
    defer db.Close()

    if _, err := db.conn.Exec(`
        ALTER TABLE settings DROP COLUMN last_home_view;
        UPDATE settings SET schema_version = 15;
    `); err != nil {
        t.Fatalf("failed to seed v15 settings row: %v", err)
    }

    if err := db.migrate(); err != nil {
        t.Fatalf("migration from v15 to v16 failed: %v", err)
    }

    var lastHomeView string
    if err := db.conn.QueryRow("SELECT last_home_view FROM settings").Scan(&lastHomeView); err != nil {
        t.Fatalf("failed to read last_home_view: %v", err)
    }

    if lastHomeView != "launch" {
        t.Fatalf("expected last_home_view to default to launch after migration, got %q", lastHomeView)
    }

    version, err := db.getSchemaVersion()
    if err != nil {
        t.Fatalf("failed to get schema version: %v", err)
    }
    if version != currentSchemaVersion {
        t.Fatalf("expected schema version %d, got %d", currentSchemaVersion, version)
    }
}

func TestChatDeletionWithCascade(t *testing.T) {
    t.Run("chat deletion cascades to related messages", func(t *testing.T) {
        tmpDir := t.TempDir()

@@ -127,6 +127,65 @@ func TestNoConfigToMigrate(t *testing.T) {
    }
}

func TestCloudMigrationFromAirplaneMode(t *testing.T) {
    tmpHome := t.TempDir()
    setTestHome(t, tmpHome)
    t.Setenv("OLLAMA_NO_CLOUD", "")

    dbPath := filepath.Join(tmpHome, "db.sqlite")
    db, err := newDatabase(dbPath)
    if err != nil {
        t.Fatalf("failed to create database: %v", err)
    }

    if _, err := db.conn.Exec("UPDATE settings SET airplane_mode = 1, cloud_setting_migrated = 0"); err != nil {
        db.Close()
        t.Fatalf("failed to seed airplane migration state: %v", err)
    }
    db.Close()

    s := Store{DBPath: dbPath}
    defer s.Close()

    // Trigger DB initialization + one-time cloud migration.
    if _, err := s.ID(); err != nil {
        t.Fatalf("failed to initialize store: %v", err)
    }

    disabled, err := s.CloudDisabled()
    if err != nil {
        t.Fatalf("CloudDisabled() error: %v", err)
    }
    if !disabled {
        t.Fatal("expected cloud to be disabled after migrating airplane_mode=true")
    }

    configPath := filepath.Join(tmpHome, ".ollama", serverConfigFilename)
    data, err := os.ReadFile(configPath)
    if err != nil {
        t.Fatalf("failed to read migrated server config: %v", err)
    }

    var cfg map[string]any
    if err := json.Unmarshal(data, &cfg); err != nil {
        t.Fatalf("failed to parse migrated server config: %v", err)
    }
    if cfg["disable_ollama_cloud"] != true {
        t.Fatalf("disable_ollama_cloud = %v, want true", cfg["disable_ollama_cloud"])
    }

    var airplaneMode, migrated bool
    if err := s.db.conn.QueryRow("SELECT airplane_mode, cloud_setting_migrated FROM settings").Scan(&airplaneMode, &migrated); err != nil {
        t.Fatalf("failed to read migration flags from DB: %v", err)
    }
    if !airplaneMode {
        t.Fatal("expected legacy airplane_mode value to remain unchanged")
    }
    if !migrated {
        t.Fatal("expected cloud_setting_migrated to be true")
    }
}

const (
    v1Schema = `
CREATE TABLE IF NOT EXISTS settings (

@@ -149,9 +149,6 @@ type Settings struct {
    // ContextLength specifies the context length for the ollama server (using OLLAMA_CONTEXT_LENGTH)
    ContextLength int

-   // AirplaneMode when true, turns off Ollama Turbo features and only uses local models
-   AirplaneMode bool
-
    // TurboEnabled indicates if Ollama Turbo features are enabled
    TurboEnabled bool

@@ -169,6 +166,12 @@ type Settings struct {

    // SidebarOpen indicates if the chat sidebar is open
    SidebarOpen bool

+   // LastHomeView stores the preferred home route target ("chat" or integration name)
+   LastHomeView string
+
+   // AutoUpdateEnabled indicates if automatic updates should be downloaded
+   AutoUpdateEnabled bool
}

type Store struct {
@@ -259,6 +262,40 @@ func (s *Store) ensureDB() error {
        }
    }

    // Run one-time migration from legacy airplane_mode behavior.
    if err := s.migrateCloudSetting(database); err != nil {
        return fmt.Errorf("migrate cloud setting: %w", err)
    }

    return nil
}

// migrateCloudSetting migrates legacy airplane_mode into server.json exactly once.
// After this, cloud state is sourced from server.json OR OLLAMA_NO_CLOUD.
func (s *Store) migrateCloudSetting(database *database) error {
    migrated, err := database.isCloudSettingMigrated()
    if err != nil {
        return err
    }
    if migrated {
        return nil
    }

    airplaneMode, err := database.getAirplaneMode()
    if err != nil {
        return err
    }

    if airplaneMode {
        if err := setCloudEnabled(false); err != nil {
            return fmt.Errorf("migrate airplane_mode to cloud disabled: %w", err)
        }
    }

    if err := database.setCloudSettingMigrated(true); err != nil {
        return err
    }

    return nil
}

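The migration runs at most once: if the flag is already set it returns early, otherwise a legacy `airplane_mode = true` is translated into the cloud-disabled state in server.json via `setCloudEnabled(false)`, and the flag is recorded so the legacy column is never consulted again. A rough, self-contained sketch of the effect on disk, based on the `disable_ollama_cloud` key that TestCloudMigrationFromAirplaneMode asserts on; the JSON layout and file handling here are assumptions, since the real write goes through `setCloudEnabled`.

```go
package main

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"
)

// writeCloudDisabled sketches the observable result of the one-time
// migration: a legacy airplane_mode=true ends up as disable_ollama_cloud in
// the server config. The exact file contents and permissions are assumptions.
func writeCloudDisabled(home string, airplaneMode bool) error {
    if !airplaneMode {
        return nil // nothing to migrate
    }
    cfg := map[string]any{"disable_ollama_cloud": true}
    data, err := json.Marshal(cfg)
    if err != nil {
        return err
    }
    path := filepath.Join(home, ".ollama", "server.json")
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        return err
    }
    return os.WriteFile(path, data, 0o644)
}

func main() {
    home, err := os.MkdirTemp("", "ollama-home")
    if err != nil {
        panic(err)
    }
    if err := writeCloudDisabled(home, true); err != nil {
        panic(err)
    }
    fmt.Println("wrote", filepath.Join(home, ".ollama", "server.json"))
}
```
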
@@ -355,6 +392,10 @@ func (s *Store) Settings() (Settings, error) {
        }
    }

    if settings.LastHomeView == "" {
        settings.LastHomeView = "launch"
    }

    return settings, nil
}

@@ -81,6 +81,32 @@ func TestStore(t *testing.T) {
        }
    })

    t.Run("settings default home view is launch", func(t *testing.T) {
        loaded, err := s.Settings()
        if err != nil {
            t.Fatal(err)
        }

        if loaded.LastHomeView != "launch" {
            t.Fatalf("expected default LastHomeView to be launch, got %q", loaded.LastHomeView)
        }
    })

    t.Run("settings empty home view falls back to launch", func(t *testing.T) {
        if err := s.SetSettings(Settings{LastHomeView: ""}); err != nil {
            t.Fatal(err)
        }

        loaded, err := s.Settings()
        if err != nil {
            t.Fatal(err)
        }

        if loaded.LastHomeView != "launch" {
            t.Fatalf("expected empty LastHomeView to fall back to launch, got %q", loaded.LastHomeView)
        }
    })

    t.Run("window size", func(t *testing.T) {
        if err := s.SetWindowSize(1024, 768); err != nil {
            t.Fatal(err)

app/store/test_home_test.go (new file, 11 lines)
@@ -0,0 +1,11 @@
//go:build windows || darwin

package store

import "testing"

func setTestHome(t *testing.T, home string) {
    t.Helper()
    t.Setenv("HOME", home)
    t.Setenv("USERPROFILE", home)
}

app/store/testdata/schema.sql (vendored, 2 lines changed)
@@ -13,7 +13,7 @@ CREATE TABLE IF NOT EXISTS settings (
    agent BOOLEAN NOT NULL DEFAULT 0,
    tools BOOLEAN NOT NULL DEFAULT 0,
    working_dir TEXT NOT NULL DEFAULT '',
-   context_length INTEGER NOT NULL DEFAULT 4096,
+   context_length INTEGER NOT NULL DEFAULT 0,
    window_width INTEGER NOT NULL DEFAULT 0,
    window_height INTEGER NOT NULL DEFAULT 0,
    config_migrated BOOLEAN NOT NULL DEFAULT 0,

app/tools/cloud_policy.go (new file, 35 lines)
@@ -0,0 +1,35 @@
//go:build windows || darwin

package tools

import (
    "context"
    "errors"

    "github.com/ollama/ollama/api"
    internalcloud "github.com/ollama/ollama/internal/cloud"
)

// ensureCloudEnabledForTool checks cloud policy from the connected Ollama server.
// If policy cannot be determined, this fails closed and blocks the operation.
func ensureCloudEnabledForTool(ctx context.Context, operation string) error {
    // Reuse shared message formatting; policy evaluation is still done via
    // the connected server's /api/status endpoint below.
    disabledMessage := internalcloud.DisabledError(operation)

    client, err := api.ClientFromEnvironment()
    if err != nil {
        return errors.New(disabledMessage + " (unable to verify server cloud policy)")
    }

    status, err := client.CloudStatusExperimental(ctx)
    if err != nil {
        return errors.New(disabledMessage + " (unable to verify server cloud policy)")
    }

    if status.Cloud.Disabled {
        return errors.New(disabledMessage)
    }

    return nil
}
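
`ensureCloudEnabledForTool` asks the connected server for its cloud policy and fails closed whenever that answer cannot be obtained. A small sketch of the `/api/status` payload shape the check relies on, mirroring the JSON fixtures used in the tests that follow; the struct is local to this sketch, while the real types come from `api.CloudStatusExperimental`.

```go
package main

import (
    "encoding/json"
    "fmt"
)

// cloudStatus mirrors the /api/status responses used in the tests below;
// the type exists only for this sketch.
type cloudStatus struct {
    Cloud struct {
        Disabled bool   `json:"disabled"`
        Source   string `json:"source"`
    } `json:"cloud"`
}

func main() {
    for _, raw := range []string{
        `{"cloud":{"disabled":false,"source":"none"}}`,
        `{"cloud":{"disabled":true,"source":"config"}}`,
    } {
        var s cloudStatus
        if err := json.Unmarshal([]byte(raw), &s); err != nil {
            panic(err)
        }
        if s.Cloud.Disabled {
            fmt.Printf("cloud disabled via %s: block the tool\n", s.Cloud.Source)
        } else {
            fmt.Println("cloud enabled: run the tool")
        }
    }
}
```
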
app/tools/cloud_policy_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
//go:build windows || darwin

package tools

import (
    "context"
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"
)

func TestEnsureCloudEnabledForTool(t *testing.T) {
    const op = "web search is unavailable"
    const disabledPrefix = "ollama cloud is disabled: web search is unavailable"

    t.Run("enabled allows tool execution", func(t *testing.T) {
        ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if r.URL.Path != "/api/status" {
                http.NotFound(w, r)
                return
            }
            w.Header().Set("Content-Type", "application/json")
            _, _ = w.Write([]byte(`{"cloud":{"disabled":false,"source":"none"}}`))
        }))
        t.Cleanup(ts.Close)
        t.Setenv("OLLAMA_HOST", ts.URL)

        if err := ensureCloudEnabledForTool(context.Background(), op); err != nil {
            t.Fatalf("expected nil error, got %v", err)
        }
    })

    t.Run("disabled blocks tool execution", func(t *testing.T) {
        ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if r.URL.Path != "/api/status" {
                http.NotFound(w, r)
                return
            }
            w.Header().Set("Content-Type", "application/json")
            _, _ = w.Write([]byte(`{"cloud":{"disabled":true,"source":"config"}}`))
        }))
        t.Cleanup(ts.Close)
        t.Setenv("OLLAMA_HOST", ts.URL)

        err := ensureCloudEnabledForTool(context.Background(), op)
        if err == nil {
            t.Fatal("expected error, got nil")
        }
        if got := err.Error(); got != disabledPrefix {
            t.Fatalf("unexpected error: %q", got)
        }
    })

    t.Run("status unavailable fails closed", func(t *testing.T) {
        ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            http.NotFound(w, r)
        }))
        t.Cleanup(ts.Close)
        t.Setenv("OLLAMA_HOST", ts.URL)

        err := ensureCloudEnabledForTool(context.Background(), op)
        if err == nil {
            t.Fatal("expected error, got nil")
        }
        if got := err.Error(); !strings.Contains(got, disabledPrefix) {
            t.Fatalf("expected disabled prefix, got %q", got)
        }
        if got := err.Error(); !strings.Contains(got, "unable to verify server cloud policy") {
            t.Fatalf("expected verification failure detail, got %q", got)
        }
    })
}

@@ -77,6 +77,10 @@ func (w *WebFetch) Execute(ctx context.Context, args map[string]any) (any, strin
}

func performWebFetch(ctx context.Context, targetURL string) (*FetchResponse, error) {
    if err := ensureCloudEnabledForTool(ctx, "web fetch is unavailable"); err != nil {
        return nil, err
    }

    reqBody := FetchRequest{URL: targetURL}
    jsonBody, err := json.Marshal(reqBody)
    if err != nil {

@@ -93,6 +93,10 @@ func (w *WebSearch) Execute(ctx context.Context, args map[string]any) (any, stri
}

func performWebSearch(ctx context.Context, query string, maxResults int) (*SearchResponse, error) {
    if err := ensureCloudEnabledForTool(ctx, "web search is unavailable"); err != nil {
        return nil, err
    }

    reqBody := SearchRequest{Query: query, MaxResults: maxResults}

    jsonBody, err := json.Marshal(reqBody)

@@ -289,10 +289,12 @@ export class InferenceCompute {
}
export class InferenceComputeResponse {
    inferenceComputes: InferenceCompute[];
+   defaultContextLength: number;

    constructor(source: any = {}) {
        if ('string' === typeof source) source = JSON.parse(source);
        this.inferenceComputes = this.convertValues(source["inferenceComputes"], InferenceCompute);
+       this.defaultContextLength = source["defaultContextLength"];
    }

    convertValues(a: any, classs: any, asMap: boolean = false): any {
@@ -406,13 +408,14 @@ export class Settings {
    Tools: boolean;
    WorkingDir: string;
    ContextLength: number;
-   AirplaneMode: boolean;
    TurboEnabled: boolean;
    WebSearchEnabled: boolean;
    ThinkEnabled: boolean;
    ThinkLevel: string;
    SelectedModel: string;
    SidebarOpen: boolean;
+   LastHomeView: string;
+   AutoUpdateEnabled: boolean;

    constructor(source: any = {}) {
        if ('string' === typeof source) source = JSON.parse(source);
@@ -424,13 +427,14 @@ export class Settings {
        this.Tools = source["Tools"];
        this.WorkingDir = source["WorkingDir"];
        this.ContextLength = source["ContextLength"];
-       this.AirplaneMode = source["AirplaneMode"];
        this.TurboEnabled = source["TurboEnabled"];
        this.WebSearchEnabled = source["WebSearchEnabled"];
        this.ThinkEnabled = source["ThinkEnabled"];
        this.ThinkLevel = source["ThinkLevel"];
        this.SelectedModel = source["SelectedModel"];
        this.SidebarOpen = source["SidebarOpen"];
+       this.LastHomeView = source["LastHomeView"];
+       this.AutoUpdateEnabled = source["AutoUpdateEnabled"];
    }
}
export class SettingsResponse {
@@ -469,26 +473,24 @@ export class HealthResponse {
}
export class User {
    id: string;
-   name: string;
-   email: string;
-   avatarURL: string;
-   plan: string;
-   bio: string;
-   firstName: string;
-   lastName: string;
-   overThreshold: boolean;
+   name: string;
+   bio?: string;
+   avatarurl?: string;
+   firstname?: string;
+   lastname?: string;
+   plan?: string;

    constructor(source: any = {}) {
        if ('string' === typeof source) source = JSON.parse(source);
        this.id = source["id"];
-       this.name = source["name"];
-       this.email = source["email"];
-       this.avatarURL = source["avatarURL"];
-       this.plan = source["plan"];
+       this.name = source["name"];
+       this.bio = source["bio"];
-       this.firstName = source["firstName"];
-       this.lastName = source["lastName"];
-       this.overThreshold = source["overThreshold"];
+       this.avatarurl = source["avatarurl"];
+       this.firstname = source["firstname"];
+       this.lastname = source["lastname"];
+       this.plan = source["plan"];
    }
}
export class Attachment {
@@ -550,14 +552,12 @@ export class Error {
    }
}
export class ModelUpstreamResponse {
-   digest?: string;
-   pushTime: number;
+   stale: boolean;
    error?: string;

    constructor(source: any = {}) {
        if ('string' === typeof source) source = JSON.parse(source);
-       this.digest = source["digest"];
-       this.pushTime = source["pushTime"];
+       this.stale = source["stale"];
        this.error = source["error"];
    }
}

app/ui/app/package-lock.json (generated, 1512 lines changed)
@@ -34,6 +34,7 @@
    "rehype-raw": "^7.0.0",
    "rehype-sanitize": "^6.0.0",
    "remark-math": "^6.0.0",
+   "streamdown": "^1.4.0",
    "unist-builder": "^4.0.0",
    "unist-util-parents": "^3.0.0"
},

7
app/ui/app/public/launch-icons/claude.svg
Normal file
@@ -0,0 +1,7 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!-- Generated by Pixelmator Pro 3.6.17 -->
|
||||
<svg width="1200" height="1200" viewBox="0 0 1200 1200" xmlns="http://www.w3.org/2000/svg">
|
||||
<g id="g314">
|
||||
<path id="path147" fill="#d97757" stroke="none" d="M 233.959793 800.214905 L 468.644287 668.536987 L 472.590637 657.100647 L 468.644287 650.738403 L 457.208069 650.738403 L 417.986633 648.322144 L 283.892639 644.69812 L 167.597321 639.865845 L 54.926208 633.825623 L 26.577238 627.785339 L 3.3e-05 592.751709 L 2.73832 575.27533 L 26.577238 559.248352 L 60.724873 562.228149 L 136.187973 567.382629 L 249.422867 575.194763 L 331.570496 580.026978 L 453.261841 592.671082 L 472.590637 592.671082 L 475.328857 584.859009 L 468.724915 580.026978 L 463.570557 575.194763 L 346.389313 495.785217 L 219.543671 411.865906 L 153.100723 363.543762 L 117.181267 339.060425 L 99.060455 316.107361 L 91.248367 266.01355 L 123.865784 230.093994 L 167.677887 233.073853 L 178.872513 236.053772 L 223.248367 270.201477 L 318.040283 343.570496 L 441.825592 434.738342 L 459.946411 449.798706 L 467.194672 444.64447 L 468.080597 441.020203 L 459.946411 427.409485 L 392.617493 305.718323 L 320.778564 181.932983 L 288.80542 130.630859 L 280.348999 99.865845 C 277.369171 87.221436 275.194641 76.590698 275.194641 63.624268 L 312.322174 13.20813 L 332.8591 6.604126 L 382.389313 13.20813 L 403.248352 31.328979 L 434.013519 101.71814 L 483.865753 212.537048 L 561.181274 363.221497 L 583.812134 407.919434 L 595.892639 449.315491 L 600.40271 461.959839 L 608.214783 461.959839 L 608.214783 454.711609 L 614.577271 369.825623 L 626.335632 265.61084 L 637.771851 131.516846 L 641.718201 93.745117 L 660.402832 48.483276 L 697.530334 24.000122 L 726.52356 37.852417 L 750.362549 72 L 747.060486 94.067139 L 732.886047 186.201416 L 705.100708 330.52356 L 686.979919 427.167847 L 697.530334 427.167847 L 709.61084 415.087341 L 758.496704 350.174561 L 840.644348 247.490051 L 876.885925 206.738342 L 919.167847 161.71814 L 946.308838 140.29541 L 997.61084 140.29541 L 1035.38269 196.429626 L 1018.469849 254.416199 L 965.637634 321.422852 L 921.825562 378.201538 L 859.006714 462.765259 L 819.785278 530.41626 L 823.409424 535.812073 L 832.75177 534.92627 L 974.657776 504.724915 L 1051.328979 490.872559 L 1142.818848 475.167786 L 1184.214844 494.496582 L 1188.724854 514.147644 L 1172.456421 554.335693 L 1074.604126 578.496765 L 959.838989 601.449829 L 788.939636 641.879272 L 786.845764 643.409485 L 789.261841 646.389343 L 866.255127 653.637634 L 899.194702 655.409424 L 979.812134 655.409424 L 1129.932861 666.604187 L 1169.154419 692.537109 L 1192.671265 724.268677 L 1188.724854 748.429688 L 1128.322144 779.194641 L 1046.818848 759.865845 L 856.590759 714.604126 L 791.355774 698.335754 L 782.335693 698.335754 L 782.335693 703.731567 L 836.69812 756.885986 L 936.322205 846.845581 L 1061.073975 962.81897 L 1067.436279 991.490112 L 1051.409424 1014.120911 L 1034.496704 1011.704712 L 924.885986 929.234924 L 882.604126 892.107544 L 786.845764 811.48999 L 780.483276 811.48999 L 780.483276 819.946289 L 802.550415 852.241699 L 919.087341 1027.409424 L 925.127625 1081.127686 L 916.671204 1098.604126 L 886.469849 1109.154419 L 853.288696 1103.114136 L 785.073914 1007.355835 L 714.684631 899.516785 L 657.906067 802.872498 L 650.979858 806.81897 L 617.476624 1167.704834 L 601.771851 1186.147705 L 565.530212 1200 L 535.328857 1177.046997 L 519.302124 1139.919556 L 535.328857 1066.550537 L 554.657776 970.792053 L 570.362488 894.68457 L 584.536926 800.134277 L 592.993347 768.724976 L 592.429626 766.630859 L 585.503479 767.516968 L 514.22821 865.369263 L 405.825531 1011.865906 L 320.053711 1103.677979 L 299.516815 1111.812256 L 263.919525 1093.369263 L 267.221497 
1060.429688 L 287.114136 1031.114136 L 405.825531 880.107361 L 477.422913 786.52356 L 523.651062 732.483276 L 523.328918 724.671265 L 520.590698 724.671265 L 205.288605 929.395935 L 149.154434 936.644409 L 124.993355 914.01355 L 127.973183 876.885986 L 139.409409 864.80542 L 234.201385 799.570435 L 233.879227 799.8927 Z"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 4.0 KiB |
1
app/ui/app/public/launch-icons/codex-dark.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 320"><path fill="#fff" d="m297.06 130.97c7.26-21.79 4.76-45.66-6.85-65.48-17.46-30.4-52.56-46.04-86.84-38.68-15.25-17.18-37.16-26.95-60.13-26.81-35.04-.08-66.13 22.48-76.91 55.82-22.51 4.61-41.94 18.7-53.31 38.67-17.59 30.32-13.58 68.54 9.92 94.54-7.26 21.79-4.76 45.66 6.85 65.48 17.46 30.4 52.56 46.04 86.84 38.68 15.24 17.18 37.16 26.95 60.13 26.8 35.06.09 66.16-22.49 76.94-55.86 22.51-4.61 41.94-18.7 53.31-38.67 17.57-30.32 13.55-68.51-9.94-94.51zm-120.28 168.11c-14.03.02-27.62-4.89-38.39-13.88.49-.26 1.34-.73 1.89-1.07l63.72-36.8c3.26-1.85 5.26-5.32 5.24-9.07v-89.83l26.93 15.55c.29.14.48.42.52.74v74.39c-.04 33.08-26.83 59.9-59.91 59.97zm-128.84-55.03c-7.03-12.14-9.56-26.37-7.15-40.18.47.28 1.3.79 1.89 1.13l63.72 36.8c3.23 1.89 7.23 1.89 10.47 0l77.79-44.92v31.1c.02.32-.13.63-.38.83l-64.41 37.19c-28.69 16.52-65.33 6.7-81.92-21.95zm-16.77-139.09c7-12.16 18.05-21.46 31.21-26.29 0 .55-.03 1.52-.03 2.2v73.61c-.02 3.74 1.98 7.21 5.23 9.06l77.79 44.91-26.93 15.55c-.27.18-.61.21-.91.08l-64.42-37.22c-28.63-16.58-38.45-53.21-21.95-81.89zm221.26 51.49-77.79-44.92 26.93-15.54c.27-.18.61-.21.91-.08l64.42 37.19c28.68 16.57 38.51 53.26 21.94 81.94-7.01 12.14-18.05 21.44-31.2 26.28v-75.81c.03-3.74-1.96-7.2-5.2-9.06zm26.8-40.34c-.47-.29-1.3-.79-1.89-1.13l-63.72-36.8c-3.23-1.89-7.23-1.89-10.47 0l-77.79 44.92v-31.1c-.02-.32.13-.63.38-.83l64.41-37.16c28.69-16.55 65.37-6.7 81.91 22 6.99 12.12 9.52 26.31 7.15 40.1zm-168.51 55.43-26.94-15.55c-.29-.14-.48-.42-.52-.74v-74.39c.02-33.12 26.89-59.96 60.01-59.94 14.01 0 27.57 4.92 38.34 13.88-.49.26-1.33.73-1.89 1.07l-63.72 36.8c-3.26 1.85-5.26 5.31-5.24 9.06l-.04 89.79zm14.63-31.54 34.65-20.01 34.65 20v40.01l-34.65 20-34.65-20z"/></svg>
|
||||
|
After Width: | Height: | Size: 1.7 KiB |
1
app/ui/app/public/launch-icons/codex.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 320"><path d="m297.06 130.97c7.26-21.79 4.76-45.66-6.85-65.48-17.46-30.4-52.56-46.04-86.84-38.68-15.25-17.18-37.16-26.95-60.13-26.81-35.04-.08-66.13 22.48-76.91 55.82-22.51 4.61-41.94 18.7-53.31 38.67-17.59 30.32-13.58 68.54 9.92 94.54-7.26 21.79-4.76 45.66 6.85 65.48 17.46 30.4 52.56 46.04 86.84 38.68 15.24 17.18 37.16 26.95 60.13 26.8 35.06.09 66.16-22.49 76.94-55.86 22.51-4.61 41.94-18.7 53.31-38.67 17.57-30.32 13.55-68.51-9.94-94.51zm-120.28 168.11c-14.03.02-27.62-4.89-38.39-13.88.49-.26 1.34-.73 1.89-1.07l63.72-36.8c3.26-1.85 5.26-5.32 5.24-9.07v-89.83l26.93 15.55c.29.14.48.42.52.74v74.39c-.04 33.08-26.83 59.9-59.91 59.97zm-128.84-55.03c-7.03-12.14-9.56-26.37-7.15-40.18.47.28 1.3.79 1.89 1.13l63.72 36.8c3.23 1.89 7.23 1.89 10.47 0l77.79-44.92v31.1c.02.32-.13.63-.38.83l-64.41 37.19c-28.69 16.52-65.33 6.7-81.92-21.95zm-16.77-139.09c7-12.16 18.05-21.46 31.21-26.29 0 .55-.03 1.52-.03 2.2v73.61c-.02 3.74 1.98 7.21 5.23 9.06l77.79 44.91-26.93 15.55c-.27.18-.61.21-.91.08l-64.42-37.22c-28.63-16.58-38.45-53.21-21.95-81.89zm221.26 51.49-77.79-44.92 26.93-15.54c.27-.18.61-.21.91-.08l64.42 37.19c28.68 16.57 38.51 53.26 21.94 81.94-7.01 12.14-18.05 21.44-31.2 26.28v-75.81c.03-3.74-1.96-7.2-5.2-9.06zm26.8-40.34c-.47-.29-1.3-.79-1.89-1.13l-63.72-36.8c-3.23-1.89-7.23-1.89-10.47 0l-77.79 44.92v-31.1c-.02-.32.13-.63.38-.83l64.41-37.16c28.69-16.55 65.37-6.7 81.91 22 6.99 12.12 9.52 26.31 7.15 40.1zm-168.51 55.43-26.94-15.55c-.29-.14-.48-.42-.52-.74v-74.39c.02-33.12 26.89-59.96 60.01-59.94 14.01 0 27.57 4.92 38.34 13.88-.49.26-1.33.73-1.89 1.07l-63.72 36.8c-3.26 1.85-5.26 5.31-5.24 9.06l-.04 89.79zm14.63-31.54 34.65-20.01 34.65 20v40.01l-34.65 20-34.65-20z"/></svg>
|
||||
|
After Width: | Height: | Size: 1.7 KiB |
8
app/ui/app/public/launch-icons/droid.svg
Normal file
|
After Width: | Height: | Size: 6.2 KiB |
242
app/ui/app/public/launch-icons/openclaw.svg
Normal file
@@ -0,0 +1,242 @@
|
||||
<svg version="1.2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 500 500" width="500" height="500">
|
||||
<style>
|
||||
.s0 { fill: #f6f4f4 }
|
||||
.s1 { fill: #0b0303 }
|
||||
.s2 { fill: #ef0011 }
|
||||
.s3 { fill: #f3e2e2 }
|
||||
.s4 { fill: #f00212 }
|
||||
.s5 { fill: #ba000d }
|
||||
.s6 { fill: #faf1f1 }
|
||||
.s7 { fill: #0b0100 }
|
||||
.s8 { fill: #fbedee }
|
||||
.s9 { fill: #faeaea }
|
||||
.s10 { fill: #ab797d }
|
||||
.s11 { fill: #f8eaea }
|
||||
.s12 { fill: #902021 }
|
||||
.s13 { fill: #f9eeee }
|
||||
.s14 { fill: #f6ecec }
|
||||
.s15 { fill: #080201 }
|
||||
.s16 { fill: #150100 }
|
||||
.s17 { fill: #f2e7e7 }
|
||||
.s18 { fill: #fbe7e8 }
|
||||
.s19 { fill: #060101 }
|
||||
.s20 { fill: #f5e7e7 }
|
||||
.s21 { fill: #fa999e }
|
||||
.s22 { fill: #c46064 }
|
||||
.s23 { fill: #180300 }
|
||||
.s24 { fill: #f6dcdd }
|
||||
.s25 { fill: #f2e6e6 }
|
||||
.s26 { fill: #110200 }
|
||||
.s27 { fill: #eb0011 }
|
||||
.s28 { fill: #e20010 }
|
||||
.s29 { fill: #ea0011 }
|
||||
.s30 { fill: #760007 }
|
||||
.s31 { fill: #f00514 }
|
||||
.s32 { fill: #fcebeb }
|
||||
.s33 { fill: #ecd6d6 }
|
||||
.s34 { fill: #f5e3e3 }
|
||||
.s35 { fill: #f5e4e4 }
|
||||
.s36 { fill: #faf6f6 }
|
||||
.s37 { fill: #e50010 }
|
||||
.s38 { fill: #d5000f }
|
||||
.s39 { fill: #f2e2e3 }
|
||||
.s40 { fill: #ef1018 }
|
||||
.s41 { fill: #f4e8e9 }
|
||||
.s42 { fill: #ef0513 }
|
||||
.s43 { fill: #f5e5e5 }
|
||||
.s44 { fill: #f00413 }
|
||||
.s45 { fill: #f4e9ea }
|
||||
.s46 { fill: #ed0011 }
|
||||
.s47 { fill: #e80011 }
|
||||
.s48 { fill: #e60613 }
|
||||
.s49 { fill: #f0d6d6 }
|
||||
.s50 { fill: #fca9ac }
|
||||
.s51 { fill: #9c000c }
|
||||
.s52 { fill: #73393b }
|
||||
</style>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s0" d="m166.5 52.5q3.5 0 7 0 2.75 2.99 1.5 7-21.27 45.61-20.5 96 39.99 2.76 72 26.5 7.87 6.86 13.5 15.5 42.88-56.39 103.5-92.5 47.35-25.46 101-25 14.52 0.38 23.5 11.5 3.19 7.74 2 16-1.81 7.18-4.5 14-1 0-1 1-5.04 6.05-9 13-1 0-1 1 0 0.5 0 1-12.42 12.15-28.5 19-6.02 36.27-41.5 45-0.83 2.75 0 5 19.02-12.85 41.5-9 10.85-8.09 23.5-13 15.01-6.37 31-2.5 14.09 7.43 14 23.5-2.83 23.25-15.5 43-6.42 9.92-14 19-10.04 8.8-19.5 18-72.02 48.88-156.5 27-19.63 9.6-41.5 10.5-4.59 1.27-9 3 2 1 4 2 20.09-1.11 35 12 25.46 6.95 37.5 30.5 1.26 5.69-1 11-3.38 3.79-7.5 6.5 5.74 10.07 1.5 20.5-7.55 7.47-17.5 3.5-11.01-5.34-22.5-9.5-18.26 10-38.5 13-15.5 0-31 0-26.62-4.54-51-17-4.17 1.33-8 3.5-7.23 5.87-15 11-8.62 2.58-13.5-4.5-1.82 2.32-4.5 3.5-6.06 2.24-12 3.5-7.5 0-15 0-27.42-2.56-50-18.5-18-17.25-23-41.5 0-11.5 0-23 4.12-22.7 25-33 6.95-16.67 22-26.5-20.39-20.8-14.5-49.5 7.01-26.98 28.5-44.5 7.56-5.27 15-10.5-13.09-30.88-7.5-64 3.16-15.57 14.5-26.5 6.85-2.48 8 4.5-6.59 39.53 11 75.5 7.99-0.49 16-2 2.42-34.57 14.5-67.5 8.51-22.23 27.5-36z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s1" d="m113.5 401.5q0.48-5.1-1-10-0.91 0.19-1 1-2.46 1.74-5 3.5 5.65 9.54-5 13-32.21 5.55-61-10-32.89-23.11-29.5-63.5 2.96-22.67 23.5-32 7.99-19.75 27-29.5-27.65-23.7-15.5-58.5 7.33-16.82 20.5-29.5 10.79-8.14 22-15.5-16.49-37.08-5.5-76 3.19-6.13 7.5-11.5 1.48-0.89 2 1-5.69 41.09 12.5 78.5 1 1 2 2 9.97-3.24 20.5-4 2 0 4 0 0-7.5 0-15 0.99-42.22 24.5-77 6.12-7.12 14-12-4.65 13.43-10 27-11.93 37.6-9.5 77 49.38 0.7 83.5 36 2.75 4.5 5.5 9 38.99-52.24 93-88.5 45.84-29.03 100-32.5 15.69-1.56 29 6.5 5.68 7.29 3.5 16.5-10.38 33.62-43.5 45-4.39 37.33-41 45-0.79 8.63-6 15.5 1.91 1.83 4.5 2.5 22.27-17.25 50.5-14.5 12.93-9.41 28-15 36.22-8.28 31.5 28.5-15.19 51.69-62.5 77.5-65.92 35.87-138 15.5-19.67 10.42-42 10.5-8.39 2.88-17 5 3.58 6.08 10 9 20.92-1.14 36 13 22.67 5.23 34.5 25.5 3.33 7.13-3.5 11.5-3.88 1.8-8 3 7.36 8.45 6.5 19.5-4.43 5.66-11.5 3.5-12.84-5.67-26-10.5-39.4 21.02-83 10.5-18.85-5.78-36.5-14.5-13.65 4.14-23.5 14.5-9.51 3.74-11-6.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s2" d="m153.5 173.5q24.62 1.46 46 13.5 12.11 8.1 17.5 21.5 0.74 2.45 0.5 5 0.09 0.81 1 1 1.48-4.9 1-10 5.04 10.48 1.5 22-9.81 27.86-35.5 42.5-26.17 14.97-56 19.5-2.77-0.4-2 1 2.86 1.27 6 1 25.64 1.53 48.5-10 0.34 10.08 2 20 1.08 5.76 5 10 1 1.5 0 3-31.11 20.84-68.5 17.5-23.7-5.7-32.5-28.5-4.39-9.18-3.5-19 15.41 6.23 32 4.5-20.68-6.39-39-18-34.81-27.22-12.5-65.5 11.84-14.83 29-23 4.21 7.66 11.5 12.5 3 1 6 0-26.04-34.62-29-78-0.13-8.46 2-16.5 1 6.5 2 13 3.43 39.53 24.5 73 2.03 2.28 4.5 4 0.5-1.25 1-2.5-1.27-6.54-5-12 0.5-0.75 1-1.5 9.72-3.43 20-4 0.55 10.34 8 17.5 1.94 0.74 4 0.5-17.8-64.6 16.5-122 0.98-1.79 1.5 0-28.21 56.64-13.5 118 1.08 1.43 2.5 0.5 2.21-4.98 2-10.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s3" d="m454.5 97.5q-18.37-2.97-37-1.5-16.14 2.08-32 5.5 32.38-14.09 67-7.5 1.98 1.22 2 3.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s4" d="m454.5 97.5q-1.33 11.18-8.5 20-21.81 26.28-55.5 32-1.11-0.2-2 0.5 2.31 2.82 5.5 4.5 1 2 0 4-9.56 11.3-19.5 20 19.71-8.72 31-27 2.68-0.43 5 1-14.24 30.97-48 36.5-9.93 1.71-20 1.5-6.8-0.48-13 1 5.81 6.92 14 11-10.78 16.03-27 26.5 27.16-7.4 38-33.5 4.34 1.35 9 1-9.08 23.84-33 33.5-18.45 6.41-38 7 22.59 8.92 45-1 12.05-5.52 24-11 9.01-1.79 17 2.5 5.28-4.38 11-8 12.8-6.07 27-5 0 0.5 0 1-19.34 2.69-34 15.5 0.5 0.25 1 0.5 17.79-8.09 36-15 2.71-0.79 5-2 2.5-1 5-2 5.53-4.04 11-8 11.7-4.18 24-6.5 7.78-1.36 15 1.5-2.97 18.45-13.5 34-34.92 49.37-94.5 62.5-59.27 12.45-108-23-15.53-12.52-21.5-31.5-2.47-14.26 4-27-3.15 24.41 14 42-4.92-10.28-7-22-1.97-17.63 7-33 47.28-69.5 125.5-100 15.86-3.42 32-5.5 18.63-1.47 37 1.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s5" d="m86.5 112.5q-1-6.5-2-13 0.7-5.34 3.5-10-1.8 11.32-1.5 23z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s6" d="m433.5 97.5q2.22-0.39 4 1-10 13.75-27 14-0.24-2.06 0.5-4 10.3-7.78 22.5-11z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s7" d="m407.5 101.5q2.55-0.24 5 0.5-52.87 18.31-84.5 64.5-6.94 7.95-17 11-9.38-2.38-5-11 40.38-48.62 101.5-65z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s8" d="m402.5 112.5q3 0 6 0-2.56 8.8-12 7-0.22-1.58 0.5-3 2.72-2.22 5.5-4z"/>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s9" d="m390.5 149.5q7.77 0.52 15 2-11.29 18.28-31 27 9.94-8.7 19.5-20 1-2 0-4-3.19-1.68-5.5-4.5 0.89-0.7 2-0.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s10" d="m131.5 145.5q0 7.5 0 15-2 0-4 0 1.06-1.36 3-1-0.48-7.29 1-14z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s11" d="m219.5 204.5q-1 4.5-2 9 0.24-2.55-0.5-5-5.39-13.4-17.5-21.5-21.38-12.04-46-13.5 0-2 0-4 36.7-0.86 61.5 26 3.06 4.11 4.5 9z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s12" d="m329.5 191.5q6.2-1.48 13-1-3.5 1-7 2-2.9-0.97-6-1z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s13" d="m329.5 191.5q3.1 0.03 6 1 9.55 1.31 19 3-10.84 26.1-38 33.5 16.22-10.47 27-26.5-8.19-4.08-14-11z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s14" d="m479.5 199.5q-7.22-2.86-15-1.5-12.3 2.32-24 6.5 15.6-13.11 36-11.5 3.63 2.26 3 6.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s15" d="m193.5 216.5q-12.01 1.52-22 8-2.83 1.29-5.5 3-4.79-4.57-6.5-11-5.04 2.2-9.5-1-3.47-6.4 3.5-3 4.4 0.05 8-2.5 9.22-9.73 21-16 6.3-3.24 12 1-2.9 1.22-6 1.5 2.61 5.74 4.5 12 0.75 3.97 0.5 8z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s16" d="m458.5 200.5q3.04-0.24 6 0.5-18.02 7.05-33 19-1 1-2 0 11.53-14.3 29-19.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s17" d="m178.5 202.5q6.85-0.63 4.5 6-7.6 5.09-6-4 1.08-0.82 1.5-2z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s18" d="m469.5 201.5q-2.26 13.65-14.5 22-0.47-2.11 1-4 7.08-8.82 13.5-18z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s19" d="m74.5 208.5q8.22-0.2 16 2.5 11.8 4.26 23.5 8.5 5.65-0.63 8-6 2.41 11.83-9.5 13 0.55 3.61 2 7-0.5 1-1 2-4.67-0.94-9.5-1-9.96 0.44-19.5 2.5-5.05-3.55-6.5-9.5-0.75-7.48-0.5-15-6.47 0.15-3-4z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s20" d="m429.5 212.5q-2.5 1-5 2-4 0-8 0-14.2-1.07-27 5 15.27-12.44 35-9.5 2.72 1.14 5 2.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s21" d="m219.5 204.5q0.48 5.1-1 10-0.91-0.19-1-1 1-4.5 2-9z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s22" d="m416.5 215.5q0-0.5 0-1 4 0 8 0-2.29 1.21-5 2-1.06-1.36-3-1z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s23" d="m416.5 215.5q1.94-0.36 3 1-18.21 6.91-36 15-0.5-0.25-1-0.5 14.66-12.81 34-15.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s24" d="m193.5 216.5q4.39 1.3 9 3-0.79 1.04-2 1.5-14.77-0.13-29 3.5 9.99-6.48 22-8z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s25" d="m98.5 219.5q6.09-0.98 6 5-3.04 0.24-6-0.5-1.84-2.24 0-4.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s26" d="m176.5 229.5q8.85-1.14 16 4-4.98 1.75-10 0-13.56 14.3-33 19.5-28.06 8.2-55 1 3.32-6.4 10-5.5-0.71 1.47-2 2.5 36.58 4.24 69-14 4.68-2.13 1-5 2.35-0.91 4-2.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s27" d="m231.5 238.5q1.31-0.2 2 1-3.13 28.62 15 51-16.25 6.75-27-7.5-1-1-2 0 14.73 29.34 46 18.5 1.79 0.52 0 1.5-37.63 16.82-50.5-22.5-5.1-26.48 16.5-42z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s28" d="m243.5 259.5q5.88 3.62 10.5 9 12.96 18.46 32.5 29.5-31.51-7.75-43-38.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s29" d="m203.5 266.5q1.31-0.2 2 1-2.48 22.08 12 39-6.99 1.35-14 0.5 4.59 4.08 10 7-8.71 0.28-14.5-6.5-16.98-22.76 4.5-41z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s27" d="m58.5 284.5q9.6-2.17 14.5 6 5.15 14.18-1 28-11.05-13.14-27.5-17.5 5.15-9.9 14-16.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s30" d="m129.5 288.5q2 1 4 2-3.14 0.27-6-1-0.77-1.4 2-1z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s31" d="m56.5 313.5q3.43 5.43 8 10-4.88 0.44-8 4-1.11-0.2-2 0.5 28.91 1.65 38 28.5 0.45 3.16-1 6-11.02-7.01-23-12.5-4.75-3.75-9.5-7.5 1.47 7.42 7 13 8.34 27.18 32 43 0.99 2.41-1.5 3.5-40.25 5.58-66.5-25.5-15.67-22.01-8-48 10.46-23.87 34.5-15z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s32" d="m45.5 317.5q4.03-0.25 8 0.5 2.46 4.16-2 6-6.04 2.01-9-3.5 1.26-1.85 3-3z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s33" d="m56.5 313.5q4.91 3.14 9.5 7 0.88 2.25-1.5 3-4.57-4.57-8-10z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s34" d="m198.5 319.5q-11.1 11.56-27 15.5-15.75 4.88-32 2.5 28.81-3.69 54-18.5 2.65-0.96 5 0.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s4" d="m198.5 319.5q1.44 0.68 2.5 2 2.41 8.23 6 16 1.2 2.64-0.5 5-30.65 21.41-68 18.5-25.16-6.17-32.5-30.5 6.96 4.99 15.5 6.5 8.99 0.75 18 0.5 16.25 2.38 32-2.5 15.9-3.94 27-15.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s35" d="m92.5 356.5q-9.09-26.85-38-28.5 0.89-0.7 2-0.5 25.47-4.89 35.5 19 0.75 4.98 0.5 10z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s36" d="m72.5 335.5q3.62-0.38 5 3-4.22 1.83-5-3z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s37" d="m223.5 336.5q5.59-0.48 11 1-4.04 4.16-8.5 8-5.99-3.8-2.5-9z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s38" d="m90.5 334.5q0.59-1.54 2-0.5 3.94 5.45 9 10 7 6 14 12-6.91-1.7-13-6-6.21-7.72-12-15.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s39" d="m261.5 346.5q-3.54-2.44-8-3.5-6.98-0.75-14-0.5 0.63-1.08 2-1.5 13.82-2.52 26 4-2.63 1.98-6 1.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s40" d="m239.5 342.5q7.02-0.25 14 0.5 4.46 1.06 8 3.5-5.2 2.35-10 5.5-3.88 4.65-9 7.5-9.89-3.09-9.5-13 2.36-3.63 6.5-4z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s41" d="m214.5 349.5q-21.43 15.48-48 16 22.82-5.9 43-18.5 3.64-1.12 5 2.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s42" d="m214.5 349.5q5.96 7.2 13.5 13 1 1 0 2-28.58 23.34-65.5 20.5-18.15-4.24-27.5-19.5 1.13 0.94 2.5 1.5 14.7 1.42 29-1.5 26.57-0.52 48-16z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s43" d="m302.5 373.5q-14.74-16.73-37-19-4.55 0.25-9 1 25.3-10.24 43.5 11 2.85 2.91 2.5 7z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s44" d="m302.5 373.5q0.21 2.44-2 3.5-28.69 7.6-50.5-12.5-0.06-6.71 6.5-9 4.45-0.75 9-1 22.26 2.27 37 19z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s45" d="m100.5 356.5q5.42 2.71 11 5.5-13.04 7.54-18.5 21.5-7.57-7.14-10.5-17 5.58 1.54 10 5.5 4.2 0.84 5.5-3.5 1.41-5.99 2.5-12z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s8" d="m83.5 394.5q-18.9-10.15-29.5-29-1.54-3.52-2-7 5.79 2.39 10 7 7.82 16.63 21.5 29z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s46" d="m232.5 365.5q17.6 6.19 10.5 23-10.6 10.42-25.5 11.5-25.94 3.21-49-9 36.75-1.65 64-25.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s47" d="m113.5 367.5q7.7-0.01 9.5 7-9.69 7.19-18.5 15.5-7.23 5.76-5.5-3.5 3.12-12.84 14.5-19z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s29" d="m126.5 380.5q7.88-0.4 12 6.5-8.5 7.25-17 14.5-5.62-12.55 5-21z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s48" d="m283.5 385.5q3.22 2.95 7 5.5 2.8 4.03 6 7.5 0.42 2.77-2 4-15.5-9.75-31-19.5-1.79-0.98 0-1.5 9.96 2.49 20 4z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s49" d="m283.5 385.5q8.71-1.27 11.5 7 1.22 2.9 1.5 6-3.2-3.47-6-7.5-3.78-2.55-7-5.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s50" d="m83.5 394.5q1.88-0.06 3 1.5-2.25 0.88-3-1.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s51" d="m258.5 392.5q3.51 0.41 0 2.5-2.33 1.93-5 2 2.61-2.28 5-4.5z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" class="s52" d="m111.5 392.5q0.09-0.81 1-1 1.48 4.9 1 10-1-4.5-2-9z"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 13 KiB |
7
app/ui/app/public/launch-icons/opencode.svg
Normal file
@@ -0,0 +1,7 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" width="512" height="512"><svg width="512" height="512" viewBox="0 0 512 512" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<rect width="512" height="512" fill="#131010"></rect>
|
||||
<path d="M320 224V352H192V224H320Z" fill="#5A5858"></path>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M384 416H128V96H384V416ZM320 160H192V352H320V160Z" fill="white"></path>
|
||||
</svg><style>@media (prefers-color-scheme: light) { :root { filter: none; } }
|
||||
@media (prefers-color-scheme: dark) { :root { filter: none; } }
|
||||
</style></svg>
|
||||
|
After Width: | Height: | Size: 612 B |
9
app/ui/app/public/launch-icons/pi-dark.svg
Normal file
@@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 800 800">
|
||||
<rect width="800" height="800" rx="160" fill="#fff"/>
|
||||
<path fill="#000" fill-rule="evenodd" d="
|
||||
M165.29 165.29 H517.36 V400 H400 V517.36 H282.65 V634.72 H165.29 Z
|
||||
M282.65 282.65 V400 H400 V282.65 Z
|
||||
"/>
|
||||
<path fill="#000" d="M517.36 400 H634.72 V634.72 H517.36 Z"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 389 B |
9
app/ui/app/public/launch-icons/pi.svg
Normal file
@@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 800 800">
|
||||
<rect width="800" height="800" rx="160" fill="#000"/>
|
||||
<path fill="#fff" fill-rule="evenodd" d="
|
||||
M165.29 165.29 H517.36 V400 H400 V517.36 H282.65 V634.72 H165.29 Z
|
||||
M282.65 282.65 V400 H400 V282.65 Z
|
||||
"/>
|
||||
<path fill="#fff" d="M517.36 400 H634.72 V634.72 H517.36 Z"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 389 B |
@@ -4,7 +4,6 @@ import {
|
||||
ChatEvent,
|
||||
DownloadEvent,
|
||||
ErrorEvent,
|
||||
InferenceCompute,
|
||||
InferenceComputeResponse,
|
||||
ModelCapabilitiesResponse,
|
||||
Model,
|
||||
@@ -15,6 +14,7 @@ import {
|
||||
import { parseJsonlFromResponse } from "./util/jsonl-parsing";
|
||||
import { ollamaClient as ollama } from "./lib/ollama-client";
|
||||
import type { ModelResponse } from "ollama/browser";
|
||||
import { API_BASE, OLLAMA_DOT_COM } from "./lib/config";
|
||||
|
||||
// Extend Model class with utility methods
|
||||
declare module "@/gotypes" {
|
||||
@@ -27,8 +27,11 @@ Model.prototype.isCloud = function (): boolean {
|
||||
return this.model.endsWith("cloud");
|
||||
};
|
||||
|
||||
const API_BASE = import.meta.env.DEV ? "http://127.0.0.1:3001" : "";
|
||||
|
||||
export type CloudStatusSource = "env" | "config" | "both" | "none";
|
||||
export interface CloudStatusResponse {
|
||||
disabled: boolean;
|
||||
source: CloudStatusSource;
|
||||
}
|
||||
// Helper function to convert Uint8Array to base64
|
||||
function uint8ArrayToBase64(uint8Array: Uint8Array): string {
|
||||
const chunkSize = 0x8000; // 32KB chunks to avoid stack overflow
|
||||
@@ -43,44 +46,50 @@ function uint8ArrayToBase64(uint8Array: Uint8Array): string {
|
||||
}
|
||||
|
||||
export async function fetchUser(): Promise<User | null> {
|
||||
try {
|
||||
const response = await fetch(`${API_BASE}/api/v1/me`, {
|
||||
method: "GET",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
const userData: User = await response.json();
|
||||
return userData;
|
||||
}
|
||||
|
||||
return null;
|
||||
} catch (error) {
|
||||
console.error("Error fetching user:", error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchConnectUrl(): Promise<string> {
|
||||
const response = await fetch(`${API_BASE}/api/v1/connect`, {
|
||||
method: "GET",
|
||||
const response = await fetch(`${API_BASE}/api/me`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error("Failed to fetch connect URL");
|
||||
if (response.ok) {
|
||||
const userData: User = await response.json();
|
||||
|
||||
if (userData.avatarurl && !userData.avatarurl.startsWith("http")) {
|
||||
userData.avatarurl = `${OLLAMA_DOT_COM}${userData.avatarurl}`;
|
||||
}
|
||||
|
||||
return userData;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.connect_url;
|
||||
if (response.status === 401 || response.status === 403) {
|
||||
return null;
|
||||
}
|
||||
|
||||
throw new Error(`Failed to fetch user: ${response.status}`);
|
||||
}
|
||||
|
||||
export async function fetchConnectUrl(): Promise<string> {
|
||||
const response = await fetch(`${API_BASE}/api/me`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
});
|
||||
|
||||
if (response.status === 401) {
|
||||
const data = await response.json();
|
||||
if (data.signin_url) {
|
||||
return data.signin_url;
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error("Failed to fetch connect URL");
|
||||
}
|
||||
|
||||
export async function disconnectUser(): Promise<void> {
|
||||
const response = await fetch(`${API_BASE}/api/v1/disconnect`, {
|
||||
const response = await fetch(`${API_BASE}/api/signout`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
@@ -152,7 +161,7 @@ export async function getModels(query?: string): Promise<Model[]> {
|
||||
// Add query if it's in the registry and not already in the list
|
||||
if (!exactMatch) {
|
||||
const result = await getModelUpstreamInfo(new Model({ model: query }));
|
||||
const existsUpstream = !!result.digest && !result.error;
|
||||
const existsUpstream = result.exists;
|
||||
if (existsUpstream) {
|
||||
filteredModels.push(new Model({ model: query }));
|
||||
}
|
||||
@@ -205,6 +214,11 @@ export async function* sendMessage(
|
||||
data: uint8ArrayToBase64(att.data),
|
||||
}));
|
||||
|
||||
// Send think parameter when it's explicitly set (true, false, or a non-empty string).
|
||||
const shouldSendThink =
|
||||
think !== undefined &&
|
||||
(typeof think === "boolean" || (typeof think === "string" && think !== ""));
|
||||
|
||||
const response = await fetch(`${API_BASE}/api/v1/chat/${chatId}`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
@@ -222,7 +236,7 @@ export async function* sendMessage(
|
||||
web_search: webSearch ?? false,
|
||||
file_tools: fileTools ?? false,
|
||||
...(forceUpdate !== undefined ? { forceUpdate } : {}),
|
||||
...(think !== undefined ? { think } : {}),
|
||||
...(shouldSendThink ? { think } : {}),
|
||||
}),
|
||||
),
|
||||
signal,
|
||||
@@ -276,6 +290,28 @@ export async function updateSettings(settings: Settings): Promise<{
|
||||
};
|
||||
}
|
||||
|
||||
export async function updateCloudSetting(
|
||||
enabled: boolean,
|
||||
): Promise<CloudStatusResponse> {
|
||||
const response = await fetch(`${API_BASE}/api/v1/cloud`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
body: JSON.stringify({ enabled }),
|
||||
});
|
||||
if (!response.ok) {
|
||||
const error = await response.text();
|
||||
throw new Error(error || "Failed to update cloud setting");
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return {
|
||||
disabled: Boolean(data.disabled),
|
||||
source: (data.source as CloudStatusSource) || "none",
|
||||
};
|
||||
}
|
||||
|
||||
export async function renameChat(chatId: string, title: string): Promise<void> {
|
||||
const response = await fetch(`${API_BASE}/api/v1/chat/${chatId}/rename`, {
|
||||
method: "PUT",
|
||||
@@ -303,7 +339,7 @@ export async function deleteChat(chatId: string): Promise<void> {
// Get upstream information for model staleness checking
export async function getModelUpstreamInfo(
model: Model,
): Promise<{ digest?: string; pushTime: number; error?: string }> {
): Promise<{ stale: boolean; exists: boolean; error?: string }> {
try {
const response = await fetch(`${API_BASE}/api/v1/model/upstream`, {
method: "POST",
@@ -317,22 +353,22 @@ export async function getModelUpstreamInfo(

if (!response.ok) {
console.warn(
`Failed to check upstream digest for ${model.model}: ${response.status}`,
`Failed to check upstream for ${model.model}: ${response.status}`,
);
return { pushTime: 0 };
return { stale: false, exists: false };
}

const data = await response.json();

if (data.error) {
console.warn(`Upstream digest check: ${data.error}`);
return { error: data.error, pushTime: 0 };
console.warn(`Upstream check: ${data.error}`);
return { stale: false, exists: false, error: data.error };
}

return { digest: data.digest, pushTime: data.pushTime || 0 };
return { stale: !!data.stale, exists: true };
} catch (error) {
console.warn(`Error checking model staleness:`, error);
return { pushTime: 0 };
return { stale: false, exists: false };
}
}

@@ -370,7 +406,7 @@ export async function* pullModel(
}
}

export async function getInferenceCompute(): Promise<InferenceCompute[]> {
export async function getInferenceCompute(): Promise<InferenceComputeResponse> {
const response = await fetch(`${API_BASE}/api/v1/inference-compute`);
if (!response.ok) {
throw new Error(
@@ -379,13 +415,13 @@ export async function getInferenceCompute(): Promise<InferenceCompute[]> {
}

const data = await response.json();
const inferenceComputeResponse = new InferenceComputeResponse(data);
return inferenceComputeResponse.inferenceComputes || [];
return new InferenceComputeResponse(data);
}

export async function fetchHealth(): Promise<boolean> {
try {
const response = await fetch(`${API_BASE}/api/v1/health`, {
// Use the /api/version endpoint as a health check
const response = await fetch(`${API_BASE}/api/version`, {
method: "GET",
headers: {
"Content-Type": "application/json",
@@ -394,7 +430,8 @@ export async function fetchHealth(): Promise<boolean> {

if (response.ok) {
const data = await response.json();
return data.healthy || false;
// If we get a version back, the server is healthy
return !!data.version;
}

return false;
@@ -403,3 +440,16 @@ export async function fetchHealth(): Promise<boolean> {
return false;
}
}

export async function getCloudStatus(): Promise<CloudStatusResponse | null> {
const response = await fetch(`${API_BASE}/api/v1/cloud`);
if (!response.ok) {
throw new Error(`Failed to fetch cloud status: ${response.status}`);
}

const data = await response.json();
return {
disabled: Boolean(data.disabled),
source: (data.source as CloudStatusSource) || "none",
};
}

@@ -17,11 +17,15 @@ import {
|
||||
} from "@/hooks/useChats";
|
||||
import { useNavigate } from "@tanstack/react-router";
|
||||
import { useSelectedModel } from "@/hooks/useSelectedModel";
|
||||
import { useHasVisionCapability } from "@/hooks/useModelCapabilities";
|
||||
import {
|
||||
useHasVisionCapability,
|
||||
useHasToolsCapability,
|
||||
} from "@/hooks/useModelCapabilities";
|
||||
import { useUser } from "@/hooks/useUser";
|
||||
import { DisplayLogin } from "@/components/DisplayLogin";
|
||||
import { ErrorEvent, Message } from "@/gotypes";
|
||||
import { useSettings } from "@/hooks/useSettings";
|
||||
import { useCloudStatus } from "@/hooks/useCloudStatus";
|
||||
import { ThinkButton } from "./ThinkButton";
|
||||
import { ErrorMessage } from "./ErrorMessage";
|
||||
import { processFiles } from "@/utils/fileValidation";
|
||||
@@ -141,19 +145,14 @@ function ChatForm({
|
||||
const {
|
||||
settings: {
|
||||
webSearchEnabled,
|
||||
airplaneMode,
|
||||
thinkEnabled,
|
||||
thinkLevel: settingsThinkLevel,
|
||||
},
|
||||
setSettings,
|
||||
} = useSettings();
|
||||
const { cloudDisabled } = useCloudStatus();
|
||||
|
||||
// current supported models for web search
|
||||
const modelLower = selectedModel?.model.toLowerCase() || "";
|
||||
const supportsWebSearch =
|
||||
modelLower.startsWith("gpt-oss") ||
|
||||
modelLower.startsWith("qwen3") ||
|
||||
modelLower.startsWith("deepseek-v3");
|
||||
const supportsWebSearch = useHasToolsCapability(selectedModel?.model);
|
||||
// Use per-chat thinking level instead of global
|
||||
const thinkLevel: ThinkingLevel =
|
||||
settingsThinkLevel === "none" || !settingsThinkLevel
|
||||
@@ -180,6 +179,12 @@ function ChatForm({
|
||||
setSettings,
|
||||
]);
|
||||
|
||||
useEffect(() => {
|
||||
if (cloudDisabled && webSearchEnabled) {
|
||||
setSettings({ WebSearchEnabled: false });
|
||||
}
|
||||
}, [cloudDisabled, webSearchEnabled, setSettings]);
|
||||
|
||||
const removeFile = (index: number) => {
|
||||
setMessage((prev) => ({
|
||||
...prev,
|
||||
@@ -234,19 +239,19 @@ function ChatForm({
|
||||
|
||||
// Determine if login banner should be shown
|
||||
const shouldShowLoginBanner =
|
||||
!cloudDisabled &&
|
||||
!isLoadingUser &&
|
||||
!isAuthenticated &&
|
||||
((webSearchEnabled && supportsWebSearch) ||
|
||||
(selectedModel?.isCloud() && !airplaneMode));
|
||||
((webSearchEnabled && supportsWebSearch) || selectedModel?.isCloud());
|
||||
|
||||
// Determine which feature to highlight in the banner
|
||||
const getActiveFeatureForBanner = () => {
|
||||
if (cloudDisabled) return null;
|
||||
if (!isAuthenticated) {
|
||||
if (loginPromptFeature) return loginPromptFeature;
|
||||
if (webSearchEnabled && selectedModel?.isCloud() && !airplaneMode)
|
||||
return "webSearch";
|
||||
if (webSearchEnabled && selectedModel?.isCloud()) return "webSearch";
|
||||
if (webSearchEnabled) return "webSearch";
|
||||
if (selectedModel?.isCloud() && !airplaneMode) return "turbo";
|
||||
if (selectedModel?.isCloud()) return "turbo";
|
||||
}
|
||||
return null;
|
||||
};
|
||||
@@ -269,11 +274,12 @@ function ChatForm({
|
||||
useEffect(() => {
|
||||
if (
|
||||
isAuthenticated ||
|
||||
(!webSearchEnabled && !!selectedModel?.isCloud() && !airplaneMode)
|
||||
cloudDisabled ||
|
||||
(!webSearchEnabled && !!selectedModel?.isCloud())
|
||||
) {
|
||||
setLoginPromptFeature(null);
|
||||
}
|
||||
}, [isAuthenticated, webSearchEnabled, selectedModel, airplaneMode]);
|
||||
}, [isAuthenticated, webSearchEnabled, selectedModel, cloudDisabled]);
|
||||
|
||||
// When entering edit mode, populate the composition with existing data
|
||||
useEffect(() => {
|
||||
@@ -465,20 +471,27 @@ function ChatForm({
|
||||
const handleSubmit = async () => {
|
||||
if (!message.content.trim() || isStreaming || isDownloading) return;
|
||||
|
||||
if (cloudDisabled && selectedModel?.isCloud()) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if cloud mode is enabled but user is not authenticated
|
||||
if (shouldShowLoginBanner) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Prepare attachments for submission
|
||||
const attachmentsToSend: FileAttachment[] = message.attachments.map(
|
||||
(att) => ({
|
||||
// Prepare attachments for submission, excluding unsupported images
|
||||
const attachmentsToSend: FileAttachment[] = message.attachments
|
||||
.filter(
|
||||
(att) => hasVisionCapability || !isImageFile(att.filename),
|
||||
)
|
||||
.map((att) => ({
|
||||
filename: att.filename,
|
||||
data: att.data || new Uint8Array(0), // Empty data for existing files
|
||||
}),
|
||||
);
|
||||
}));
|
||||
|
||||
const useWebSearch = supportsWebSearch && webSearchEnabled && !airplaneMode;
|
||||
const useWebSearch =
|
||||
supportsWebSearch && webSearchEnabled && !cloudDisabled;
|
||||
const useThink = modelSupportsThinkingLevels
|
||||
? thinkLevel
|
||||
: supportsThinkToggling
|
||||
@@ -725,10 +738,17 @@ function ChatForm({
|
||||
)}
|
||||
{(message.attachments.length > 0 || message.fileErrors.length > 0) && (
|
||||
<div className="flex gap-2 overflow-x-auto px-3 pt pb-3 w-full scrollbar-hide">
|
||||
{message.attachments.map((attachment, index) => (
|
||||
{message.attachments.map((attachment, index) => {
|
||||
const isUnsupportedImage =
|
||||
!hasVisionCapability && isImageFile(attachment.filename);
|
||||
return (
|
||||
<div
|
||||
key={attachment.id}
|
||||
className="group flex items-center gap-2 py-2 px-3 rounded-lg bg-neutral-50 dark:bg-neutral-700/50 hover:bg-neutral-100 dark:hover:bg-neutral-700 transition-colors flex-shrink-0"
|
||||
className={`group flex items-center gap-2 py-2 px-3 rounded-lg transition-colors flex-shrink-0 ${
|
||||
isUnsupportedImage
|
||||
? "bg-red-50 dark:bg-red-900/20 border border-red-200 dark:border-red-800"
|
||||
: "bg-neutral-50 dark:bg-neutral-700/50 hover:bg-neutral-100 dark:hover:bg-neutral-700"
|
||||
}`}
|
||||
>
|
||||
{isImageFile(attachment.filename) ? (
|
||||
<ImageThumbnail
|
||||
@@ -753,9 +773,16 @@ function ChatForm({
|
||||
/>
|
||||
</svg>
|
||||
)}
|
||||
<span className="text-sm text-neutral-700 dark:text-neutral-300 max-w-[150px] truncate">
|
||||
{attachment.filename}
|
||||
</span>
|
||||
<div className="flex flex-col min-w-0">
|
||||
<span className={`text-sm max-w-36 truncate ${isUnsupportedImage ? "text-red-700 dark:text-red-300" : "text-neutral-700 dark:text-neutral-300"}`}>
|
||||
{attachment.filename}
|
||||
</span>
|
||||
{isUnsupportedImage && (
|
||||
<span className="text-xs text-red-600 dark:text-red-400 opacity-75">
|
||||
This model does not support images
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => removeFile(index)}
|
||||
@@ -777,7 +804,8 @@ function ChatForm({
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
))}
|
||||
);
|
||||
})}
|
||||
{message.fileErrors.map((fileError, index) => (
|
||||
<div
|
||||
key={`error-${index}`}
|
||||
@@ -899,7 +927,7 @@ function ChatForm({
|
||||
)}
|
||||
<WebSearchButton
|
||||
ref={webSearchButtonRef}
|
||||
isVisible={supportsWebSearch && airplaneMode === false}
|
||||
isVisible={supportsWebSearch && cloudDisabled === false}
|
||||
isActive={webSearchEnabled}
|
||||
onToggle={() => {
|
||||
if (!webSearchEnabled && !isAuthenticated) {
|
||||
@@ -940,6 +968,7 @@ function ChatForm({
|
||||
!isDownloading &&
|
||||
(!message.content.trim() ||
|
||||
shouldShowLoginBanner ||
|
||||
(cloudDisabled && selectedModel?.isCloud()) ||
|
||||
message.fileErrors.length > 0)
|
||||
}
|
||||
className={`flex items-center justify-center h-9 w-9 rounded-full disabled:cursor-default cursor-pointer bg-black text-white dark:bg-white dark:text-black disabled:opacity-10 focus:outline-none focus:ring-2 focus:ring-blue-500`}
|
||||
|
||||
@@ -6,12 +6,13 @@ import { getChat } from "@/api";
|
||||
import { Link } from "@/components/ui/link";
|
||||
import { useState, useRef, useEffect, useCallback, useMemo } from "react";
|
||||
import { ChatsResponse } from "@/gotypes";
|
||||
import { CogIcon } from "@heroicons/react/24/outline";
|
||||
import { CogIcon, RocketLaunchIcon } from "@heroicons/react/24/outline";
|
||||
|
||||
// there's a hidden debug feature to copy a chat's data to the clipboard by
|
||||
// holding shift and clicking this many times within this many seconds
|
||||
const DEBUG_SHIFT_CLICKS_REQUIRED = 5;
|
||||
const DEBUG_SHIFT_CLICK_WINDOW_MS = 7000; // 7 seconds
|
||||
const launchSidebarRequestedKey = "ollama.launchSidebarRequested";
|
||||
|
||||
interface ChatSidebarProps {
|
||||
currentChatId?: string;
|
||||
@@ -267,9 +268,8 @@ export function ChatSidebar({ currentChatId }: ChatSidebarProps) {
|
||||
<Link
|
||||
href="/c/new"
|
||||
mask={{ to: "/" }}
|
||||
className={`flex w-full items-center gap-3 rounded-lg px-2 py-2 text-left text-sm text-neutral-700 hover:bg-neutral-100 dark:hover:bg-neutral-800 dark:text-neutral-100 ${
|
||||
currentChatId === "new" ? "bg-neutral-100 dark:bg-neutral-800" : ""
|
||||
}`}
|
||||
className={`flex w-full items-center gap-3 rounded-lg px-2 py-2 text-left text-sm text-neutral-700 hover:bg-neutral-100 dark:hover:bg-neutral-800 dark:text-neutral-100 ${currentChatId === "new" ? "bg-neutral-100 dark:bg-neutral-800" : ""
|
||||
}`}
|
||||
draggable={false}
|
||||
>
|
||||
<svg
|
||||
@@ -283,6 +283,23 @@ export function ChatSidebar({ currentChatId }: ChatSidebarProps) {
|
||||
</svg>
|
||||
<span className="truncate">New Chat</span>
|
||||
</Link>
|
||||
<Link
|
||||
to="/c/$chatId"
|
||||
params={{ chatId: "launch" }}
|
||||
onClick={() => {
|
||||
if (currentChatId !== "launch") {
|
||||
sessionStorage.setItem(launchSidebarRequestedKey, "1");
|
||||
}
|
||||
}}
|
||||
className={`flex w-full items-center gap-3 rounded-lg px-2 py-2 text-left text-sm text-neutral-700 hover:bg-neutral-100 dark:hover:bg-neutral-800 dark:text-neutral-100 cursor-pointer ${currentChatId === "launch"
|
||||
? "bg-neutral-100 dark:bg-neutral-800"
|
||||
: ""
|
||||
}`}
|
||||
draggable={false}
|
||||
>
|
||||
<RocketLaunchIcon className="h-5 w-5 stroke-current" />
|
||||
<span className="truncate">Launch</span>
|
||||
</Link>
|
||||
{isWindows && (
|
||||
<Link
|
||||
href="/settings"
|
||||
@@ -304,19 +321,18 @@ export function ChatSidebar({ currentChatId }: ChatSidebarProps) {
|
||||
{group.chats.map((chat) => (
|
||||
<div
|
||||
key={chat.id}
|
||||
className={`allow-context-menu flex items-center relative text-sm text-neutral-800 dark:text-neutral-400 rounded-lg hover:bg-neutral-100 dark:hover:bg-neutral-800 ${
|
||||
chat.id === currentChatId
|
||||
? "bg-neutral-100 text-black dark:bg-neutral-800"
|
||||
: ""
|
||||
}`}
|
||||
className={`allow-context-menu flex items-center relative text-sm text-neutral-800 dark:text-neutral-400 rounded-lg hover:bg-neutral-100 dark:hover:bg-neutral-800 ${chat.id === currentChatId
|
||||
? "bg-neutral-100 text-black dark:bg-neutral-800"
|
||||
: ""
|
||||
}`}
|
||||
onMouseEnter={() => handleMouseEnter(chat.id)}
|
||||
onContextMenu={(e) =>
|
||||
handleContextMenu(
|
||||
e,
|
||||
chat.id,
|
||||
chat.title ||
|
||||
chat.userExcerpt ||
|
||||
chat.createdAt.toLocaleString(),
|
||||
chat.userExcerpt ||
|
||||
chat.createdAt.toLocaleString(),
|
||||
)
|
||||
}
|
||||
>
|
||||
|
||||
@@ -10,6 +10,7 @@ interface CopyButtonProps {
|
||||
showLabels?: boolean;
|
||||
className?: string;
|
||||
title?: string;
|
||||
onCopy?: () => void;
|
||||
}
|
||||
|
||||
const CopyButton: React.FC<CopyButtonProps> = ({
|
||||
@@ -20,6 +21,7 @@ const CopyButton: React.FC<CopyButtonProps> = ({
|
||||
showLabels = false,
|
||||
className = "",
|
||||
title = "",
|
||||
onCopy,
|
||||
}) => {
|
||||
const [isCopied, setIsCopied] = useState(false);
|
||||
|
||||
@@ -48,12 +50,14 @@ const CopyButton: React.FC<CopyButtonProps> = ({
|
||||
}
|
||||
|
||||
setIsCopied(true);
|
||||
onCopy?.();
|
||||
setTimeout(() => setIsCopied(false), 2000);
|
||||
} catch (error) {
|
||||
console.error("Clipboard API failed, falling back to plain text", error);
|
||||
try {
|
||||
await navigator.clipboard.writeText(content);
|
||||
setIsCopied(true);
|
||||
onCopy?.();
|
||||
setTimeout(() => setIsCopied(false), 2000);
|
||||
} catch (fallbackError) {
|
||||
console.error("Fallback copy also failed:", fallbackError);
|
||||
|
||||
app/ui/app/src/components/LaunchCommands.tsx (new file, 133 lines added)
@@ -0,0 +1,133 @@
|
||||
import { useSettings } from "@/hooks/useSettings";
|
||||
import CopyButton from "@/components/CopyButton";
|
||||
|
||||
interface LaunchCommand {
|
||||
id: string;
|
||||
name: string;
|
||||
command: string;
|
||||
description: string;
|
||||
icon: string;
|
||||
darkIcon?: string;
|
||||
iconClassName?: string;
|
||||
borderless?: boolean;
|
||||
}
|
||||
|
||||
const LAUNCH_COMMANDS: LaunchCommand[] = [
|
||||
{
|
||||
id: "openclaw",
|
||||
name: "OpenClaw",
|
||||
command: "ollama launch openclaw",
|
||||
description: "Personal AI with 100+ skills",
|
||||
icon: "/launch-icons/openclaw.svg",
|
||||
},
|
||||
{
|
||||
id: "claude",
|
||||
name: "Claude",
|
||||
command: "ollama launch claude",
|
||||
description: "Anthropic's coding tool with subagents",
|
||||
icon: "/launch-icons/claude.svg",
|
||||
iconClassName: "h-7 w-7",
|
||||
},
|
||||
{
|
||||
id: "codex",
|
||||
name: "Codex",
|
||||
command: "ollama launch codex",
|
||||
description: "OpenAI's open-source coding agent",
|
||||
icon: "/launch-icons/codex.svg",
|
||||
darkIcon: "/launch-icons/codex-dark.svg",
|
||||
iconClassName: "h-7 w-7",
|
||||
},
|
||||
{
|
||||
id: "opencode",
|
||||
name: "OpenCode",
|
||||
command: "ollama launch opencode",
|
||||
description: "Anomaly's open-source coding agent",
|
||||
icon: "/launch-icons/opencode.svg",
|
||||
iconClassName: "h-7 w-7 rounded",
|
||||
},
|
||||
{
|
||||
id: "droid",
|
||||
name: "Droid",
|
||||
command: "ollama launch droid",
|
||||
description: "Factory's coding agent across terminal and IDEs",
|
||||
icon: "/launch-icons/droid.svg",
|
||||
},
|
||||
{
|
||||
id: "pi",
|
||||
name: "Pi",
|
||||
command: "ollama launch pi",
|
||||
description: "Minimal AI agent toolkit with plugin support",
|
||||
icon: "/launch-icons/pi.svg",
|
||||
darkIcon: "/launch-icons/pi-dark.svg",
|
||||
iconClassName: "h-7 w-7",
|
||||
},
|
||||
];
|
||||
|
||||
export default function LaunchCommands() {
|
||||
const isWindows = navigator.platform.toLowerCase().includes("win");
|
||||
const { setSettings } = useSettings();
|
||||
|
||||
const renderCommandCard = (item: LaunchCommand) => (
|
||||
<div key={item.command} className="w-full text-left">
|
||||
<div className="flex items-start gap-4 sm:gap-5">
|
||||
<div
|
||||
aria-hidden="true"
|
||||
className={`flex h-10 w-10 shrink-0 items-center justify-center rounded-lg overflow-hidden ${item.borderless ? "" : "border border-neutral-200 bg-white dark:border-neutral-700 dark:bg-neutral-900"}`}
|
||||
>
|
||||
{item.darkIcon ? (
|
||||
<picture>
|
||||
<source srcSet={item.darkIcon} media="(prefers-color-scheme: dark)" />
|
||||
<img src={item.icon} alt="" className={`${item.iconClassName ?? "h-8 w-8"} rounded-sm`} />
|
||||
</picture>
|
||||
) : (
|
||||
<img src={item.icon} alt="" className={item.borderless ? "h-full w-full rounded-xl" : `${item.iconClassName ?? "h-8 w-8"} rounded-sm`} />
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="min-w-0 flex-1">
|
||||
<span className="text-sm font-medium text-neutral-900 dark:text-neutral-100">
|
||||
{item.name}
|
||||
</span>
|
||||
<p className="mt-0.5 text-xs text-neutral-500 dark:text-neutral-400">
|
||||
{item.description}
|
||||
</p>
|
||||
<div className="mt-2 flex items-center gap-2 rounded-xl border-neutral-200 dark:border-neutral-700 bg-neutral-50 dark:bg-neutral-800 px-3 py-2">
|
||||
<code className="min-w-0 flex-1 truncate text-xs text-neutral-600 dark:text-neutral-300">
|
||||
{item.command}
|
||||
</code>
|
||||
<CopyButton
|
||||
content={item.command}
|
||||
size="md"
|
||||
title="Copy command to clipboard"
|
||||
className="text-neutral-500 dark:text-neutral-400 hover:text-neutral-700 dark:hover:text-neutral-200 hover:bg-neutral-200/60 dark:hover:bg-neutral-700/70"
|
||||
onCopy={() => {
|
||||
setSettings({ LastHomeView: item.id }).catch(() => { });
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
return (
|
||||
<main className="flex h-screen w-full flex-col relative">
|
||||
<section
|
||||
className={`flex-1 overflow-y-auto overscroll-contain relative min-h-0 ${isWindows ? "xl:pt-4" : "xl:pt-8"}`}
|
||||
>
|
||||
<div className="max-w-[730px] mx-auto w-full px-4 pt-4 pb-20 sm:px-6 sm:pt-6 sm:pb-24 lg:px-8 lg:pt-8 lg:pb-28">
|
||||
<h1 className="text-xl font-semibold text-neutral-900 dark:text-neutral-100">
|
||||
Launch
|
||||
</h1>
|
||||
<p className="mt-1 text-sm text-neutral-500 dark:text-neutral-400">
|
||||
Copy a command and run it in your terminal.
|
||||
</p>
|
||||
|
||||
<div className="mt-6 grid gap-7">
|
||||
{LAUNCH_COMMANDS.map(renderCommandCard)}
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
);
|
||||
}
|
||||
@@ -536,7 +536,7 @@ function ToolCallDisplay({
|
||||
let args: Record<string, unknown> | null = null;
|
||||
try {
|
||||
args = JSON.parse(toolCall.function.arguments) as Record<string, unknown>;
|
||||
} catch (e) {
|
||||
} catch {
|
||||
args = null;
|
||||
}
|
||||
const query = args && typeof args.query === "string" ? args.query : "";
|
||||
@@ -562,7 +562,7 @@ function ToolCallDisplay({
|
||||
let args: Record<string, unknown> | null = null;
|
||||
try {
|
||||
args = JSON.parse(toolCall.function.arguments) as Record<string, unknown>;
|
||||
} catch (e) {
|
||||
} catch {
|
||||
args = null;
|
||||
}
|
||||
const url = args && typeof args.url === "string" ? args.url : "";
|
||||
|
||||
@@ -73,7 +73,7 @@ export default function MessageList({
|
||||
? String(args.url).trim()
|
||||
: "";
|
||||
if (candidate) lastQuery = candidate;
|
||||
} catch {}
|
||||
} catch { /* ignored */ }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import {
|
||||
} from "react";
|
||||
import { Model } from "@/gotypes";
|
||||
import { useSelectedModel } from "@/hooks/useSelectedModel";
|
||||
import { useSettings } from "@/hooks/useSettings";
|
||||
import { useCloudStatus } from "@/hooks/useCloudStatus";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { getModelUpstreamInfo } from "@/api";
|
||||
import { ArrowDownTrayIcon } from "@heroicons/react/24/outline";
|
||||
@@ -34,7 +34,7 @@ export const ModelPicker = forwardRef<
|
||||
chatId,
|
||||
searchQuery,
|
||||
);
|
||||
const { settings } = useSettings();
|
||||
const { cloudDisabled } = useCloudStatus();
|
||||
const dropdownRef = useRef<HTMLDivElement>(null);
|
||||
const searchInputRef = useRef<HTMLInputElement>(null);
|
||||
const queryClient = useQueryClient();
|
||||
@@ -61,24 +61,7 @@ export const ModelPicker = forwardRef<
|
||||
try {
|
||||
const upstreamInfo = await getModelUpstreamInfo(model);
|
||||
|
||||
// Compare local digest with upstream digest
|
||||
let isStale =
|
||||
model.digest &&
|
||||
upstreamInfo.digest &&
|
||||
model.digest !== upstreamInfo.digest;
|
||||
|
||||
// If the model has a modified time and upstream has a push time,
|
||||
// check if the model was modified after the push time - if so, it's not stale
|
||||
if (isStale && model.modified_at && upstreamInfo.pushTime > 0) {
|
||||
const modifiedAtTime =
|
||||
new Date(model.modified_at as string | number | Date).getTime() /
|
||||
1000;
|
||||
if (modifiedAtTime > upstreamInfo.pushTime) {
|
||||
isStale = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (isStale) {
|
||||
if (upstreamInfo.stale) {
|
||||
const currentStaleModels =
|
||||
queryClient.getQueryData<Map<string, boolean>>(["staleModels"]) ||
|
||||
new Map();
|
||||
@@ -219,7 +202,7 @@ export const ModelPicker = forwardRef<
|
||||
models={models}
|
||||
selectedModel={selectedModel}
|
||||
onModelSelect={handleModelSelect}
|
||||
airplaneMode={settings.airplaneMode}
|
||||
cloudDisabled={cloudDisabled}
|
||||
isOpen={isOpen}
|
||||
/>
|
||||
</div>
|
||||
@@ -233,13 +216,13 @@ export const ModelList = forwardRef(function ModelList(
|
||||
models,
|
||||
selectedModel,
|
||||
onModelSelect,
|
||||
airplaneMode,
|
||||
cloudDisabled,
|
||||
isOpen,
|
||||
}: {
|
||||
models: Model[];
|
||||
selectedModel: Model | null;
|
||||
onModelSelect: (model: Model) => void;
|
||||
airplaneMode: boolean;
|
||||
cloudDisabled: boolean;
|
||||
isOpen: boolean;
|
||||
},
|
||||
ref,
|
||||
@@ -348,7 +331,7 @@ export const ModelList = forwardRef(function ModelList(
|
||||
</svg>
|
||||
)}
|
||||
{model.digest === undefined &&
|
||||
(airplaneMode || !model.isCloud()) && (
|
||||
(cloudDisabled || !model.isCloud()) && (
|
||||
<ArrowDownTrayIcon
|
||||
className="h-4 w-4 text-neutral-500 dark:text-neutral-400"
|
||||
strokeWidth={1.75}
|
||||
|
||||
@@ -11,15 +11,24 @@ import {
|
||||
FolderIcon,
|
||||
BoltIcon,
|
||||
WrenchIcon,
|
||||
CloudIcon,
|
||||
XMarkIcon,
|
||||
CogIcon,
|
||||
ArrowLeftIcon,
|
||||
ArrowDownTrayIcon,
|
||||
} from "@heroicons/react/20/solid";
|
||||
import { Settings as SettingsType } from "@/gotypes";
|
||||
import { useNavigate } from "@tanstack/react-router";
|
||||
import { useUser } from "@/hooks/useUser";
|
||||
import { useCloudStatus } from "@/hooks/useCloudStatus";
|
||||
import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query";
|
||||
import { getSettings, updateSettings } from "@/api";
|
||||
import {
|
||||
getSettings,
|
||||
type CloudStatusResponse,
|
||||
updateCloudSetting,
|
||||
updateSettings,
|
||||
getInferenceCompute,
|
||||
} from "@/api";
|
||||
|
||||
function AnimatedDots() {
|
||||
return (
|
||||
@@ -53,6 +62,11 @@ export default function Settings() {
|
||||
const [connectionError, setConnectionError] = useState<string | null>(null);
|
||||
const [pollingInterval, setPollingInterval] = useState<number | null>(null);
|
||||
const navigate = useNavigate();
|
||||
const {
|
||||
cloudDisabled,
|
||||
cloudStatus,
|
||||
isLoading: cloudStatusLoading,
|
||||
} = useCloudStatus();
|
||||
|
||||
const {
|
||||
data: settingsData,
|
||||
@@ -65,6 +79,13 @@ export default function Settings() {
|
||||
|
||||
const settings = settingsData?.settings || null;
|
||||
|
||||
const { data: inferenceComputeResponse } = useQuery({
|
||||
queryKey: ["inferenceCompute"],
|
||||
queryFn: getInferenceCompute,
|
||||
});
|
||||
|
||||
const defaultContextLength = inferenceComputeResponse?.defaultContextLength;
|
||||
|
||||
const updateSettingsMutation = useMutation({
|
||||
mutationFn: updateSettings,
|
||||
onSuccess: () => {
|
||||
@@ -74,6 +95,50 @@ export default function Settings() {
|
||||
},
|
||||
});
|
||||
|
||||
const updateCloudMutation = useMutation({
|
||||
mutationFn: (enabled: boolean) => updateCloudSetting(enabled),
|
||||
onMutate: async (enabled: boolean) => {
|
||||
await queryClient.cancelQueries({ queryKey: ["cloudStatus"] });
|
||||
|
||||
const previous = queryClient.getQueryData<CloudStatusResponse | null>([
|
||||
"cloudStatus",
|
||||
]);
|
||||
const envForcesDisabled =
|
||||
previous?.source === "env" || previous?.source === "both";
|
||||
|
||||
queryClient.setQueryData<CloudStatusResponse | null>(
|
||||
["cloudStatus"],
|
||||
previous
|
||||
? {
|
||||
...previous,
|
||||
disabled: !enabled || envForcesDisabled,
|
||||
}
|
||||
: {
|
||||
disabled: !enabled,
|
||||
source: "config",
|
||||
},
|
||||
);
|
||||
|
||||
return { previous };
|
||||
},
|
||||
onError: (_error, _enabled, context) => {
|
||||
if (context?.previous !== undefined) {
|
||||
queryClient.setQueryData(["cloudStatus"], context.previous);
|
||||
}
|
||||
},
|
||||
onSuccess: (status) => {
|
||||
queryClient.setQueryData<CloudStatusResponse | null>(
|
||||
["cloudStatus"],
|
||||
status,
|
||||
);
|
||||
queryClient.invalidateQueries({ queryKey: ["models"] });
|
||||
queryClient.invalidateQueries({ queryKey: ["cloudStatus"] });
|
||||
|
||||
setShowSaved(true);
|
||||
setTimeout(() => setShowSaved(false), 1500);
|
||||
},
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
refetchUser();
|
||||
}, []); // eslint-disable-line react-hooks/exhaustive-deps
|
||||
@@ -148,13 +213,18 @@ export default function Settings() {
|
||||
Models: "",
|
||||
Agent: false,
|
||||
Tools: false,
|
||||
ContextLength: 4096,
|
||||
AirplaneMode: false,
|
||||
ContextLength: 0,
|
||||
AutoUpdateEnabled: true,
|
||||
});
|
||||
updateSettingsMutation.mutate(defaultSettings);
|
||||
}
|
||||
};
|
||||
|
||||
const cloudOverriddenByEnv =
|
||||
cloudStatus?.source === "env" || cloudStatus?.source === "both";
|
||||
const cloudToggleDisabled =
|
||||
cloudStatusLoading || updateCloudMutation.isPending || cloudOverriddenByEnv;
|
||||
|
||||
const handleConnectOllamaAccount = async () => {
|
||||
setConnectionError(null);
|
||||
|
||||
@@ -203,6 +273,10 @@ export default function Settings() {
|
||||
}
|
||||
|
||||
const isWindows = navigator.platform.toLowerCase().includes("win");
|
||||
const handleCloseSettings = () => {
|
||||
const chatId = settings.LastHomeView === "chat" ? "new" : "launch";
|
||||
navigate({ to: "/c/$chatId", params: { chatId } });
|
||||
};
|
||||
|
||||
return (
|
||||
<main className="flex h-screen w-full flex-col select-none dark:bg-neutral-900">
|
||||
@@ -216,7 +290,7 @@ export default function Settings() {
|
||||
>
|
||||
{isWindows && (
|
||||
<button
|
||||
onClick={() => navigate({ to: "/" })}
|
||||
onClick={handleCloseSettings}
|
||||
className="hover:bg-neutral-100 mr-3 dark:hover:bg-neutral-800 rounded-full p-1.5"
|
||||
>
|
||||
<ArrowLeftIcon className="w-5 h-5 dark:text-white" />
|
||||
@@ -226,7 +300,7 @@ export default function Settings() {
|
||||
</h1>
|
||||
{!isWindows && (
|
||||
<button
|
||||
onClick={() => navigate({ to: "/" })}
|
||||
onClick={handleCloseSettings}
|
||||
className="p-1 hover:bg-neutral-100 mr-3 dark:hover:bg-neutral-800 rounded-full"
|
||||
>
|
||||
<XMarkIcon className="w-6 h-6 dark:text-white" />
|
||||
@@ -237,7 +311,7 @@ export default function Settings() {
|
||||
<div className="space-y-4 max-w-2xl mx-auto">
|
||||
{/* Connect Ollama Account */}
|
||||
<div className="overflow-hidden rounded-xl bg-white dark:bg-neutral-800">
|
||||
<div className="p-4 border-b border-neutral-200 dark:border-neutral-800">
|
||||
<div className="p-4">
|
||||
<Field>
|
||||
{isLoading ? (
|
||||
// Loading skeleton, this will only happen if the app started recently
|
||||
@@ -299,9 +373,9 @@ export default function Settings() {
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
{user?.avatarURL && (
|
||||
{user?.avatarurl && (
|
||||
<img
|
||||
src={user.avatarURL}
|
||||
src={user.avatarurl}
|
||||
alt={user?.name}
|
||||
className="h-10 w-10 rounded-full bg-neutral-200 dark:bg-neutral-700 flex-shrink-0"
|
||||
onError={(e) => {
|
||||
@@ -344,6 +418,57 @@ export default function Settings() {
|
||||
{/* Local Configuration */}
|
||||
<div className="relative overflow-hidden rounded-xl bg-white dark:bg-neutral-800">
|
||||
<div className="space-y-4 p-4">
|
||||
<Field>
|
||||
<div className="flex items-start justify-between gap-4">
|
||||
<div className="flex items-start space-x-3 flex-1">
|
||||
<CloudIcon className="mt-1 h-5 w-5 flex-shrink-0 text-black dark:text-neutral-100" />
|
||||
<div>
|
||||
<Label>Cloud</Label>
|
||||
<Description>
|
||||
{cloudOverriddenByEnv
|
||||
? "The OLLAMA_NO_CLOUD environment variable is currently forcing cloud off."
|
||||
: "Enable cloud models and web search."}
|
||||
</Description>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex-shrink-0">
|
||||
<Switch
|
||||
checked={!cloudDisabled}
|
||||
disabled={cloudToggleDisabled}
|
||||
onChange={(checked) => {
|
||||
if (cloudOverriddenByEnv) {
|
||||
return;
|
||||
}
|
||||
updateCloudMutation.mutate(checked);
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</Field>
|
||||
|
||||
{/* Auto Update */}
|
||||
<Field>
|
||||
<div className="flex items-start justify-between gap-4">
|
||||
<div className="flex items-start space-x-3 flex-1">
|
||||
<ArrowDownTrayIcon className="mt-1 h-5 w-5 flex-shrink-0 text-black dark:text-neutral-100" />
|
||||
<div>
|
||||
<Label>Auto-download updates</Label>
|
||||
<Description>
|
||||
{settings.AutoUpdateEnabled
|
||||
? "Automatically download updates when available."
|
||||
: "Updates will not be downloaded automatically."}
|
||||
</Description>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex-shrink-0">
|
||||
<Switch
|
||||
checked={settings.AutoUpdateEnabled}
|
||||
onChange={(checked) => handleChange("AutoUpdateEnabled", checked)}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</Field>
|
||||
|
||||
{/* Expose Ollama */}
|
||||
<Field>
|
||||
<div className="flex items-start justify-between gap-4">
|
||||
@@ -419,13 +544,11 @@ export default function Settings() {
|
||||
</Description>
|
||||
<div className="mt-3">
|
||||
<Slider
|
||||
value={(() => {
|
||||
// Otherwise use the settings value
|
||||
return settings.ContextLength || 4096;
|
||||
})()}
|
||||
value={settings.ContextLength || defaultContextLength || 0}
|
||||
onChange={(value) => {
|
||||
handleChange("ContextLength", value);
|
||||
}}
|
||||
disabled={!defaultContextLength}
|
||||
options={[
|
||||
{ value: 4096, label: "4k" },
|
||||
{ value: 8192, label: "8k" },
|
||||
@@ -440,35 +563,6 @@ export default function Settings() {
|
||||
</div>
|
||||
</div>
|
||||
</Field>
|
||||
{/* Airplane Mode */}
|
||||
<Field>
|
||||
<div className="flex items-start justify-between gap-4">
|
||||
<div className="flex items-start space-x-3 flex-1">
|
||||
<svg
|
||||
className="mt-1 h-5 w-5 flex-shrink-0 text-black dark:text-neutral-100"
|
||||
viewBox="0 0 21.5508 17.9033"
|
||||
fill="currentColor"
|
||||
>
|
||||
<path d="M21.5508 8.94727C21.542 7.91895 20.1445 7.17188 18.4658 7.17188L14.9238 7.17188C14.4316 7.17188 14.2471 7.09277 13.957 6.75879L8.05078 0.316406C7.86621 0.105469 7.6377 0 7.37402 0L6.35449 0C6.12598 0 5.99414 0.202148 6.1084 0.448242L9.14941 7.17188L4.68457 7.68164L3.09375 4.76367C2.97949 4.54395 2.78613 4.44727 2.49609 4.44727L2.11816 4.44727C1.88965 4.44727 1.74023 4.59668 1.74023 4.8252L1.74023 13.0693C1.74023 13.2979 1.88965 13.4385 2.11816 13.4385L2.49609 13.4385C2.78613 13.4385 2.97949 13.3418 3.09375 13.1309L4.68457 10.2129L9.14941 10.7227L6.1084 17.4463C5.99414 17.6836 6.12598 17.8945 6.35449 17.8945L7.37402 17.8945C7.6377 17.8945 7.86621 17.7803 8.05078 17.5781L13.957 11.127C14.2471 10.8018 14.4316 10.7227 14.9238 10.7227L18.4658 10.7227C20.1445 10.7227 21.542 9.9668 21.5508 8.94727Z" />
|
||||
</svg>
|
||||
<div>
|
||||
<Label>Airplane mode</Label>
|
||||
<Description>
|
||||
Airplane mode keeps data local, disabling cloud models
|
||||
and web search.
|
||||
</Description>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex-shrink-0">
|
||||
<Switch
|
||||
checked={settings.AirplaneMode}
|
||||
onChange={(checked) =>
|
||||
handleChange("AirplaneMode", checked)
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</Field>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -1,522 +0,0 @@
|
||||
import { expect, test, suite } from "vitest";
|
||||
import { processStreamingMarkdown } from "@/utils/processStreamingMarkdown";
|
||||
|
||||
suite("common llm outputs that cause issues", () => {
|
||||
test("prefix of bolded list item shouldn't make a horizontal line", () => {
|
||||
// we're going to go in order of incrementally adding characters. This
|
||||
// happens really commonly with LLMs that like to make lists like so:
|
||||
//
|
||||
// * **point 1**: explanatory text
|
||||
// * **point 2**: more explanatory text
|
||||
//
|
||||
// Partial rendering of `*` (A), followed by `* *` (B), followed by `* **`
|
||||
// (C) is a total mess. (A) renders as a single bullet point in an
|
||||
// otherwise empty list, (B) renders as two nested lists (and therefore
|
||||
// two bullet points, styled differently by default in html), and (C)
|
||||
// renders as a horizontal line because in markdown apparently `***` or `*
|
||||
// * *` horizontal rules don't have as strict whitespace rules as I
|
||||
// expected them to
|
||||
|
||||
// these are alone (i.e., they would be the first list item)
|
||||
expect(processStreamingMarkdown("*")).toBe("");
|
||||
expect(processStreamingMarkdown("* *")).toBe("");
|
||||
expect(processStreamingMarkdown("* **")).toBe("");
|
||||
// expect(processStreamingMarkdown("* **b")).toBe("* **b**");
|
||||
|
||||
// with a list item before them
|
||||
expect(
|
||||
processStreamingMarkdown(
|
||||
// prettier-ignore
|
||||
[
|
||||
"* abc",
|
||||
"*"
|
||||
].join("\n"),
|
||||
),
|
||||
).toBe("* abc");
|
||||
|
||||
expect(
|
||||
processStreamingMarkdown(
|
||||
// prettier-ignore
|
||||
[
|
||||
"* abc",
|
||||
"* *"
|
||||
].join("\n"),
|
||||
),
|
||||
).toBe("* abc");
|
||||
|
||||
expect(
|
||||
processStreamingMarkdown(
|
||||
// prettier-ignore
|
||||
[
|
||||
"* abc",
|
||||
"* **"
|
||||
].join("\n"),
|
||||
),
|
||||
).toBe("* abc");
|
||||
});
|
||||
|
||||
test("bolded list items with text should be rendered properly", () => {
|
||||
expect(processStreamingMarkdown("* **abc**")).toBe("* **abc**");
|
||||
});
|
||||
|
||||
test("partially bolded list items should be autoclosed", () => {
|
||||
expect(processStreamingMarkdown("* **abc")).toBe("* **abc**");
|
||||
});
|
||||
|
||||
suite(
|
||||
"partially bolded list items should be autoclosed, even if the last node isn't a text node",
|
||||
() => {
|
||||
test("inline code", () => {
|
||||
expect(
|
||||
processStreamingMarkdown("* **Asynchronous Function `async`*"),
|
||||
).toBe("* **Asynchronous Function `async`**");
|
||||
});
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
suite("autoclosing bold", () => {
|
||||
suite("endings with no asterisks", () => {
|
||||
test("should autoclose bold", () => {
|
||||
expect(processStreamingMarkdown("**abc")).toBe("**abc**");
|
||||
expect(processStreamingMarkdown("abc **abc")).toBe("abc **abc**");
|
||||
});
|
||||
|
||||
suite("should autoclose, even if the last node isn't a text node", () => {
|
||||
test("inline code", () => {
|
||||
expect(
|
||||
processStreamingMarkdown("* **Asynchronous Function `async`"),
|
||||
).toBe("* **Asynchronous Function `async`**");
|
||||
});
|
||||
|
||||
test("opening ** is at the end of the text", () => {
|
||||
expect(processStreamingMarkdown("abc **`def` jhk [lmn](opq)")).toBe(
|
||||
"abc **`def` jhk [lmn](opq)**",
|
||||
);
|
||||
});
|
||||
|
||||
test("if there's a space after the **, it should NOT be autoclosed", () => {
|
||||
expect(processStreamingMarkdown("abc ** `def` jhk [lmn](opq)")).toBe(
|
||||
"abc \\*\\* `def` jhk [lmn](opq)",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
test("should autoclose bold, even if the last node isn't a text node", () => {
|
||||
expect(
|
||||
processStreamingMarkdown("* **Asynchronous Function ( `async`"),
|
||||
).toBe("* **Asynchronous Function ( `async`**");
|
||||
});
|
||||
|
||||
test("whitespace fakeouts should not be modified", () => {
|
||||
expect(processStreamingMarkdown("** abc")).toBe("\\*\\* abc");
|
||||
});
|
||||
|
||||
// TODO(drifkin): arguably this should just be removed entirely, but empty
|
||||
// isn't so bad
|
||||
test("should handle empty bolded items", () => {
|
||||
expect(processStreamingMarkdown("**")).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
suite("partially closed bolded items", () => {
|
||||
test("simple partial", () => {
|
||||
expect(processStreamingMarkdown("**abc*")).toBe("**abc**");
|
||||
});
|
||||
|
||||
test("partial with non-text node at end", () => {
|
||||
expect(processStreamingMarkdown("**abc`def`*")).toBe("**abc`def`**");
|
||||
});
|
||||
|
||||
test("partial with multiply nested ending nodes", () => {
|
||||
expect(processStreamingMarkdown("**abc[abc](`def`)*")).toBe(
|
||||
"**abc[abc](`def`)**",
|
||||
);
|
||||
});
|
||||
|
||||
test("normal emphasis should not be affected", () => {
|
||||
expect(processStreamingMarkdown("*abc*")).toBe("*abc*");
|
||||
});
|
||||
|
||||
test("normal emphasis with nested code should not be affected", () => {
|
||||
expect(processStreamingMarkdown("*`abc`*")).toBe("*`abc`*");
|
||||
});
|
||||
});
|
||||
|
||||
test.skip("shouldn't autoclose immediately if there's a space before the closing *", () => {
|
||||
expect(processStreamingMarkdown("**abc *")).toBe("**abc**");
|
||||
});
|
||||
|
||||
// skipping for now because this requires partial link completion as well
|
||||
suite.skip("nested blocks that each need autoclosing", () => {
|
||||
test("emph nested in link nested in strong nested in list item", () => {
|
||||
expect(processStreamingMarkdown("* **[abc **def")).toBe(
|
||||
"* **[abc **def**]()**",
|
||||
);
|
||||
});
|
||||
|
||||
test("* **[ab *`def`", () => {
|
||||
expect(processStreamingMarkdown("* **[ab *`def`")).toBe(
|
||||
"* **[ab *`def`*]()**",
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
suite("numbered list items", () => {
|
||||
test("should remove trailing numbers", () => {
|
||||
expect(processStreamingMarkdown("1. First\n2")).toBe("1. First");
|
||||
});
|
||||
|
||||
test("should remove trailing numbers with breaks before", () => {
|
||||
expect(processStreamingMarkdown("1. First \n2")).toBe("1. First");
|
||||
});
|
||||
|
||||
test("should remove trailing numbers that form a new paragraph", () => {
|
||||
expect(processStreamingMarkdown("1. First\n\n2")).toBe("1. First");
|
||||
});
|
||||
|
||||
test("but should leave list items separated by two newlines", () => {
|
||||
expect(processStreamingMarkdown("1. First\n\n2. S")).toBe(
|
||||
"1. First\n\n2. S",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// TODO(drifkin): slop tests ahead; some are decent, but I need to manually go
// through them as I implement
|
||||
/*
|
||||
describe("StreamingMarkdownContent - processStreamingMarkdown", () => {
|
||||
describe("Ambiguous endings removal", () => {
|
||||
it("should remove list markers at the end", () => {
|
||||
expect(processStreamingMarkdown("Some text\n* ")).toBe("Some text");
|
||||
expect(processStreamingMarkdown("Some text\n*")).toBe("Some text");
|
||||
expect(processStreamingMarkdown("* Item 1\n- ")).toBe("* Item 1");
|
||||
expect(processStreamingMarkdown("* Item 1\n-")).toBe("* Item 1");
|
||||
expect(processStreamingMarkdown("Text\n+ ")).toBe("Text");
|
||||
expect(processStreamingMarkdown("Text\n+")).toBe("Text");
|
||||
expect(processStreamingMarkdown("1. First\n2. ")).toBe("1. First");
|
||||
});
|
||||
|
||||
it("should remove heading markers at the end", () => {
|
||||
expect(processStreamingMarkdown("Some text\n# ")).toBe("Some text");
|
||||
expect(processStreamingMarkdown("Some text\n#")).toBe("Some text\n#"); // # without space is not removed
|
||||
expect(processStreamingMarkdown("# Title\n## ")).toBe("# Title");
|
||||
expect(processStreamingMarkdown("# Title\n##")).toBe("# Title\n##"); // ## without space is not removed
|
||||
});
|
||||
|
||||
it("should remove ambiguous bold markers at the end", () => {
|
||||
expect(processStreamingMarkdown("Text **")).toBe("Text ");
|
||||
expect(processStreamingMarkdown("Some text\n**")).toBe("Some text");
|
||||
});
|
||||
|
||||
it("should remove code block markers at the end", () => {
|
||||
expect(processStreamingMarkdown("Text\n```")).toBe("Text");
|
||||
expect(processStreamingMarkdown("```")).toBe("");
|
||||
});
|
||||
|
||||
it("should remove single backtick at the end", () => {
|
||||
expect(processStreamingMarkdown("Text `")).toBe("Text ");
|
||||
expect(processStreamingMarkdown("`")).toBe("");
|
||||
});
|
||||
|
||||
it("should remove single asterisk at the end", () => {
|
||||
expect(processStreamingMarkdown("Text *")).toBe("Text ");
|
||||
expect(processStreamingMarkdown("*")).toBe("");
|
||||
});
|
||||
|
||||
it("should handle empty content", () => {
|
||||
expect(processStreamingMarkdown("")).toBe("");
|
||||
});
|
||||
|
||||
it("should handle single line removals correctly", () => {
|
||||
expect(processStreamingMarkdown("* ")).toBe("");
|
||||
expect(processStreamingMarkdown("# ")).toBe("");
|
||||
expect(processStreamingMarkdown("**")).toBe("");
|
||||
expect(processStreamingMarkdown("`")).toBe("");
|
||||
});
|
||||
|
||||
it("shouldn't have this regexp capture group bug", () => {
|
||||
expect(
|
||||
processStreamingMarkdown("Here's a shopping list:\n*"),
|
||||
).not.toContain("0*");
|
||||
expect(processStreamingMarkdown("Here's a shopping list:\n*")).toBe(
|
||||
"Here's a shopping list:",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("List markers", () => {
|
||||
it("should preserve complete list items", () => {
|
||||
expect(processStreamingMarkdown("* Complete item")).toBe(
|
||||
"* Complete item",
|
||||
);
|
||||
expect(processStreamingMarkdown("- Another item")).toBe("- Another item");
|
||||
expect(processStreamingMarkdown("+ Plus item")).toBe("+ Plus item");
|
||||
expect(processStreamingMarkdown("1. Numbered item")).toBe(
|
||||
"1. Numbered item",
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle indented list markers", () => {
|
||||
expect(processStreamingMarkdown(" * ")).toBe(" ");
|
||||
expect(processStreamingMarkdown(" - ")).toBe(" ");
|
||||
expect(processStreamingMarkdown("\t+ ")).toBe("\t");
|
||||
});
|
||||
});
|
||||
|
||||
describe("Heading markers", () => {
|
||||
it("should preserve complete headings", () => {
|
||||
expect(processStreamingMarkdown("# Complete Heading")).toBe(
|
||||
"# Complete Heading",
|
||||
);
|
||||
expect(processStreamingMarkdown("## Subheading")).toBe("## Subheading");
|
||||
expect(processStreamingMarkdown("### H3 Title")).toBe("### H3 Title");
|
||||
});
|
||||
|
||||
it("should not affect # in other contexts", () => {
|
||||
expect(processStreamingMarkdown("C# programming")).toBe("C# programming");
|
||||
expect(processStreamingMarkdown("Issue #123")).toBe("Issue #123");
|
||||
});
|
||||
});
|
||||
|
||||
describe("Bold text", () => {
|
||||
it("should close incomplete bold text", () => {
|
||||
expect(processStreamingMarkdown("This is **bold text")).toBe(
|
||||
"This is **bold text**",
|
||||
);
|
||||
expect(processStreamingMarkdown("Start **bold and more")).toBe(
|
||||
"Start **bold and more**",
|
||||
);
|
||||
expect(processStreamingMarkdown("**just bold")).toBe("**just bold**");
|
||||
});
|
||||
|
||||
it("should not affect complete bold text", () => {
|
||||
expect(processStreamingMarkdown("**complete bold**")).toBe(
|
||||
"**complete bold**",
|
||||
);
|
||||
expect(processStreamingMarkdown("Text **bold** more")).toBe(
|
||||
"Text **bold** more",
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle nested bold correctly", () => {
|
||||
expect(processStreamingMarkdown("**bold** and **another")).toBe(
|
||||
"**bold** and **another**",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Italic text", () => {
|
||||
it("should close incomplete italic text", () => {
|
||||
expect(processStreamingMarkdown("This is *italic text")).toBe(
|
||||
"This is *italic text*",
|
||||
);
|
||||
expect(processStreamingMarkdown("Start *italic and more")).toBe(
|
||||
"Start *italic and more*",
|
||||
);
|
||||
});
|
||||
|
||||
it("should differentiate between list markers and italic", () => {
|
||||
expect(processStreamingMarkdown("* Item\n* ")).toBe("* Item");
|
||||
expect(processStreamingMarkdown("Some *italic text")).toBe(
|
||||
"Some *italic text*",
|
||||
);
|
||||
expect(processStreamingMarkdown("*just italic")).toBe("*just italic*");
|
||||
});
|
||||
|
||||
it("should not affect complete italic text", () => {
|
||||
expect(processStreamingMarkdown("*complete italic*")).toBe(
|
||||
"*complete italic*",
|
||||
);
|
||||
expect(processStreamingMarkdown("Text *italic* more")).toBe(
|
||||
"Text *italic* more",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Code blocks", () => {
|
||||
it("should close incomplete code blocks", () => {
|
||||
expect(processStreamingMarkdown("```javascript\nconst x = 42;")).toBe(
|
||||
"```javascript\nconst x = 42;\n```",
|
||||
);
|
||||
expect(processStreamingMarkdown("```\ncode here")).toBe(
|
||||
"```\ncode here\n```",
|
||||
);
|
||||
});
|
||||
|
||||
it("should not affect complete code blocks", () => {
|
||||
expect(processStreamingMarkdown("```\ncode\n```")).toBe("```\ncode\n```");
|
||||
expect(processStreamingMarkdown("```js\nconst x = 1;\n```")).toBe(
|
||||
"```js\nconst x = 1;\n```",
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle nested code blocks correctly", () => {
|
||||
expect(processStreamingMarkdown("```\ncode\n```\n```python")).toBe(
|
||||
"```\ncode\n```\n```python\n```",
|
||||
);
|
||||
});
|
||||
|
||||
it("should not process markdown inside code blocks", () => {
|
||||
expect(processStreamingMarkdown("```\n* not a list\n**not bold**")).toBe(
|
||||
"```\n* not a list\n**not bold**\n```",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Inline code", () => {
|
||||
it("should close incomplete inline code", () => {
|
||||
expect(processStreamingMarkdown("This is `inline code")).toBe(
|
||||
"This is `inline code`",
|
||||
);
|
||||
expect(processStreamingMarkdown("Use `console.log")).toBe(
|
||||
"Use `console.log`",
|
||||
);
|
||||
});
|
||||
|
||||
it("should not affect complete inline code", () => {
|
||||
expect(processStreamingMarkdown("`complete code`")).toBe(
|
||||
"`complete code`",
|
||||
);
|
||||
expect(processStreamingMarkdown("Use `code` here")).toBe(
|
||||
"Use `code` here",
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle multiple inline codes correctly", () => {
|
||||
expect(processStreamingMarkdown("`code` and `more")).toBe(
|
||||
"`code` and `more`",
|
||||
);
|
||||
});
|
||||
|
||||
it("should not confuse inline code with code blocks", () => {
|
||||
expect(processStreamingMarkdown("```\nblock\n```\n`inline")).toBe(
|
||||
"```\nblock\n```\n`inline`",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Complex streaming scenarios", () => {
|
||||
it("should handle progressive streaming of a heading", () => {
|
||||
const steps = [
|
||||
{ input: "#", expected: "#" }, // # alone is not removed (needs space)
|
||||
{ input: "# ", expected: "" },
|
||||
{ input: "# H", expected: "# H" },
|
||||
{ input: "# Hello", expected: "# Hello" },
|
||||
];
|
||||
steps.forEach(({ input, expected }) => {
|
||||
expect(processStreamingMarkdown(input)).toBe(expected);
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle progressive streaming of bold text", () => {
|
||||
const steps = [
|
||||
{ input: "*", expected: "" },
|
||||
{ input: "**", expected: "" },
|
||||
{ input: "**b", expected: "**b**" },
|
||||
{ input: "**bold", expected: "**bold**" },
|
||||
{ input: "**bold**", expected: "**bold**" },
|
||||
];
|
||||
steps.forEach(({ input, expected }) => {
|
||||
expect(processStreamingMarkdown(input)).toBe(expected);
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle multiline content with various patterns", () => {
|
||||
const multiline = `# Title
|
||||
|
||||
This is a paragraph with **bold text** and *italic text*.
|
||||
|
||||
* Item 1
|
||||
* Item 2
|
||||
* `;
|
||||
|
||||
const expected = `# Title
|
||||
|
||||
This is a paragraph with **bold text** and *italic text*.
|
||||
|
||||
* Item 1
|
||||
* Item 2`;
|
||||
|
||||
expect(processStreamingMarkdown(multiline)).toBe(expected);
|
||||
});
|
||||
|
||||
it("should only fix the last line", () => {
|
||||
expect(processStreamingMarkdown("# Complete\n# Another\n# ")).toBe(
|
||||
"# Complete\n# Another",
|
||||
);
|
||||
expect(processStreamingMarkdown("* Item 1\n* Item 2\n* ")).toBe(
|
||||
"* Item 1\n* Item 2",
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle mixed content correctly", () => {
|
||||
const input = `# Header
|
||||
|
||||
This has **bold** text and *italic* text.
|
||||
|
||||
\`\`\`js
|
||||
const x = 42;
|
||||
\`\`\`
|
||||
|
||||
Now some \`inline code\` and **unclosed bold`;
|
||||
|
||||
const expected = `# Header
|
||||
|
||||
This has **bold** text and *italic* text.
|
||||
|
||||
\`\`\`js
|
||||
const x = 42;
|
||||
\`\`\`
|
||||
|
||||
Now some \`inline code\` and **unclosed bold**`;
|
||||
|
||||
expect(processStreamingMarkdown(input)).toBe(expected);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Edge cases with escaping", () => {
|
||||
it("should handle escaped asterisks (future enhancement)", () => {
|
||||
// Note: Current implementation doesn't handle escaping
|
||||
// This is a known limitation - escaped characters still trigger closing
|
||||
expect(processStreamingMarkdown("Text \\*not italic")).toBe(
|
||||
"Text \\*not italic*",
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle escaped backticks (future enhancement)", () => {
|
||||
// Note: Current implementation doesn't handle escaping
|
||||
// This is a known limitation - escaped characters still trigger closing
|
||||
expect(processStreamingMarkdown("Text \\`not code")).toBe(
|
||||
"Text \\`not code`",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Code block edge cases", () => {
|
||||
it("should handle triple backticks in the middle of lines", () => {
|
||||
expect(processStreamingMarkdown("Text ``` in middle")).toBe(
|
||||
"Text ``` in middle\n```",
|
||||
);
|
||||
expect(processStreamingMarkdown("```\nText ``` in code\nmore")).toBe(
|
||||
"```\nText ``` in code\nmore\n```",
|
||||
);
|
||||
});
|
||||
|
||||
it("should properly close code blocks with language specifiers", () => {
|
||||
expect(processStreamingMarkdown("```typescript")).toBe(
|
||||
"```typescript\n```",
|
||||
);
|
||||
expect(processStreamingMarkdown("```typescript\nconst x = 1")).toBe(
|
||||
"```typescript\nconst x = 1\n```",
|
||||
);
|
||||
});
|
||||
|
||||
it("should remove a completely empty partial code block", () => {
|
||||
expect(processStreamingMarkdown("```\n")).toBe("");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
*/
|
||||
@@ -1,66 +1,123 @@
|
||||
import React from "react";
|
||||
import Markdown from "react-markdown";
|
||||
import remarkGfm from "remark-gfm";
|
||||
import remarkMath from "remark-math";
|
||||
import rehypeRaw from "rehype-raw";
|
||||
import rehypeSanitize, { defaultSchema } from "rehype-sanitize";
|
||||
import rehypePrismPlus from "rehype-prism-plus";
|
||||
import rehypeKatex from "rehype-katex";
|
||||
import remarkStreamingMarkdown, {
|
||||
type LastNodeInfo,
|
||||
} from "@/utils/remarkStreamingMarkdown";
|
||||
import type { PluggableList } from "unified";
|
||||
import { Streamdown, defaultRemarkPlugins } from "streamdown";
|
||||
import remarkCitationParser from "@/utils/remarkCitationParser";
|
||||
import CopyButton from "./CopyButton";
|
||||
import type { BundledLanguage } from "shiki";
|
||||
import { highlighter } from "@/lib/highlighter";
|
||||
|
||||
interface StreamingMarkdownContentProps {
|
||||
content: string;
|
||||
isStreaming?: boolean;
|
||||
size?: "sm" | "md" | "lg";
|
||||
onLastNode?: (info: LastNodeInfo) => void;
|
||||
browserToolResult?: any; // TODO: proper type
|
||||
}
|
||||
|
||||
// Helper to extract text from React nodes
|
||||
const extractText = (node: React.ReactNode): string => {
|
||||
if (typeof node === "string") return node;
|
||||
if (typeof node === "number") return String(node);
|
||||
if (!node) return "";
|
||||
if (React.isValidElement(node)) {
|
||||
const props = node.props as any;
|
||||
if (props?.children) {
|
||||
return extractText(props.children as React.ReactNode);
|
||||
}
|
||||
}
|
||||
if (Array.isArray(node)) {
|
||||
return node.map(extractText).join("");
|
||||
}
|
||||
return "";
|
||||
};
|
||||
|
||||
const CodeBlock = React.memo(
|
||||
({ children, className, ...props }: React.HTMLAttributes<HTMLPreElement>) => {
|
||||
const extractText = React.useCallback((node: React.ReactNode): string => {
|
||||
if (typeof node === "string") return node;
|
||||
if (typeof node === "number") return String(node);
|
||||
if (!node) return "";
|
||||
({ children }: React.HTMLAttributes<HTMLPreElement>) => {
|
||||
// Extract code and language from children
|
||||
const codeElement = children as React.ReactElement<{
|
||||
className?: string;
|
||||
children: React.ReactNode;
|
||||
}>;
|
||||
const language =
|
||||
codeElement.props.className?.replace(/language-/, "") || "";
|
||||
const codeText = extractText(codeElement.props.children);
|
||||
|
||||
if (React.isValidElement(node)) {
|
||||
if (
|
||||
node.props &&
|
||||
typeof node.props === "object" &&
|
||||
"children" in node.props
|
||||
) {
|
||||
return extractText(node.props.children as React.ReactNode);
|
||||
}
|
||||
// Synchronously highlight code using the pre-loaded highlighter
|
||||
const tokens = React.useMemo(() => {
|
||||
if (!highlighter) return null;
|
||||
|
||||
try {
|
||||
return {
|
||||
light: highlighter.codeToTokensBase(codeText, {
|
||||
lang: language as BundledLanguage,
|
||||
theme: "one-light" as any,
|
||||
}),
|
||||
dark: highlighter.codeToTokensBase(codeText, {
|
||||
lang: language as BundledLanguage,
|
||||
theme: "one-dark" as any,
|
||||
}),
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("Failed to highlight code:", error);
|
||||
return null;
|
||||
}
|
||||
|
||||
if (Array.isArray(node)) {
|
||||
return node.map(extractText).join("");
|
||||
}
|
||||
|
||||
return "";
|
||||
}, []);
|
||||
|
||||
const language = className?.replace(/language-/, "") || "";
|
||||
}, [codeText, language]);
|
||||
|
||||
return (
|
||||
<div className="relative bg-neutral-100 dark:bg-neutral-800 rounded-2xl overflow-hidden my-6">
|
||||
<div className="flex justify-between select-none">
|
||||
<div className="text-[13px] text-neutral-500 dark:text-neutral-400 font-mono px-4 py-2">
|
||||
{language}
|
||||
</div>
|
||||
<div className="flex select-none">
|
||||
{language && (
|
||||
<div className="text-[13px] text-neutral-500 dark:text-neutral-400 font-mono px-4 py-2">
|
||||
{language}
|
||||
</div>
|
||||
)}
|
||||
<CopyButton
|
||||
content={extractText(children)}
|
||||
content={codeText}
|
||||
showLabels={true}
|
||||
className="copy-button text-neutral-500 dark:text-neutral-400 bg-neutral-100 dark:bg-neutral-800"
|
||||
className="copy-button text-neutral-500 dark:text-neutral-400 bg-neutral-100 dark:bg-neutral-800 ml-auto"
|
||||
/>
|
||||
</div>
|
||||
<pre className={className} {...props}>
|
||||
{children}
|
||||
{/* Light mode */}
|
||||
<pre className="dark:hidden m-0 bg-neutral-100 text-sm overflow-x-auto p-4">
|
||||
<code className="font-mono text-sm">
|
||||
{tokens?.light
|
||||
? tokens.light.map((line: any, i: number) => (
|
||||
<React.Fragment key={i}>
|
||||
{line.map((token: any, j: number) => (
|
||||
<span
|
||||
key={j}
|
||||
style={{
|
||||
color: token.color,
|
||||
}}
|
||||
>
|
||||
{token.content}
|
||||
</span>
|
||||
))}
|
||||
{i < tokens.light.length - 1 && "\n"}
|
||||
</React.Fragment>
|
||||
))
|
||||
: codeText}
|
||||
</code>
|
||||
</pre>
|
||||
{/* Dark mode */}
|
||||
<pre className="hidden dark:block m-0 bg-neutral-800 text-sm overflow-x-auto p-4">
|
||||
<code className="font-mono text-sm">
|
||||
{tokens?.dark
|
||||
? tokens.dark.map((line: any, i: number) => (
|
||||
<React.Fragment key={i}>
|
||||
{line.map((token: any, j: number) => (
|
||||
<span
|
||||
key={j}
|
||||
style={{
|
||||
color: token.color,
|
||||
}}
|
||||
>
|
||||
{token.content}
|
||||
</span>
|
||||
))}
|
||||
{i < tokens.dark.length - 1 && "\n"}
|
||||
</React.Fragment>
|
||||
))
|
||||
: codeText}
|
||||
</code>
|
||||
</pre>
|
||||
</div>
|
||||
);
|
||||
@@ -68,65 +125,19 @@ const CodeBlock = React.memo(
|
||||
);
|
||||
|
||||
const StreamingMarkdownContent: React.FC<StreamingMarkdownContentProps> =
|
||||
React.memo(
|
||||
({ content, isStreaming = false, size, onLastNode, browserToolResult }) => {
|
||||
// Build the remark plugins array
|
||||
const remarkPlugins = React.useMemo(() => {
|
||||
const plugins: PluggableList = [
|
||||
remarkGfm,
|
||||
[remarkMath, { singleDollarTextMath: false }],
|
||||
remarkCitationParser,
|
||||
];
|
||||
React.memo(({ content, isStreaming = false, size, browserToolResult }) => {
|
||||
// Build the remark plugins array - keep default GFM and Math, add citations
|
||||
const remarkPlugins = React.useMemo(() => {
|
||||
return [
|
||||
defaultRemarkPlugins.gfm,
|
||||
defaultRemarkPlugins.math,
|
||||
remarkCitationParser,
|
||||
];
|
||||
}, []);
|
||||
|
||||
// Add streaming plugin when in streaming mode
|
||||
if (isStreaming) {
|
||||
plugins.push([remarkStreamingMarkdown, { debug: true, onLastNode }]);
|
||||
}
|
||||
|
||||
return plugins;
|
||||
}, [isStreaming, onLastNode]);
|
||||
|
||||
// Create a custom sanitization schema that allows math elements
|
||||
const sanitizeSchema = React.useMemo(() => {
|
||||
return {
|
||||
...defaultSchema,
|
||||
attributes: {
|
||||
...defaultSchema.attributes,
|
||||
span: [
|
||||
...(defaultSchema.attributes?.span || []),
|
||||
["className", /^katex/],
|
||||
],
|
||||
div: [
|
||||
...(defaultSchema.attributes?.div || []),
|
||||
["className", /^katex/],
|
||||
],
|
||||
"ol-citation": ["cursor", "start", "end"],
|
||||
},
|
||||
tagNames: [
|
||||
...(defaultSchema.tagNames || []),
|
||||
"math",
|
||||
"mrow",
|
||||
"mi",
|
||||
"mo",
|
||||
"mn",
|
||||
"msup",
|
||||
"msub",
|
||||
"mfrac",
|
||||
"mover",
|
||||
"munder",
|
||||
"msqrt",
|
||||
"mroot",
|
||||
"merror",
|
||||
"mspace",
|
||||
"mpadded",
|
||||
"ol-citation",
|
||||
],
|
||||
};
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<div
|
||||
className={`
|
||||
return (
|
||||
<div
|
||||
className={`
|
||||
max-w-full
|
||||
${size === "sm" ? "prose-sm" : size === "lg" ? "prose-lg" : ""}
|
||||
prose
|
||||
@@ -144,7 +155,27 @@ const StreamingMarkdownContent: React.FC<StreamingMarkdownContentProps> =
|
||||
prose-pre:my-0
|
||||
prose-pre:max-w-full
|
||||
prose-pre:pt-1
|
||||
[&_code:not(pre_code)]:text-neutral-700
|
||||
[&_table]:border-collapse
|
||||
[&_table]:w-full
|
||||
[&_table]:border
|
||||
[&_table]:border-neutral-200
|
||||
[&_table]:rounded-lg
|
||||
[&_table]:overflow-hidden
|
||||
[&_th]:px-3
|
||||
[&_th]:py-2
|
||||
[&_th]:text-left
|
||||
[&_th]:font-semibold
|
||||
[&_th]:border-b
|
||||
[&_th]:border-r
|
||||
[&_th]:border-neutral-200
|
||||
[&_th:last-child]:border-r-0
|
||||
[&_td]:px-3
|
||||
[&_td]:py-2
|
||||
[&_td]:border-r
|
||||
[&_td]:border-neutral-200
|
||||
[&_td:last-child]:border-r-0
|
||||
[&_tbody_tr:not(:last-child)_td]:border-b
|
||||
[&_code:not(pre_code)]:text-neutral-700
|
||||
[&_code:not(pre_code)]:bg-neutral-100
|
||||
[&_code:not(pre_code)]:font-normal
|
||||
[&_code:not(pre_code)]:px-1.5
|
||||
@@ -160,6 +191,10 @@ const StreamingMarkdownContent: React.FC<StreamingMarkdownContentProps> =
|
||||
dark:prose-strong:text-neutral-200
|
||||
dark:prose-pre:text-neutral-200
|
||||
dark:prose:pre:text-neutral-200
|
||||
dark:[&_table]:border-neutral-700
|
||||
dark:[&_thead]:bg-neutral-800
|
||||
dark:[&_th]:border-neutral-700
|
||||
dark:[&_td]:border-neutral-700
|
||||
dark:[&_code:not(pre_code)]:text-neutral-200
|
||||
dark:[&_code:not(pre_code)]:bg-neutral-800
|
||||
dark:[&_code:not(pre_code)]:font-normal
|
||||
@@ -167,104 +202,86 @@ const StreamingMarkdownContent: React.FC<StreamingMarkdownContentProps> =
|
||||
dark:prose-li:marker:text-neutral-300
|
||||
break-words
|
||||
`}
|
||||
>
|
||||
<StreamingMarkdownErrorBoundary
|
||||
content={content}
|
||||
isStreaming={isStreaming}
|
||||
>
|
||||
<StreamingMarkdownErrorBoundary
|
||||
content={content}
|
||||
isStreaming={isStreaming}
|
||||
>
|
||||
<Markdown
|
||||
remarkPlugins={remarkPlugins}
|
||||
rehypePlugins={
|
||||
[
|
||||
[rehypeRaw, { allowDangerousHtml: true }],
|
||||
[rehypeSanitize, sanitizeSchema],
|
||||
[rehypePrismPlus, { ignoreMissing: true }],
|
||||
[
|
||||
rehypeKatex,
|
||||
{
|
||||
errorColor: "#000000", // Black instead of red for errors
|
||||
strict: false, // Be more lenient with parsing
|
||||
throwOnError: false,
|
||||
},
|
||||
],
|
||||
] as PluggableList
|
||||
}
|
||||
components={{
|
||||
pre: CodeBlock,
|
||||
table: ({
|
||||
children,
|
||||
...props
|
||||
}: React.HTMLAttributes<HTMLTableElement>) => (
|
||||
<div className="overflow-x-auto max-w-full">
|
||||
<table {...props}>{children}</table>
|
||||
</div>
|
||||
),
|
||||
// @ts-expect-error: custom type
|
||||
"ol-citation": ({
|
||||
cursor,
|
||||
// start,
|
||||
// end,
|
||||
}: {
|
||||
cursor: number;
|
||||
start: number;
|
||||
end: number;
|
||||
}) => {
|
||||
// Check if we have a page_stack and if the cursor is valid
|
||||
const pageStack = browserToolResult?.page_stack;
|
||||
const hasValidPage = pageStack && cursor < pageStack.length;
|
||||
const pageUrl = hasValidPage ? pageStack[cursor] : null;
|
||||
<Streamdown
|
||||
parseIncompleteMarkdown={isStreaming}
|
||||
isAnimating={isStreaming}
|
||||
remarkPlugins={remarkPlugins}
|
||||
controls={false}
|
||||
components={{
|
||||
pre: CodeBlock,
|
||||
table: ({
|
||||
children,
|
||||
...props
|
||||
}: React.HTMLAttributes<HTMLTableElement>) => (
|
||||
<div className="overflow-x-auto max-w-full">
|
||||
<table
|
||||
{...props}
|
||||
className="border-collapse w-full border border-neutral-200 dark:border-neutral-700 rounded-lg overflow-hidden"
|
||||
>
|
||||
{children}
|
||||
</table>
|
||||
</div>
|
||||
),
|
||||
// @ts-expect-error: custom citation type
|
||||
"ol-citation": ({
|
||||
cursor,
|
||||
}: {
|
||||
cursor: number;
|
||||
start: number;
|
||||
end: number;
|
||||
}) => {
|
||||
const pageStack = browserToolResult?.page_stack;
|
||||
const hasValidPage = pageStack && cursor < pageStack.length;
|
||||
const pageUrl = hasValidPage ? pageStack[cursor] : null;
|
||||
|
||||
// Extract a readable title from the URL if possible
|
||||
const getPageTitle = (url: string) => {
|
||||
if (url.startsWith("search_results_")) {
|
||||
const searchTerm = url.substring(
|
||||
"search_results_".length,
|
||||
);
|
||||
return `Search: ${searchTerm}`;
|
||||
}
|
||||
// For regular URLs, try to extract domain or use full URL
|
||||
try {
|
||||
const urlObj = new URL(url);
|
||||
return urlObj.hostname;
|
||||
} catch {
|
||||
// If not a valid URL, return as is
|
||||
return url;
|
||||
}
|
||||
};
|
||||
|
||||
const citationElement = (
|
||||
<span className="text-xs text-neutral-500 dark:text-neutral-400 bg-neutral-100 dark:bg-neutral-800 rounded-full px-2 py-1 ml-1">
|
||||
[{cursor}]
|
||||
</span>
|
||||
);
|
||||
|
||||
// If we have a valid page URL, wrap in a link
|
||||
if (pageUrl && pageUrl.startsWith("http")) {
|
||||
return (
|
||||
<a
|
||||
href={pageUrl}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="inline-flex items-center hover:opacity-80 transition-opacity no-underline"
|
||||
title={getPageTitle(pageUrl)}
|
||||
>
|
||||
{citationElement}
|
||||
</a>
|
||||
);
|
||||
const getPageTitle = (url: string) => {
|
||||
if (url.startsWith("search_results_")) {
|
||||
const searchTerm = url.substring("search_results_".length);
|
||||
return `Search: ${searchTerm}`;
|
||||
}
|
||||
try {
|
||||
const urlObj = new URL(url);
|
||||
return urlObj.hostname;
|
||||
} catch {
|
||||
return url;
|
||||
}
|
||||
};
|
||||
|
||||
// Otherwise, just return the citation without a link
|
||||
return citationElement;
|
||||
},
|
||||
}}
|
||||
>
|
||||
{content}
|
||||
</Markdown>
|
||||
</StreamingMarkdownErrorBoundary>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
);
|
||||
const citationElement = (
|
||||
<span className="text-xs text-neutral-500 dark:text-neutral-400 bg-neutral-100 dark:bg-neutral-800 rounded-full px-2 py-1 ml-1">
|
||||
[{cursor}]
|
||||
</span>
|
||||
);
|
||||
|
||||
if (pageUrl && pageUrl.startsWith("http")) {
|
||||
return (
|
||||
<a
|
||||
href={pageUrl}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="inline-flex items-center hover:opacity-80 transition-opacity no-underline"
|
||||
title={getPageTitle(pageUrl)}
|
||||
>
|
||||
{citationElement}
|
||||
</a>
|
||||
);
|
||||
}
|
||||
|
||||
return citationElement;
|
||||
},
|
||||
}}
|
||||
>
|
||||
{content}
|
||||
</Streamdown>
|
||||
</StreamingMarkdownErrorBoundary>
|
||||
</div>
|
||||
);
|
||||
});
|
||||
|
||||
interface StreamingMarkdownErrorBoundaryProps {
|
||||
content: string;
|
||||
|
||||
@@ -50,21 +50,33 @@ export default function Thinking({
|
||||
// Position content to show bottom when collapsed
|
||||
useEffect(() => {
|
||||
if (isCollapsed && contentRef.current && wrapperRef.current) {
|
||||
const contentHeight = contentRef.current.scrollHeight;
|
||||
const wrapperHeight = wrapperRef.current.clientHeight;
|
||||
if (contentHeight > wrapperHeight) {
|
||||
const translateY = -(contentHeight - wrapperHeight);
|
||||
contentRef.current.style.transform = `translateY(${translateY}px)`;
|
||||
setHasOverflow(true);
|
||||
} else {
|
||||
setHasOverflow(false);
|
||||
}
|
||||
requestAnimationFrame(() => {
|
||||
if (!contentRef.current || !wrapperRef.current) return;
|
||||
|
||||
const contentHeight = contentRef.current.scrollHeight;
|
||||
const wrapperHeight = wrapperRef.current.clientHeight;
|
||||
if (contentHeight > wrapperHeight) {
|
||||
const translateY = -(contentHeight - wrapperHeight);
|
||||
contentRef.current.style.transform = `translateY(${translateY}px)`;
|
||||
setHasOverflow(true);
|
||||
} else {
|
||||
contentRef.current.style.transform = "translateY(0)";
|
||||
setHasOverflow(false);
|
||||
}
|
||||
});
|
||||
} else if (contentRef.current) {
|
||||
contentRef.current.style.transform = "translateY(0)";
|
||||
setHasOverflow(false);
|
||||
}
|
||||
}, [thinking, isCollapsed]);
|
||||
|
||||
useEffect(() => {
|
||||
if (activelyThinking && wrapperRef.current && !isCollapsed) {
|
||||
// When expanded and actively thinking, scroll to bottom
|
||||
wrapperRef.current.scrollTop = wrapperRef.current.scrollHeight;
|
||||
}
|
||||
}, [thinking, activelyThinking, isCollapsed]);
|
||||
|
||||
const handleToggle = () => {
|
||||
setIsCollapsed(!isCollapsed);
|
||||
setHasUserInteracted(true);
|
||||
@@ -73,8 +85,9 @@ export default function Thinking({
|
||||
// Calculate max height for smooth animations
|
||||
const getMaxHeight = () => {
|
||||
if (isCollapsed) {
|
||||
return finishedThinking ? "0px" : "12rem"; // 8rem = 128px (same as max-h-32)
|
||||
return finishedThinking ? "0px" : "12rem";
|
||||
}
|
||||
// When expanded, use the content height or grow naturally
|
||||
return contentHeight ? `${contentHeight}px` : "none";
|
||||
};
|
||||
|
||||
@@ -131,10 +144,11 @@ export default function Thinking({
|
||||
</div>
|
||||
<div
|
||||
ref={wrapperRef}
|
||||
className={`text-xs text-neutral-500 dark:text-neutral-500 rounded-md overflow-hidden
|
||||
transition-[max-height,opacity] duration-300 ease-in-out relative ml-6 mt-2`}
|
||||
className={`text-xs text-neutral-500 dark:text-neutral-500 rounded-md
|
||||
transition-[max-height,opacity] duration-300 ease-in-out relative ml-6 mt-2
|
||||
${isCollapsed ? "overflow-hidden" : "overflow-y-auto"}`}
|
||||
style={{
|
||||
maxHeight: getMaxHeight(),
|
||||
maxHeight: isCollapsed ? getMaxHeight() : undefined,
|
||||
opacity: isCollapsed && finishedThinking ? 0 : 1,
|
||||
}}
|
||||
>
|
||||
|
||||
@@ -65,7 +65,7 @@ export const BadgeButton = forwardRef(function BadgeButton(
|
||||
),
|
||||
ref: React.ForwardedRef<HTMLElement>,
|
||||
) {
|
||||
let classes = clsx(
|
||||
const classes = clsx(
|
||||
className,
|
||||
"group relative inline-flex rounded-md focus:not-data-focus:outline-hidden data-focus:outline-2 data-focus:outline-offset-2 data-focus:outline-blue-500",
|
||||
);
|
||||
|
||||
@@ -171,7 +171,7 @@ export const Button = forwardRef(function Button(
|
||||
{ color, outline, plain, className, children, ...props }: ButtonProps,
|
||||
ref: React.ForwardedRef<HTMLElement>,
|
||||
) {
|
||||
let classes = clsx(
|
||||
const classes = clsx(
|
||||
className,
|
||||
styles.base,
|
||||
outline
|
||||
|
||||
@@ -6,10 +6,11 @@ export interface SliderProps {
|
||||
value?: number;
|
||||
onChange?: (value: number) => void;
|
||||
className?: string;
|
||||
disabled?: boolean;
|
||||
}
|
||||
|
||||
const Slider = React.forwardRef<HTMLDivElement, SliderProps>(
|
||||
({ label, options, value = 0, onChange }, ref) => {
|
||||
({ label, options, value = 0, onChange, disabled = false }, ref) => {
|
||||
const [selectedValue, setSelectedValue] = React.useState(value);
|
||||
const [isDragging, setIsDragging] = React.useState(false);
|
||||
const containerRef = React.useRef<HTMLDivElement>(null);
|
||||
@@ -20,6 +21,7 @@ const Slider = React.forwardRef<HTMLDivElement, SliderProps>(
|
||||
}, [value]);
|
||||
|
||||
const handleClick = (optionValue: number) => {
|
||||
if (disabled) return;
|
||||
setSelectedValue(optionValue);
|
||||
onChange?.(optionValue);
|
||||
};
|
||||
@@ -39,6 +41,7 @@ const Slider = React.forwardRef<HTMLDivElement, SliderProps>(
|
||||
};
|
||||
|
||||
const handleMouseDown = (e: React.MouseEvent) => {
|
||||
if (disabled) return;
|
||||
setIsDragging(true);
|
||||
e.preventDefault();
|
||||
};
|
||||
@@ -77,7 +80,7 @@ const Slider = React.forwardRef<HTMLDivElement, SliderProps>(
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-2" ref={ref}>
|
||||
<div className={`space-y-2 ${disabled ? "opacity-50" : ""}`} ref={ref}>
|
||||
{label && <label className="text-sm font-medium">{label}</label>}
|
||||
<div className="relative">
|
||||
<div className="absolute top-[9px] left-2 right-2 h-1 bg-neutral-200 dark:bg-neutral-700 pointer-events-none rounded-full" />
|
||||
@@ -88,10 +91,11 @@ const Slider = React.forwardRef<HTMLDivElement, SliderProps>(
|
||||
<button
|
||||
onClick={() => handleClick(option.value)}
|
||||
onMouseDown={handleMouseDown}
|
||||
className="relative px-3 py-6 -mx-3 -my-6 z-10 cursor-pointer"
|
||||
disabled={disabled}
|
||||
className={`relative px-3 py-6 -mx-3 -my-6 z-10 ${disabled ? "cursor-not-allowed" : "cursor-pointer"}`}
|
||||
>
|
||||
<div className="relative w-5 h-5 flex items-center justify-center">
|
||||
{selectedValue === option.value && (
|
||||
{selectedValue === option.value && !disabled && (
|
||||
<div className="w-4 h-4 bg-white dark:bg-white border border-neutral-400 dark:border-neutral-500 rounded-full cursor-grab active:cursor-grabbing" />
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -6,7 +6,8 @@ import { useSelectedModel } from "./useSelectedModel";
|
||||
import { createQueryBatcher } from "./useQueryBatcher";
|
||||
import { useRefetchModels } from "./useModels";
|
||||
import { useStreamingContext } from "@/contexts/StreamingContext";
|
||||
import { useSettings } from "./useSettings";
|
||||
import { getModelCapabilities } from "@/api";
|
||||
import { useCloudStatus } from "./useCloudStatus";
|
||||
|
||||
export const useChats = () => {
|
||||
return useQuery({
|
||||
@@ -115,11 +116,9 @@ export const useIsModelStale = (modelName: string) => {
|
||||
export const useShouldShowStaleDisplay = (model: Model | null) => {
|
||||
const isStale = useIsModelStale(model?.model || "");
|
||||
const { data: dismissedModels } = useDismissedStaleModels();
|
||||
const {
|
||||
settings: { airplaneMode },
|
||||
} = useSettings();
|
||||
const { cloudDisabled } = useCloudStatus();
|
||||
|
||||
if (model?.isCloud() && !airplaneMode) {
|
||||
if (model?.isCloud() && !cloudDisabled) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -606,6 +605,24 @@ export const useSendMessage = (chatId: string) => {
|
||||
queryClient.setQueryData(["staleModels"], newStaleMap);
|
||||
|
||||
queryClient.invalidateQueries({ queryKey: ["models"] });
|
||||
|
||||
// Fetch fresh capabilities for the downloaded model
|
||||
getModelCapabilities(selectedModel.model)
|
||||
.then((capabilities) => {
|
||||
queryClient.setQueryData(
|
||||
["modelCapabilities", selectedModel.model],
|
||||
capabilities,
|
||||
);
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error(
|
||||
"Failed to fetch capabilities after download:",
|
||||
error,
|
||||
);
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: ["modelCapabilities", selectedModel.model],
|
||||
});
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
20
app/ui/app/src/hooks/useCloudStatus.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
import { useQuery } from "@tanstack/react-query";
|
||||
import { getCloudStatus, type CloudStatusResponse } from "@/api";
|
||||
|
||||
export function useCloudStatus() {
|
||||
const cloudQuery = useQuery<CloudStatusResponse | null>({
|
||||
queryKey: ["cloudStatus"],
|
||||
queryFn: getCloudStatus,
|
||||
retry: false,
|
||||
staleTime: 60 * 1000,
|
||||
});
|
||||
|
||||
return {
|
||||
cloudStatus: cloudQuery.data,
|
||||
cloudDisabled: cloudQuery.data?.disabled ?? false,
|
||||
isKnown: cloudQuery.data !== null && cloudQuery.data !== undefined,
|
||||
isLoading: cloudQuery.isLoading,
|
||||
isError: cloudQuery.isError,
|
||||
error: cloudQuery.error,
|
||||
};
|
||||
}
|
||||
@@ -1,114 +0,0 @@
|
||||
import { useMutation, useQueryClient } from "@tanstack/react-query";
|
||||
import { useState } from "react";
|
||||
import { pullModel } from "@/api";
|
||||
import { useSelectedModel } from "./useSelectedModel";
|
||||
import { useSettings } from "./useSettings";
|
||||
|
||||
interface DownloadProgress {
|
||||
status: string;
|
||||
digest?: string;
|
||||
total?: number;
|
||||
completed?: number;
|
||||
done?: boolean;
|
||||
}
|
||||
|
||||
export function useDownloadModel(chatId?: string) {
|
||||
const queryClient = useQueryClient();
|
||||
const { selectedModel } = useSelectedModel(chatId);
|
||||
const { setSettings } = useSettings();
|
||||
const [downloadProgress, setDownloadProgress] =
|
||||
useState<DownloadProgress | null>(null);
|
||||
const [abortController, setAbortController] =
|
||||
useState<AbortController | null>(null);
|
||||
const [downloadingChatIds, setDownloadingChatIds] = useState<Set<string>>(
|
||||
new Set(),
|
||||
);
|
||||
|
||||
const mutation = useMutation({
|
||||
mutationFn: async (modelName: string) => {
|
||||
const controller = new AbortController();
|
||||
setAbortController(controller);
|
||||
setDownloadProgress({ status: "Starting download..." });
|
||||
if (chatId) {
|
||||
setDownloadingChatIds((prev) => new Set(prev).add(chatId));
|
||||
}
|
||||
|
||||
try {
|
||||
for await (const progress of pullModel(modelName, controller.signal)) {
|
||||
setDownloadProgress(progress);
|
||||
|
||||
if (progress.status === "success") {
|
||||
// Update selected model to indicate it's now available locally
|
||||
if (selectedModel && selectedModel.model === modelName) {
|
||||
setSettings({ SelectedModel: modelName });
|
||||
}
|
||||
// Invalidate models query to refresh the list
|
||||
await queryClient.invalidateQueries({ queryKey: ["models"] });
|
||||
break;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
setAbortController(null);
|
||||
if (chatId) {
|
||||
setDownloadingChatIds((prev) => {
|
||||
const newSet = new Set(prev);
|
||||
newSet.delete(chatId);
|
||||
return newSet;
|
||||
});
|
||||
}
|
||||
}
|
||||
},
|
||||
onSuccess: () => {
|
||||
setDownloadProgress(null);
|
||||
if (chatId) {
|
||||
setDownloadingChatIds((prev) => {
|
||||
const newSet = new Set(prev);
|
||||
newSet.delete(chatId);
|
||||
return newSet;
|
||||
});
|
||||
}
|
||||
},
|
||||
onError: (error: Error) => {
|
||||
const status =
|
||||
error.name === "AbortError" ? "Download cancelled" : "Download failed";
|
||||
setDownloadProgress({ status, done: true });
|
||||
|
||||
// Clear error message after delay
|
||||
const delay = error.name === "AbortError" ? 1500 : 3000;
|
||||
setTimeout(() => {
|
||||
setDownloadProgress(null);
|
||||
if (chatId) {
|
||||
setDownloadingChatIds((prev) => {
|
||||
const newSet = new Set(prev);
|
||||
newSet.delete(chatId);
|
||||
return newSet;
|
||||
});
|
||||
}
|
||||
}, delay);
|
||||
},
|
||||
});
|
||||
|
||||
const cancelDownload = () => {
|
||||
if (abortController) {
|
||||
abortController.abort();
|
||||
setAbortController(null);
|
||||
if (chatId) {
|
||||
setDownloadingChatIds((prev) => {
|
||||
const newSet = new Set(prev);
|
||||
newSet.delete(chatId);
|
||||
return newSet;
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
downloadModel: mutation.mutate,
|
||||
isDownloading:
|
||||
mutation.isPending && chatId ? downloadingChatIds.has(chatId) : false,
|
||||
downloadProgress:
|
||||
chatId && downloadingChatIds.has(chatId) ? downloadProgress : null,
|
||||
error: mutation.error,
|
||||
cancelDownload,
|
||||
};
|
||||
}
|
||||
@@ -20,3 +20,8 @@ export function useHasVisionCapability(modelName: string | undefined) {
|
||||
const { data: capabilitiesResponse } = useModelCapabilities(modelName);
|
||||
return capabilitiesResponse?.capabilities?.includes("vision") ?? false;
|
||||
}
|
||||
|
||||
export function useHasToolsCapability(modelName: string | undefined) {
|
||||
const { data: capabilitiesResponse } = useModelCapabilities(modelName);
|
||||
return capabilitiesResponse?.capabilities?.includes("tools") ?? false;
|
||||
}
|
||||
|
||||
@@ -2,11 +2,11 @@ import { useQuery } from "@tanstack/react-query";
|
||||
import { Model } from "@/gotypes";
|
||||
import { getModels } from "@/api";
|
||||
import { mergeModels } from "@/utils/mergeModels";
|
||||
import { useSettings } from "./useSettings";
|
||||
import { useMemo } from "react";
|
||||
import { useCloudStatus } from "./useCloudStatus";
|
||||
|
||||
export function useModels(searchQuery = "") {
|
||||
const { settings } = useSettings();
|
||||
const { cloudDisabled } = useCloudStatus();
|
||||
const localQuery = useQuery<Model[], Error>({
|
||||
queryKey: ["models", searchQuery],
|
||||
queryFn: () => getModels(searchQuery),
|
||||
@@ -20,7 +20,7 @@ export function useModels(searchQuery = "") {
|
||||
});
|
||||
|
||||
const allModels = useMemo(() => {
|
||||
const models = mergeModels(localQuery.data || [], settings.airplaneMode);
|
||||
const models = mergeModels(localQuery.data || [], cloudDisabled);
|
||||
|
||||
if (searchQuery && searchQuery.trim()) {
|
||||
const query = searchQuery.toLowerCase().trim();
|
||||
@@ -40,7 +40,7 @@ export function useModels(searchQuery = "") {
|
||||
}
|
||||
|
||||
return models;
|
||||
}, [localQuery.data, searchQuery, settings.airplaneMode]);
|
||||
}, [localQuery.data, searchQuery, cloudDisabled]);
|
||||
|
||||
return {
|
||||
...localQuery,
|
||||
|
||||
@@ -7,6 +7,7 @@ import { Model } from "@/gotypes";
|
||||
import { FEATURED_MODELS } from "@/utils/mergeModels";
|
||||
import { getTotalVRAM } from "@/utils/vram.ts";
|
||||
import { getInferenceCompute } from "@/api";
|
||||
import { useCloudStatus } from "./useCloudStatus";
|
||||
|
||||
export function recommendDefaultModel(totalVRAM: number): string {
|
||||
const vram = Math.max(0, Number(totalVRAM) || 0);
|
||||
@@ -22,16 +23,19 @@ export function recommendDefaultModel(totalVRAM: number): string {
|
||||
export function useSelectedModel(currentChatId?: string, searchQuery?: string) {
|
||||
const { settings, setSettings } = useSettings();
|
||||
const { data: models = [], isLoading } = useModels(searchQuery || "");
|
||||
const { cloudDisabled } = useCloudStatus();
|
||||
const { data: chatData, isLoading: isChatLoading } = useChat(
|
||||
currentChatId && currentChatId !== "new" ? currentChatId : "",
|
||||
);
|
||||
|
||||
const { data: inferenceComputes = [] } = useQuery({
|
||||
queryKey: ["inference-compute"],
|
||||
const { data: inferenceComputeResponse } = useQuery({
|
||||
queryKey: ["inferenceCompute"],
|
||||
queryFn: getInferenceCompute,
|
||||
enabled: !settings.selectedModel, // Only fetch if no model is selected
|
||||
});
|
||||
|
||||
const inferenceComputes = inferenceComputeResponse?.inferenceComputes || [];
|
||||
|
||||
const totalVRAM = useMemo(
|
||||
() => getTotalVRAM(inferenceComputes),
|
||||
[inferenceComputes],
|
||||
@@ -46,12 +50,11 @@ export function useSelectedModel(currentChatId?: string, searchQuery?: string) {
|
||||
const restoredChatRef = useRef<string | null>(null);
|
||||
|
||||
const selectedModel: Model | null = useMemo(() => {
|
||||
// if airplane mode is on and selected model ends with cloud,
|
||||
// switch to recommended default model
|
||||
if (settings.airplaneMode && settings.selectedModel?.endsWith("cloud")) {
|
||||
// If cloud is disabled and selected model ends with cloud, switch to a local default.
|
||||
if (cloudDisabled && settings.selectedModel?.endsWith("cloud")) {
|
||||
return (
|
||||
models.find((m) => m.model === recommendedModel) ||
|
||||
models.find((m) => m.isCloud) ||
|
||||
models.find((m) => !m.isCloud()) ||
|
||||
models.find((m) => m.digest === undefined || m.digest === "") ||
|
||||
models[0] ||
|
||||
null
|
||||
@@ -68,7 +71,7 @@ export function useSelectedModel(currentChatId?: string, searchQuery?: string) {
|
||||
"qwen3-coder:480b",
|
||||
];
|
||||
const shouldMigrate =
|
||||
!settings.airplaneMode &&
|
||||
!cloudDisabled &&
|
||||
settings.turboEnabled &&
|
||||
baseModelsToMigrate.includes(settings.selectedModel);
|
||||
|
||||
@@ -96,13 +99,18 @@ export function useSelectedModel(currentChatId?: string, searchQuery?: string) {
|
||||
})) ||
|
||||
null
|
||||
);
|
||||
}, [models, settings.selectedModel, settings.airplaneMode, recommendedModel]);
|
||||
}, [
|
||||
models,
|
||||
settings.selectedModel,
|
||||
cloudDisabled,
|
||||
recommendedModel,
|
||||
]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!selectedModel) return;
|
||||
|
||||
if (
|
||||
settings.airplaneMode &&
|
||||
cloudDisabled &&
|
||||
settings.selectedModel?.endsWith("cloud") &&
|
||||
selectedModel.model !== settings.selectedModel
|
||||
) {
|
||||
@@ -110,13 +118,17 @@ export function useSelectedModel(currentChatId?: string, searchQuery?: string) {
|
||||
}
|
||||
|
||||
if (
|
||||
!settings.airplaneMode &&
|
||||
!cloudDisabled &&
|
||||
settings.turboEnabled &&
|
||||
selectedModel.model !== settings.selectedModel
|
||||
) {
|
||||
setSettings({ SelectedModel: selectedModel.model, TurboEnabled: false });
|
||||
}
|
||||
}, [selectedModel, settings.airplaneMode, settings.selectedModel]);
|
||||
}, [
|
||||
selectedModel,
|
||||
cloudDisabled,
|
||||
settings.selectedModel,
|
||||
]);
|
||||
|
||||
// Set model from chat history when chat data loads
|
||||
useEffect(() => {
|
||||
@@ -169,7 +181,9 @@ export function useSelectedModel(currentChatId?: string, searchQuery?: string) {
|
||||
|
||||
const defaultModel =
|
||||
models.find((m) => m.model === recommendedModel) ||
|
||||
models.find((m) => m.isCloud()) ||
|
||||
(cloudDisabled
|
||||
? models.find((m) => !m.isCloud())
|
||||
: models.find((m) => m.isCloud())) ||
|
||||
models.find((m) => m.digest === undefined || m.digest === "") ||
|
||||
models[0];
|
||||
|
||||
@@ -181,6 +195,7 @@ export function useSelectedModel(currentChatId?: string, searchQuery?: string) {
|
||||
inferenceComputes.length,
|
||||
models.length,
|
||||
settings.selectedModel,
|
||||
cloudDisabled,
|
||||
]);
|
||||
|
||||
// Add the selected model to the models list if it's not already there
|
||||
|
||||
@@ -9,7 +9,7 @@ interface SettingsState {
|
||||
webSearchEnabled: boolean;
|
||||
selectedModel: string;
|
||||
sidebarOpen: boolean;
|
||||
airplaneMode: boolean;
|
||||
lastHomeView: string;
|
||||
thinkEnabled: boolean;
|
||||
thinkLevel: string;
|
||||
}
|
||||
@@ -22,6 +22,7 @@ type SettingsUpdate = Partial<{
|
||||
ThinkLevel: string;
|
||||
SelectedModel: string;
|
||||
SidebarOpen: boolean;
|
||||
LastHomeView: string;
|
||||
}>;
|
||||
|
||||
export function useSettings() {
|
||||
@@ -51,7 +52,7 @@ export function useSettings() {
|
||||
thinkLevel: settingsData?.settings?.ThinkLevel ?? "none",
|
||||
selectedModel: settingsData?.settings?.SelectedModel ?? "",
|
||||
sidebarOpen: settingsData?.settings?.SidebarOpen ?? false,
|
||||
airplaneMode: settingsData?.settings?.AirplaneMode ?? false,
|
||||
lastHomeView: settingsData?.settings?.LastHomeView ?? "launch",
|
||||
}),
|
||||
[settingsData?.settings],
|
||||
);
|
||||
|
||||
@@ -1,29 +1,20 @@
|
||||
import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query";
|
||||
import { useEffect, useState } from "react";
|
||||
import { fetchUser, fetchConnectUrl, disconnectUser } from "@/api";
|
||||
|
||||
export function useUser() {
|
||||
const queryClient = useQueryClient();
|
||||
const [initialDataLoaded, setInitialDataLoaded] = useState(false);
|
||||
|
||||
// Wait for initial data to be loaded
|
||||
useEffect(() => {
|
||||
const initialPromise = window.__initialUserDataPromise;
|
||||
if (initialPromise) {
|
||||
initialPromise.finally(() => {
|
||||
setInitialDataLoaded(true);
|
||||
});
|
||||
} else {
|
||||
setInitialDataLoaded(true);
|
||||
}
|
||||
}, []);
|
||||
|
||||
const userQuery = useQuery({
|
||||
queryKey: ["user"],
|
||||
queryFn: () => fetchUser(),
|
||||
queryFn: async () => {
|
||||
const result = await fetchUser();
|
||||
return result;
|
||||
},
|
||||
staleTime: 5 * 60 * 1000, // Consider data stale after 5 minutes
|
||||
gcTime: 10 * 60 * 1000, // Keep in cache for 10 minutes
|
||||
initialData: null, // Start with null to prevent flashing
|
||||
retry: 10,
|
||||
retryDelay: (attemptIndex) => Math.min(500 * attemptIndex, 2000),
|
||||
refetchOnMount: true, // Always fetch when component mounts
|
||||
});
|
||||
|
||||
// Mutation to refresh user data
|
||||
@@ -49,14 +40,15 @@ export function useUser() {
|
||||
},
|
||||
});
|
||||
|
||||
const isLoading = userQuery.isLoading || userQuery.isFetching;
|
||||
const isAuthenticated = Boolean(userQuery.data?.name);
|
||||
|
||||
return {
|
||||
user: userQuery.data,
|
||||
isLoading:
|
||||
!initialDataLoaded ||
|
||||
(userQuery.isLoading && userQuery.data === undefined), // Show loading until initial data is loaded
|
||||
isLoading,
|
||||
isError: userQuery.isError,
|
||||
error: userQuery.error,
|
||||
isAuthenticated: Boolean(userQuery.data?.name),
|
||||
isAuthenticated,
|
||||
refreshUser: refreshUser.mutate,
|
||||
isRefreshing: refreshUser.isPending,
|
||||
refetchUser: userQuery.refetch,
|
||||
|
||||
@@ -16,793 +16,6 @@
|
||||
--text-color: #ffffff;
|
||||
}
|
||||
}
|
||||
@media (prefers-color-scheme: light) {
|
||||
.prose {
|
||||
/**
|
||||
* One Light theme for prism.js
|
||||
* Based on Atom's One Light theme: https://github.com/atom/atom/tree/master/packages/one-light-syntax
|
||||
*/
|
||||
|
||||
/**
|
||||
* One Light colours (accurate as of commit eb064bf on 19 Feb 2021)
|
||||
* From colors.less
|
||||
* --mono-1: hsl(230, 8%, 24%);
|
||||
* --mono-2: hsl(230, 6%, 44%);
|
||||
* --mono-3: hsl(230, 4%, 64%)
|
||||
* --hue-1: hsl(198, 99%, 37%);
|
||||
* --hue-2: hsl(221, 87%, 60%);
|
||||
* --hue-3: hsl(301, 63%, 40%);
|
||||
* --hue-4: hsl(119, 34%, 47%);
|
||||
* --hue-5: hsl(5, 74%, 59%);
|
||||
* --hue-5-2: hsl(344, 84%, 43%);
|
||||
* --hue-6: hsl(35, 99%, 36%);
|
||||
* --hue-6-2: hsl(35, 99%, 40%);
|
||||
* --syntax-fg: hsl(230, 8%, 24%);
|
||||
* --syntax-bg: hsl(230, 1%, 98%);
|
||||
* --syntax-gutter: hsl(230, 1%, 62%);
|
||||
* --syntax-guide: hsla(230, 8%, 24%, 0.2);
|
||||
* --syntax-accent: hsl(230, 100%, 66%);
|
||||
* From syntax-variables.less
|
||||
* --syntax-selection-color: hsl(230, 1%, 90%);
|
||||
* --syntax-gutter-background-color-selected: hsl(230, 1%, 90%);
|
||||
* --syntax-cursor-line: hsla(230, 8%, 24%, 0.05);
|
||||
*/
|
||||
|
||||
.token.comment,
|
||||
.token.prolog,
|
||||
.token.cdata {
|
||||
color: hsl(230, 4%, 64%);
|
||||
}
|
||||
|
||||
.token.doctype,
|
||||
.token.punctuation,
|
||||
.token.entity {
|
||||
color: hsl(230, 8%, 24%);
|
||||
}
|
||||
|
||||
.token.attr-name,
|
||||
.token.class-name,
|
||||
.token.boolean,
|
||||
.token.constant,
|
||||
.token.number,
|
||||
.token.atrule {
|
||||
color: hsl(35, 99%, 36%);
|
||||
}
|
||||
|
||||
.token.keyword {
|
||||
color: hsl(301, 63%, 40%);
|
||||
}
|
||||
|
||||
.token.property,
|
||||
.token.tag,
|
||||
.token.symbol,
|
||||
.token.deleted,
|
||||
.token.important {
|
||||
color: hsl(5, 74%, 59%);
|
||||
}
|
||||
|
||||
.token.selector,
|
||||
.token.string,
|
||||
.token.char,
|
||||
.token.builtin,
|
||||
.token.inserted,
|
||||
.token.regex,
|
||||
.token.attr-value,
|
||||
.token.attr-value > .token.punctuation {
|
||||
color: hsl(119, 34%, 47%);
|
||||
}
|
||||
|
||||
.token.variable,
|
||||
.token.operator,
|
||||
.token.function {
|
||||
color: hsl(221, 87%, 60%);
|
||||
}
|
||||
|
||||
.token.url {
|
||||
color: hsl(198, 99%, 37%);
|
||||
}
|
||||
|
||||
/* HTML overrides */
|
||||
.token.attr-value > .token.punctuation.attr-equals,
|
||||
.token.special-attr > .token.attr-value > .token.value.css {
|
||||
color: hsl(230, 8%, 24%);
|
||||
}
|
||||
|
||||
/* CSS overrides */
|
||||
.language-css .token.selector {
|
||||
color: hsl(5, 74%, 59%);
|
||||
}
|
||||
|
||||
.language-css .token.property {
|
||||
color: hsl(230, 8%, 24%);
|
||||
}
|
||||
|
||||
.language-css .token.function,
|
||||
.language-css .token.url > .token.function {
|
||||
color: hsl(198, 99%, 37%);
|
||||
}
|
||||
|
||||
.language-css .token.url > .token.string.url {
|
||||
color: hsl(119, 34%, 47%);
|
||||
}
|
||||
|
||||
.language-css .token.important,
|
||||
.language-css .token.atrule .token.rule {
|
||||
color: hsl(301, 63%, 40%);
|
||||
}
|
||||
|
||||
/* JS overrides */
|
||||
.language-javascript .token.operator {
|
||||
color: hsl(301, 63%, 40%);
|
||||
}
|
||||
|
||||
.language-javascript
|
||||
.token.template-string
|
||||
> .token.interpolation
|
||||
> .token.interpolation-punctuation.punctuation {
|
||||
color: hsl(344, 84%, 43%);
|
||||
}
|
||||
|
||||
/* JSON overrides */
|
||||
.language-json .token.operator {
|
||||
color: hsl(230, 8%, 24%);
|
||||
}
|
||||
|
||||
.language-json .token.null.keyword {
|
||||
color: hsl(35, 99%, 36%);
|
||||
}
|
||||
|
||||
/* MD overrides */
|
||||
.language-markdown .token.url,
|
||||
.language-markdown .token.url > .token.operator,
|
||||
.language-markdown .token.url-reference.url > .token.string {
|
||||
color: hsl(230, 8%, 24%);
|
||||
}
|
||||
|
||||
.language-markdown .token.url > .token.content {
|
||||
color: hsl(221, 87%, 60%);
|
||||
}
|
||||
|
||||
.language-markdown .token.url > .token.url,
|
||||
.language-markdown .token.url-reference.url {
|
||||
color: hsl(198, 99%, 37%);
|
||||
}
|
||||
|
||||
.language-markdown .token.blockquote.punctuation,
|
||||
.language-markdown .token.hr.punctuation {
|
||||
color: hsl(230, 4%, 64%);
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.language-markdown .token.code-snippet {
|
||||
color: hsl(119, 34%, 47%);
|
||||
}
|
||||
|
||||
.language-markdown .token.bold .token.content {
|
||||
color: hsl(35, 99%, 36%);
|
||||
}
|
||||
|
||||
.language-markdown .token.italic .token.content {
|
||||
color: hsl(301, 63%, 40%);
|
||||
}
|
||||
|
||||
.language-markdown .token.strike .token.content,
|
||||
.language-markdown .token.strike .token.punctuation,
|
||||
.language-markdown .token.list.punctuation,
|
||||
.language-markdown .token.title.important > .token.punctuation {
|
||||
color: hsl(5, 74%, 59%);
|
||||
}
|
||||
|
||||
/* General */
|
||||
.token.bold {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.token.comment,
|
||||
.token.italic {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.token.entity {
|
||||
cursor: help;
|
||||
}
|
||||
|
||||
.token.namespace {
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
/* Plugin overrides */
|
||||
/* Selectors should have higher specificity than those in the plugins' default stylesheets */
|
||||
|
||||
/* Show Invisibles plugin overrides */
|
||||
.token.token.tab:not(:empty):before,
|
||||
.token.token.cr:before,
|
||||
.token.token.lf:before,
|
||||
.token.token.space:before {
|
||||
color: hsla(230, 8%, 24%, 0.2);
|
||||
}
|
||||
|
||||
/* Toolbar plugin overrides */
|
||||
/* Space out all buttons and move them away from the right edge of the code block */
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item {
|
||||
margin-right: 0.4em;
|
||||
}
|
||||
|
||||
/* Styling the buttons */
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > button,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > a,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > span {
|
||||
background: hsl(230, 1%, 90%);
|
||||
color: hsl(230, 6%, 44%);
|
||||
padding: 0.1em 0.4em;
|
||||
border-radius: 0.3em;
|
||||
}
|
||||
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > button:hover,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > button:focus,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > a:hover,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > a:focus,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > span:hover,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > span:focus {
|
||||
background: hsl(230, 1%, 78%); /* custom: darken(--syntax-bg, 20%) */
|
||||
color: hsl(230, 8%, 24%);
|
||||
}
|
||||
|
||||
/* Line Highlight plugin overrides */
|
||||
/* The highlighted line itself */
|
||||
.line-highlight.line-highlight {
|
||||
background: hsla(230, 8%, 24%, 0.05);
|
||||
}
|
||||
|
||||
/* Default line numbers in Line Highlight plugin */
|
||||
.line-highlight.line-highlight:before,
|
||||
.line-highlight.line-highlight[data-end]:after {
|
||||
background: hsl(230, 1%, 90%);
|
||||
color: hsl(230, 8%, 24%);
|
||||
padding: 0.1em 0.6em;
|
||||
border-radius: 0.3em;
|
||||
box-shadow: 0 2px 0 0 rgba(0, 0, 0, 0.2); /* same as Toolbar plugin default */
|
||||
}
|
||||
|
||||
/* Hovering over a linkable line number (in the gutter area) */
|
||||
/* Requires Line Numbers plugin as well */
|
||||
pre[id].linkable-line-numbers.linkable-line-numbers
|
||||
span.line-numbers-rows
|
||||
> span:hover:before {
|
||||
background-color: hsla(230, 8%, 24%, 0.05);
|
||||
}
|
||||
|
||||
/* Line Numbers and Command Line plugins overrides */
|
||||
/* Line separating gutter from coding area */
|
||||
.line-numbers.line-numbers .line-numbers-rows,
|
||||
.command-line .command-line-prompt {
|
||||
border-right-color: hsla(230, 8%, 24%, 0.2);
|
||||
}
|
||||
|
||||
/* Stuff in the gutter */
|
||||
.line-numbers .line-numbers-rows > span:before,
|
||||
.command-line .command-line-prompt > span:before {
|
||||
color: hsl(230, 1%, 62%);
|
||||
}
|
||||
|
||||
/* Match Braces plugin overrides */
|
||||
/* Note: Outline colour is inherited from the braces */
|
||||
.rainbow-braces .token.token.punctuation.brace-level-1,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-5,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-9 {
|
||||
color: hsl(5, 74%, 59%);
|
||||
}
|
||||
|
||||
.rainbow-braces .token.token.punctuation.brace-level-2,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-6,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-10 {
|
||||
color: hsl(119, 34%, 47%);
|
||||
}
|
||||
|
||||
.rainbow-braces .token.token.punctuation.brace-level-3,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-7,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-11 {
|
||||
color: hsl(221, 87%, 60%);
|
||||
}
|
||||
|
||||
.rainbow-braces .token.token.punctuation.brace-level-4,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-8,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-12 {
|
||||
color: hsl(301, 63%, 40%);
|
||||
}
|
||||
|
||||
/* Diff Highlight plugin overrides */
|
||||
/* Taken from https://github.com/atom/github/blob/master/styles/variables.less */
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix),
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix) {
|
||||
background-color: hsla(353, 100%, 66%, 0.15);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix)::-moz-selection,
|
||||
pre.diff-highlight
|
||||
> code
|
||||
.token.token.deleted:not(.prefix)
|
||||
*::-moz-selection,
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix)::-moz-selection,
|
||||
pre
|
||||
> code.diff-highlight
|
||||
.token.token.deleted:not(.prefix)
|
||||
*::-moz-selection {
|
||||
background-color: hsla(353, 95%, 66%, 0.25);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix)::selection,
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix) *::selection,
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix)::selection,
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix) *::selection {
|
||||
background-color: hsla(353, 95%, 66%, 0.25);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.inserted:not(.prefix),
|
||||
pre > code.diff-highlight .token.token.inserted:not(.prefix) {
|
||||
background-color: hsla(137, 100%, 55%, 0.15);
|
||||
}
|
||||
|
||||
pre.diff-highlight
|
||||
> code
|
||||
.token.token.inserted:not(.prefix)::-moz-selection,
|
||||
pre.diff-highlight
|
||||
> code
|
||||
.token.token.inserted:not(.prefix)
|
||||
*::-moz-selection,
|
||||
pre
|
||||
> code.diff-highlight
|
||||
.token.token.inserted:not(.prefix)::-moz-selection,
|
||||
pre
|
||||
> code.diff-highlight
|
||||
.token.token.inserted:not(.prefix)
|
||||
*::-moz-selection {
|
||||
background-color: hsla(135, 73%, 55%, 0.25);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.inserted:not(.prefix)::selection,
|
||||
pre.diff-highlight > code .token.token.inserted:not(.prefix) *::selection,
|
||||
pre > code.diff-highlight .token.token.inserted:not(.prefix)::selection,
|
||||
pre > code.diff-highlight .token.token.inserted:not(.prefix) *::selection {
|
||||
background-color: hsla(135, 73%, 55%, 0.25);
|
||||
}
|
||||
|
||||
/* Previewers plugin overrides */
|
||||
/* Based on https://github.com/atom-community/atom-ide-datatip/blob/master/styles/atom-ide-datatips.less and https://github.com/atom/atom/blob/master/packages/one-light-ui */
|
||||
/* Border around popup */
|
||||
.prism-previewer.prism-previewer:before,
|
||||
.prism-previewer-gradient.prism-previewer-gradient div {
|
||||
border-color: hsl(0, 0, 95%);
|
||||
}
|
||||
|
||||
/* Angle and time should remain as circles and are hence not included */
|
||||
.prism-previewer-color.prism-previewer-color:before,
|
||||
.prism-previewer-gradient.prism-previewer-gradient div,
|
||||
.prism-previewer-easing.prism-previewer-easing:before {
|
||||
border-radius: 0.3em;
|
||||
}
|
||||
|
||||
/* Triangles pointing to the code */
|
||||
.prism-previewer.prism-previewer:after {
|
||||
border-top-color: hsl(0, 0, 95%);
|
||||
}
|
||||
|
||||
.prism-previewer-flipped.prism-previewer-flipped.after {
|
||||
border-bottom-color: hsl(0, 0, 95%);
|
||||
}
|
||||
|
||||
/* Background colour within the popup */
|
||||
.prism-previewer-angle.prism-previewer-angle:before,
|
||||
.prism-previewer-time.prism-previewer-time:before,
|
||||
.prism-previewer-easing.prism-previewer-easing {
|
||||
background: hsl(0, 0%, 100%);
|
||||
}
|
||||
|
||||
/* For angle, this is the positive area (eg. 90deg will display one quadrant in this colour) */
|
||||
/* For time, this is the alternate colour */
|
||||
.prism-previewer-angle.prism-previewer-angle circle,
|
||||
.prism-previewer-time.prism-previewer-time circle {
|
||||
stroke: hsl(230, 8%, 24%);
|
||||
stroke-opacity: 1;
|
||||
}
|
||||
|
||||
/* Stroke colours of the handle, direction point, and vector itself */
|
||||
.prism-previewer-easing.prism-previewer-easing circle,
|
||||
.prism-previewer-easing.prism-previewer-easing path,
|
||||
.prism-previewer-easing.prism-previewer-easing line {
|
||||
stroke: hsl(230, 8%, 24%);
|
||||
}
|
||||
|
||||
/* Fill colour of the handle */
|
||||
.prism-previewer-easing.prism-previewer-easing circle {
|
||||
fill: transparent;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.prose {
|
||||
.token.comment,
|
||||
.token.prolog,
|
||||
.token.cdata {
|
||||
color: hsl(220, 10%, 40%);
|
||||
}
|
||||
|
||||
.token.doctype,
|
||||
.token.punctuation,
|
||||
.token.entity {
|
||||
color: hsl(220, 14%, 71%);
|
||||
}
|
||||
|
||||
.token.attr-name,
|
||||
.token.class-name,
|
||||
.token.boolean,
|
||||
.token.constant,
|
||||
.token.number,
|
||||
.token.atrule {
|
||||
color: hsl(29, 54%, 61%);
|
||||
}
|
||||
|
||||
.token.keyword {
|
||||
color: hsl(286, 60%, 67%);
|
||||
}
|
||||
|
||||
.token.property,
|
||||
.token.tag,
|
||||
.token.symbol,
|
||||
.token.deleted,
|
||||
.token.important {
|
||||
color: hsl(355, 65%, 65%);
|
||||
}
|
||||
|
||||
.token.selector,
|
||||
.token.string,
|
||||
.token.char,
|
||||
.token.builtin,
|
||||
.token.inserted,
|
||||
.token.regex,
|
||||
.token.attr-value,
|
||||
.token.attr-value > .token.punctuation {
|
||||
color: hsl(95, 38%, 62%);
|
||||
}
|
||||
|
||||
.token.variable,
|
||||
.token.operator,
|
||||
.token.function {
|
||||
color: hsl(207, 82%, 66%);
|
||||
}
|
||||
|
||||
.token.url {
|
||||
color: hsl(187, 47%, 55%);
|
||||
}
|
||||
|
||||
/* HTML overrides */
|
||||
.token.attr-value > .token.punctuation.attr-equals,
|
||||
.token.special-attr > .token.attr-value > .token.value.css {
|
||||
color: hsl(220, 14%, 71%);
|
||||
}
|
||||
|
||||
/* CSS overrides */
|
||||
.language-css .token.selector {
|
||||
color: hsl(355, 65%, 65%);
|
||||
}
|
||||
|
||||
.language-css .token.property {
|
||||
color: hsl(220, 14%, 71%);
|
||||
}
|
||||
|
||||
.language-css .token.function,
|
||||
.language-css .token.url > .token.function {
|
||||
color: hsl(187, 47%, 55%);
|
||||
}
|
||||
|
||||
.language-css .token.url > .token.string.url {
|
||||
color: hsl(95, 38%, 62%);
|
||||
}
|
||||
|
||||
.language-css .token.important,
|
||||
.language-css .token.atrule .token.rule {
|
||||
color: hsl(286, 60%, 67%);
|
||||
}
|
||||
|
||||
/* JS overrides */
|
||||
.language-javascript .token.operator {
|
||||
color: hsl(286, 60%, 67%);
|
||||
}
|
||||
|
||||
.language-javascript
|
||||
.token.template-string
|
||||
> .token.interpolation
|
||||
> .token.interpolation-punctuation.punctuation {
|
||||
color: hsl(5, 48%, 51%);
|
||||
}
|
||||
|
||||
/* JSON overrides */
|
||||
.language-json .token.operator {
|
||||
color: hsl(220, 14%, 71%);
|
||||
}
|
||||
|
||||
.language-json .token.null.keyword {
|
||||
color: hsl(29, 54%, 61%);
|
||||
}
|
||||
|
||||
/* MD overrides */
|
||||
.language-markdown .token.url,
|
||||
.language-markdown .token.url > .token.operator,
|
||||
.language-markdown .token.url-reference.url > .token.string {
|
||||
color: hsl(220, 14%, 71%);
|
||||
}
|
||||
|
||||
.language-markdown .token.url > .token.content {
|
||||
color: hsl(207, 82%, 66%);
|
||||
}
|
||||
|
||||
.language-markdown .token.url > .token.url,
|
||||
.language-markdown .token.url-reference.url {
|
||||
color: hsl(187, 47%, 55%);
|
||||
}
|
||||
|
||||
.language-markdown .token.blockquote.punctuation,
|
||||
.language-markdown .token.hr.punctuation {
|
||||
color: hsl(220, 10%, 40%);
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.language-markdown .token.code-snippet {
|
||||
color: hsl(95, 38%, 62%);
|
||||
}
|
||||
|
||||
.language-markdown .token.bold .token.content {
|
||||
color: hsl(29, 54%, 61%);
|
||||
}
|
||||
|
||||
.language-markdown .token.italic .token.content {
|
||||
color: hsl(286, 60%, 67%);
|
||||
}
|
||||
|
||||
.language-markdown .token.strike .token.content,
|
||||
.language-markdown .token.strike .token.punctuation,
|
||||
.language-markdown .token.list.punctuation,
|
||||
.language-markdown .token.title.important > .token.punctuation {
|
||||
color: hsl(355, 65%, 65%);
|
||||
}
|
||||
|
||||
/* General */
|
||||
.token.bold {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.token.comment,
|
||||
.token.italic {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.token.entity {
|
||||
cursor: help;
|
||||
}
|
||||
|
||||
.token.namespace {
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
/* Plugin overrides */
|
||||
/* Selectors should have higher specificity than those in the plugins' default stylesheets */
|
||||
|
||||
/* Show Invisibles plugin overrides */
|
||||
.token.token.tab:not(:empty):before,
|
||||
.token.token.cr:before,
|
||||
.token.token.lf:before,
|
||||
.token.token.space:before {
|
||||
color: hsla(220, 14%, 71%, 0.15);
|
||||
text-shadow: none;
|
||||
}
|
||||
|
||||
/* Toolbar plugin overrides */
|
||||
/* Space out all buttons and move them away from the right edge of the code block */
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item {
|
||||
margin-right: 0.4em;
|
||||
}
|
||||
|
||||
/* Styling the buttons */
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > button,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > a,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > span {
|
||||
background: hsl(220, 13%, 26%);
|
||||
color: hsl(220, 9%, 55%);
|
||||
padding: 0.1em 0.4em;
|
||||
border-radius: 0.3em;
|
||||
}
|
||||
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > button:hover,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > button:focus,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > a:hover,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > a:focus,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > span:hover,
|
||||
div.code-toolbar > .toolbar.toolbar > .toolbar-item > span:focus {
|
||||
background: hsl(220, 13%, 28%);
|
||||
color: hsl(220, 14%, 71%);
|
||||
}
|
||||
|
||||
/* Line Highlight plugin overrides */
|
||||
/* The highlighted line itself */
|
||||
.line-highlight.line-highlight {
|
||||
background: hsla(220, 100%, 80%, 0.04);
|
||||
}
|
||||
|
||||
/* Default line numbers in Line Highlight plugin */
|
||||
.line-highlight.line-highlight:before,
|
||||
.line-highlight.line-highlight[data-end]:after {
|
||||
background: hsl(220, 13%, 26%);
|
||||
color: hsl(220, 14%, 71%);
|
||||
padding: 0.1em 0.6em;
|
||||
border-radius: 0.3em;
|
||||
box-shadow: 0 2px 0 0 rgba(0, 0, 0, 0.2); /* same as Toolbar plugin default */
|
||||
}
|
||||
|
||||
/* Hovering over a linkable line number (in the gutter area) */
|
||||
/* Requires Line Numbers plugin as well */
|
||||
pre[id].linkable-line-numbers.linkable-line-numbers
|
||||
span.line-numbers-rows
|
||||
> span:hover:before {
|
||||
background-color: hsla(220, 100%, 80%, 0.04);
|
||||
}
|
||||
|
||||
/* Line Numbers and Command Line plugins overrides */
|
||||
/* Line separating gutter from coding area */
|
||||
.line-numbers.line-numbers .line-numbers-rows,
|
||||
.command-line .command-line-prompt {
|
||||
border-right-color: hsla(220, 14%, 71%, 0.15);
|
||||
}
|
||||
|
||||
/* Stuff in the gutter */
|
||||
.line-numbers .line-numbers-rows > span:before,
|
||||
.command-line .command-line-prompt > span:before {
|
||||
color: hsl(220, 14%, 45%);
|
||||
}
|
||||
|
||||
/* Match Braces plugin overrides */
|
||||
/* Note: Outline colour is inherited from the braces */
|
||||
.rainbow-braces .token.token.punctuation.brace-level-1,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-5,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-9 {
|
||||
color: hsl(355, 65%, 65%);
|
||||
}
|
||||
|
||||
.rainbow-braces .token.token.punctuation.brace-level-2,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-6,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-10 {
|
||||
color: hsl(95, 38%, 62%);
|
||||
}
|
||||
|
||||
.rainbow-braces .token.token.punctuation.brace-level-3,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-7,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-11 {
|
||||
color: hsl(207, 82%, 66%);
|
||||
}
|
||||
|
||||
.rainbow-braces .token.token.punctuation.brace-level-4,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-8,
|
||||
.rainbow-braces .token.token.punctuation.brace-level-12 {
|
||||
color: hsl(286, 60%, 67%);
|
||||
}
|
||||
|
||||
/* Diff Highlight plugin overrides */
|
||||
/* Taken from https://github.com/atom/github/blob/master/styles/variables.less */
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix),
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix) {
|
||||
background-color: hsla(353, 100%, 66%, 0.15);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix)::-moz-selection,
|
||||
pre.diff-highlight
|
||||
> code
|
||||
.token.token.deleted:not(.prefix)
|
||||
*::-moz-selection,
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix)::-moz-selection,
|
||||
pre
|
||||
> code.diff-highlight
|
||||
.token.token.deleted:not(.prefix)
|
||||
*::-moz-selection {
|
||||
background-color: hsla(353, 95%, 66%, 0.25);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix)::selection,
|
||||
pre.diff-highlight > code .token.token.deleted:not(.prefix) *::selection,
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix)::selection,
|
||||
pre > code.diff-highlight .token.token.deleted:not(.prefix) *::selection {
|
||||
background-color: hsla(353, 95%, 66%, 0.25);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.inserted:not(.prefix),
|
||||
pre > code.diff-highlight .token.token.inserted:not(.prefix) {
|
||||
background-color: hsla(137, 100%, 55%, 0.15);
|
||||
}
|
||||
|
||||
pre.diff-highlight
|
||||
> code
|
||||
.token.token.inserted:not(.prefix)::-moz-selection,
|
||||
pre.diff-highlight
|
||||
> code
|
||||
.token.token.inserted:not(.prefix)
|
||||
*::-moz-selection,
|
||||
pre
|
||||
> code.diff-highlight
|
||||
.token.token.inserted:not(.prefix)::-moz-selection,
|
||||
pre
|
||||
> code.diff-highlight
|
||||
.token.token.inserted:not(.prefix)
|
||||
*::-moz-selection {
|
||||
background-color: hsla(135, 73%, 55%, 0.25);
|
||||
}
|
||||
|
||||
pre.diff-highlight > code .token.token.inserted:not(.prefix)::selection,
|
||||
pre.diff-highlight > code .token.token.inserted:not(.prefix) *::selection,
|
||||
pre > code.diff-highlight .token.token.inserted:not(.prefix)::selection,
|
||||
pre > code.diff-highlight .token.token.inserted:not(.prefix) *::selection {
|
||||
background-color: hsla(135, 73%, 55%, 0.25);
|
||||
}
|
||||
|
||||
/* Previewers plugin overrides */
|
||||
/* Based on https://github.com/atom-community/atom-ide-datatip/blob/master/styles/atom-ide-datatips.less and https://github.com/atom/atom/blob/master/packages/one-dark-ui */
|
||||
/* Border around popup */
|
||||
.prism-previewer.prism-previewer:before,
|
||||
.prism-previewer-gradient.prism-previewer-gradient div {
|
||||
border-color: hsl(224, 13%, 17%);
|
||||
}
|
||||
|
||||
/* Angle and time should remain as circles and are hence not included */
|
||||
.prism-previewer-color.prism-previewer-color:before,
|
||||
.prism-previewer-gradient.prism-previewer-gradient div,
|
||||
.prism-previewer-easing.prism-previewer-easing:before {
|
||||
border-radius: 0.3em;
|
||||
}
|
||||
|
||||
/* Triangles pointing to the code */
|
||||
.prism-previewer.prism-previewer:after {
|
||||
border-top-color: hsl(224, 13%, 17%);
|
||||
}
|
||||
|
||||
.prism-previewer-flipped.prism-previewer-flipped.after {
|
||||
border-bottom-color: hsl(224, 13%, 17%);
|
||||
}
|
||||
|
||||
/* Background colour within the popup */
|
||||
.prism-previewer-angle.prism-previewer-angle:before,
|
||||
.prism-previewer-time.prism-previewer-time:before,
|
||||
.prism-previewer-easing.prism-previewer-easing {
|
||||
background: hsl(219, 13%, 22%);
|
||||
}
|
||||
|
||||
/* For angle, this is the positive area (eg. 90deg will display one quadrant in this colour) */
|
||||
/* For time, this is the alternate colour */
|
||||
.prism-previewer-angle.prism-previewer-angle circle,
|
||||
.prism-previewer-time.prism-previewer-time circle {
|
||||
stroke: hsl(220, 14%, 71%);
|
||||
stroke-opacity: 1;
|
||||
}
|
||||
|
||||
/* Stroke colours of the handle, direction point, and vector itself */
|
||||
.prism-previewer-easing.prism-previewer-easing circle,
|
||||
.prism-previewer-easing.prism-previewer-easing path,
|
||||
.prism-previewer-easing.prism-previewer-easing line {
|
||||
stroke: hsl(220, 14%, 71%);
|
||||
}
|
||||
|
||||
/* Fill colour of the handle */
|
||||
.prism-previewer-easing.prism-previewer-easing circle {
|
||||
fill: transparent;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.prose pre {
|
||||
contain: layout style;
|
||||
}
|
||||
|
||||
/* Or more aggressively */
|
||||
.prose pre code {
|
||||
contain: layout style paint;
|
||||
}
|
||||
|
||||
/* messaging-style typing indicator animation */
|
||||
@keyframes typing {
|
||||
|
||||
13
app/ui/app/src/lib/config.ts
Normal file
@@ -0,0 +1,13 @@
// API configuration
const DEV_API_URL = "http://127.0.0.1:3001";

// Base URL for fetch API calls (can be relative in production)
export const API_BASE = import.meta.env.DEV ? DEV_API_URL : "";

// Full host URL for Ollama client (needs full origin in production)
export const OLLAMA_HOST = import.meta.env.DEV
  ? DEV_API_URL
  : window.location.origin;

export const OLLAMA_DOT_COM =
  import.meta.env.VITE_OLLAMA_DOT_COM_URL || "https://ollama.com";
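For illustration, a minimal sketch (not part of this change) of how API_BASE might be consumed by a fetch helper; the helper name and error handling below are assumptions:

// Hypothetical helper built on the config above: in dev, requests go to the
// local API server; in production they stay relative to the serving origin.
import { API_BASE } from "./config";

export async function getJSON<T>(path: string): Promise<T> {
  const res = await fetch(`${API_BASE}${path}`);
  if (!res.ok) {
    throw new Error(`Request failed: ${res.status}`);
  }
  return res.json() as Promise<T>;
}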
157
app/ui/app/src/lib/highlighter.ts
Normal file
@@ -0,0 +1,157 @@
|
||||
import { createHighlighter } from "shiki";
|
||||
import type { ThemeRegistration } from "shiki";
|
||||
|
||||
const oneLightTheme: ThemeRegistration = {
|
||||
name: "one-light",
|
||||
type: "light",
|
||||
colors: {
|
||||
"editor.background": "#fafafa",
|
||||
"editor.foreground": "#383a42",
|
||||
},
|
||||
tokenColors: [
|
||||
{
|
||||
scope: ["comment", "punctuation.definition.comment"],
|
||||
settings: { foreground: "#a0a1a7" },
|
||||
},
|
||||
{
|
||||
scope: ["keyword", "storage.type", "storage.modifier"],
|
||||
settings: { foreground: "#a626a4" },
|
||||
},
|
||||
{ scope: ["string", "string.quoted"], settings: { foreground: "#50a14f" } },
|
||||
{
|
||||
scope: ["function", "entity.name.function", "support.function"],
|
||||
settings: { foreground: "#4078f2" },
|
||||
},
|
||||
{
|
||||
scope: [
|
||||
"constant.numeric",
|
||||
"constant.language",
|
||||
"constant.character",
|
||||
"number",
|
||||
],
|
||||
settings: { foreground: "#c18401" },
|
||||
},
|
||||
{
|
||||
scope: ["variable", "support.variable"],
|
||||
settings: { foreground: "#e45649" },
|
||||
},
|
||||
{
|
||||
scope: ["entity.name.tag", "entity.name.type", "entity.name.class"],
|
||||
settings: { foreground: "#e45649" },
|
||||
},
|
||||
{
|
||||
scope: ["entity.other.attribute-name"],
|
||||
settings: { foreground: "#c18401" },
|
||||
},
|
||||
{
|
||||
scope: ["keyword.operator", "operator"],
|
||||
settings: { foreground: "#a626a4" },
|
||||
},
|
||||
{ scope: ["punctuation"], settings: { foreground: "#383a42" } },
|
||||
{
|
||||
scope: ["markup.heading"],
|
||||
settings: { foreground: "#e45649", fontStyle: "bold" },
|
||||
},
|
||||
{
|
||||
scope: ["markup.bold"],
|
||||
settings: { foreground: "#c18401", fontStyle: "bold" },
|
||||
},
|
||||
{
|
||||
scope: ["markup.italic"],
|
||||
settings: { foreground: "#a626a4", fontStyle: "italic" },
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
const oneDarkTheme: ThemeRegistration = {
|
||||
name: "one-dark",
|
||||
type: "dark",
|
||||
colors: {
|
||||
"editor.background": "#282c34",
|
||||
"editor.foreground": "#abb2bf",
|
||||
},
|
||||
tokenColors: [
|
||||
{
|
||||
scope: ["comment", "punctuation.definition.comment"],
|
||||
settings: { foreground: "#5c6370" },
|
||||
},
|
||||
{
|
||||
scope: ["keyword", "storage.type", "storage.modifier"],
|
||||
settings: { foreground: "#c678dd" },
|
||||
},
|
||||
{ scope: ["string", "string.quoted"], settings: { foreground: "#98c379" } },
|
||||
{
|
||||
scope: ["function", "entity.name.function", "support.function"],
|
||||
settings: { foreground: "#61afef" },
|
||||
},
|
||||
{
|
||||
scope: [
|
||||
"constant.numeric",
|
||||
"constant.language",
|
||||
"constant.character",
|
||||
"number",
|
||||
],
|
||||
settings: { foreground: "#d19a66" },
|
||||
},
|
||||
{
|
||||
scope: ["variable", "support.variable"],
|
||||
settings: { foreground: "#e06c75" },
|
||||
},
|
||||
{
|
||||
scope: ["entity.name.tag", "entity.name.type", "entity.name.class"],
|
||||
settings: { foreground: "#e06c75" },
|
||||
},
|
||||
{
|
||||
scope: ["entity.other.attribute-name"],
|
||||
settings: { foreground: "#d19a66" },
|
||||
},
|
||||
{
|
||||
scope: ["keyword.operator", "operator"],
|
||||
settings: { foreground: "#c678dd" },
|
||||
},
|
||||
{ scope: ["punctuation"], settings: { foreground: "#abb2bf" } },
|
||||
{
|
||||
scope: ["markup.heading"],
|
||||
settings: { foreground: "#e06c75", fontStyle: "bold" },
|
||||
},
|
||||
{
|
||||
scope: ["markup.bold"],
|
||||
settings: { foreground: "#d19a66", fontStyle: "bold" },
|
||||
},
|
||||
{
|
||||
scope: ["markup.italic"],
|
||||
settings: { foreground: "#c678dd", fontStyle: "italic" },
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
export let highlighter: Awaited<ReturnType<typeof createHighlighter>> | null =
|
||||
null;
|
||||
|
||||
export const highlighterPromise = createHighlighter({
|
||||
themes: [oneLightTheme, oneDarkTheme],
|
||||
langs: [
|
||||
"javascript",
|
||||
"typescript",
|
||||
"python",
|
||||
"bash",
|
||||
"shell",
|
||||
"json",
|
||||
"html",
|
||||
"css",
|
||||
"tsx",
|
||||
"jsx",
|
||||
"go",
|
||||
"rust",
|
||||
"java",
|
||||
"c",
|
||||
"cpp",
|
||||
"sql",
|
||||
"swift",
|
||||
"yaml",
|
||||
"markdown",
|
||||
],
|
||||
}).then((h) => {
|
||||
highlighter = h;
|
||||
return h;
|
||||
});
|
||||
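For illustration, a minimal usage sketch (not part of this change) of the exported highlighterPromise; it assumes shiki's codeToHtml API and a caller-supplied dark-mode flag:

// Hypothetical caller: await the shared highlighter once, then render HTML
// using the custom "one-light" / "one-dark" themes registered above.
import { highlighterPromise } from "./highlighter";

export async function renderCode(code: string, lang: string, dark: boolean) {
  const h = await highlighterPromise;
  return h.codeToHtml(code, {
    lang,
    theme: dark ? "one-dark" : "one-light",
  });
}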
@@ -1,4 +1,5 @@
import { Ollama } from "ollama/browser";
import { OLLAMA_HOST } from "./config";

let _ollamaClient: Ollama | null = null;

@@ -6,7 +7,7 @@ export const ollamaClient = new Proxy({} as Ollama, {
  get(_target, prop) {
    if (!_ollamaClient) {
      _ollamaClient = new Ollama({
        host: window.location.origin,
        host: OLLAMA_HOST,
      });
    }
    const value = _ollamaClient[prop as keyof Ollama];

@@ -5,13 +5,6 @@ import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
|
||||
import { routeTree } from "./routeTree.gen";
|
||||
import { fetchUser } from "./api";
|
||||
import { StreamingProvider } from "./contexts/StreamingContext";
|
||||
import { User } from "@/gotypes";
|
||||
|
||||
declare global {
|
||||
interface Window {
|
||||
__initialUserDataPromise?: Promise<User | null>;
|
||||
}
|
||||
}
|
||||
|
||||
const queryClient = new QueryClient({
|
||||
defaultOptions: {
|
||||
@@ -24,27 +17,11 @@ const queryClient = new QueryClient({
|
||||
},
|
||||
});
|
||||
|
||||
// Track initial user data fetch
|
||||
let initialUserDataPromise: Promise<User | null> | null = null;
|
||||
|
||||
// Initialize user data on app startup
|
||||
const initializeUserData = async () => {
|
||||
try {
|
||||
const userData = await fetchUser();
|
||||
fetchUser().then((userData) => {
|
||||
if (userData) {
|
||||
queryClient.setQueryData(["user"], userData);
|
||||
return userData;
|
||||
} catch (error) {
|
||||
console.error("Error initializing user data:", error);
|
||||
queryClient.setQueryData(["user"], null);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
// Start initialization immediately and track the promise
|
||||
initialUserDataPromise = initializeUserData();
|
||||
|
||||
// Export the promise so hooks can await it
|
||||
window.__initialUserDataPromise = initialUserDataPromise;
|
||||
});
|
||||
|
||||
const router = createRouter({
|
||||
routeTree,
|
||||
|
||||
@@ -2,6 +2,7 @@ import type { QueryClient } from "@tanstack/react-query";
|
||||
import { createRootRouteWithContext, Outlet } from "@tanstack/react-router";
|
||||
import { getSettings } from "@/api";
|
||||
import { useQuery } from "@tanstack/react-query";
|
||||
import { useCloudStatus } from "@/hooks/useCloudStatus";
|
||||
|
||||
function RootComponent() {
|
||||
// This hook ensures settings are fetched on app startup
|
||||
@@ -9,6 +10,8 @@ function RootComponent() {
|
||||
queryKey: ["settings"],
|
||||
queryFn: getSettings,
|
||||
});
|
||||
// Fetch cloud status on startup (best-effort)
|
||||
useCloudStatus();
|
||||
|
||||
return (
|
||||
<div>
|
||||
|
||||
@@ -4,12 +4,37 @@ import Chat from "@/components/Chat";
|
||||
import { getChat } from "@/api";
|
||||
import { SidebarLayout } from "@/components/layout/layout";
|
||||
import { ChatSidebar } from "@/components/ChatSidebar";
|
||||
import LaunchCommands from "@/components/LaunchCommands";
|
||||
import { useEffect, useRef } from "react";
|
||||
import { useSettings } from "@/hooks/useSettings";
|
||||
|
||||
const launchSidebarRequestedKey = "ollama.launchSidebarRequested";
|
||||
const launchSidebarSeenKey = "ollama.launchSidebarSeen";
|
||||
const fallbackSessionState = new Map<string, string>();
|
||||
|
||||
function getSessionState() {
|
||||
if (typeof sessionStorage !== "undefined") {
|
||||
return sessionStorage;
|
||||
}
|
||||
|
||||
return {
|
||||
getItem(key: string) {
|
||||
return fallbackSessionState.get(key) ?? null;
|
||||
},
|
||||
setItem(key: string, value: string) {
|
||||
fallbackSessionState.set(key, value);
|
||||
},
|
||||
removeItem(key: string) {
|
||||
fallbackSessionState.delete(key);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export const Route = createFileRoute("/c/$chatId")({
|
||||
component: RouteComponent,
|
||||
loader: async ({ context, params }) => {
|
||||
// Skip loading for "new" chat
|
||||
if (params.chatId !== "new") {
|
||||
// Skip loading for special non-chat views
|
||||
if (params.chatId !== "new" && params.chatId !== "launch") {
|
||||
context.queryClient.ensureQueryData({
|
||||
queryKey: ["chat", params.chatId],
|
||||
queryFn: () => getChat(params.chatId),
|
||||
@@ -21,13 +46,70 @@ export const Route = createFileRoute("/c/$chatId")({
|
||||
|
||||
function RouteComponent() {
|
||||
const { chatId } = Route.useParams();
|
||||
const { settingsData, setSettings } = useSettings();
|
||||
const previousChatIdRef = useRef<string | null>(null);
|
||||
|
||||
// Always call hooks at the top level - use a flag to skip data when chatId is "new"
|
||||
// Always call hooks at the top level - use a flag to skip data when chatId is a special view
|
||||
const {
|
||||
data: chatData,
|
||||
isLoading: chatLoading,
|
||||
error: chatError,
|
||||
} = useChat(chatId === "new" ? "" : chatId);
|
||||
} = useChat(chatId === "new" || chatId === "launch" ? "" : chatId);
|
||||
|
||||
useEffect(() => {
|
||||
if (!settingsData) {
|
||||
return;
|
||||
}
|
||||
|
||||
const previousChatId = previousChatIdRef.current;
|
||||
previousChatIdRef.current = chatId;
|
||||
|
||||
if (chatId === "launch") {
|
||||
const sessionState = getSessionState();
|
||||
const shouldOpenSidebar =
|
||||
previousChatId !== "launch" &&
|
||||
(() => {
|
||||
if (sessionState.getItem(launchSidebarRequestedKey) === "1") {
|
||||
sessionState.removeItem(launchSidebarRequestedKey);
|
||||
sessionState.setItem(launchSidebarSeenKey, "1");
|
||||
return true;
|
||||
}
|
||||
|
||||
if (sessionState.getItem(launchSidebarSeenKey) !== "1") {
|
||||
sessionState.setItem(launchSidebarSeenKey, "1");
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
})();
|
||||
const updates: { LastHomeView?: string; SidebarOpen?: boolean } = {};
|
||||
|
||||
if (settingsData.LastHomeView !== "launch") {
|
||||
updates.LastHomeView = "launch";
|
||||
}
|
||||
|
||||
if (shouldOpenSidebar && !settingsData.SidebarOpen) {
|
||||
updates.SidebarOpen = true;
|
||||
}
|
||||
|
||||
if (Object.keys(updates).length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
setSettings(updates).catch(() => {
|
||||
// Best effort persistence for home view preference.
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (settingsData.LastHomeView === "chat") {
|
||||
return;
|
||||
}
|
||||
|
||||
setSettings({ LastHomeView: "chat" }).catch(() => {
|
||||
// Best effort persistence for home view preference.
|
||||
});
|
||||
}, [chatId, settingsData, setSettings]);
|
||||
|
||||
// Handle "new" chat case - just use Chat component which handles everything
|
||||
if (chatId === "new") {
|
||||
@@ -38,6 +120,14 @@ function RouteComponent() {
|
||||
);
|
||||
}
|
||||
|
||||
if (chatId === "launch") {
|
||||
return (
|
||||
<SidebarLayout sidebar={<ChatSidebar currentChatId={chatId} />}>
|
||||
<LaunchCommands />
|
||||
</SidebarLayout>
|
||||
);
|
||||
}
|
||||
|
||||
// Handle existing chat case
|
||||
if (chatLoading) {
|
||||
return (
|
||||
|
||||
@@ -1,10 +1,18 @@
|
||||
import { createFileRoute, redirect } from "@tanstack/react-router";
|
||||
import { getSettings } from "@/api";
|
||||
|
||||
export const Route = createFileRoute("/")({
|
||||
beforeLoad: () => {
|
||||
beforeLoad: async ({ context }) => {
|
||||
const settingsData = await context.queryClient.ensureQueryData({
|
||||
queryKey: ["settings"],
|
||||
queryFn: getSettings,
|
||||
});
|
||||
const chatId =
|
||||
settingsData?.settings?.LastHomeView === "chat" ? "new" : "launch";
|
||||
|
||||
throw redirect({
|
||||
to: "/c/$chatId",
|
||||
params: { chatId: "new" },
|
||||
params: { chatId },
|
||||
mask: {
|
||||
to: "/",
|
||||
},
|
||||
|
||||
57
app/ui/app/src/utils/clipboard.test.ts
Normal file
@@ -0,0 +1,57 @@
|
||||
import { describe, expect, it, vi, beforeEach } from "vitest";
|
||||
import { copyTextToClipboard } from "./clipboard";
|
||||
|
||||
describe("copyTextToClipboard", () => {
|
||||
beforeEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("copies via Clipboard API when available", async () => {
|
||||
const writeText = vi.fn().mockResolvedValue(undefined);
|
||||
vi.stubGlobal("navigator", {
|
||||
clipboard: {
|
||||
writeText,
|
||||
},
|
||||
});
|
||||
|
||||
const copied = await copyTextToClipboard("ollama launch claude");
|
||||
|
||||
expect(copied).toBe(true);
|
||||
expect(writeText).toHaveBeenCalledWith("ollama launch claude");
|
||||
});
|
||||
|
||||
it("falls back to execCommand when Clipboard API fails", async () => {
|
||||
const writeText = vi.fn().mockRejectedValue(new Error("not allowed"));
|
||||
vi.stubGlobal("navigator", {
|
||||
clipboard: {
|
||||
writeText,
|
||||
},
|
||||
});
|
||||
|
||||
const textarea = {
|
||||
value: "",
|
||||
setAttribute: vi.fn(),
|
||||
style: {} as Record<string, string>,
|
||||
focus: vi.fn(),
|
||||
select: vi.fn(),
|
||||
};
|
||||
const appendChild = vi.fn();
|
||||
const removeChild = vi.fn();
|
||||
const execCommand = vi.fn().mockReturnValue(true);
|
||||
vi.stubGlobal("document", {
|
||||
createElement: vi.fn().mockReturnValue(textarea),
|
||||
body: {
|
||||
appendChild,
|
||||
removeChild,
|
||||
},
|
||||
execCommand,
|
||||
});
|
||||
|
||||
const copied = await copyTextToClipboard("ollama launch openclaw");
|
||||
|
||||
expect(copied).toBe(true);
|
||||
expect(execCommand).toHaveBeenCalledWith("copy");
|
||||
expect(appendChild).toHaveBeenCalled();
|
||||
expect(removeChild).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
30
app/ui/app/src/utils/clipboard.ts
Normal file
@@ -0,0 +1,30 @@
export async function copyTextToClipboard(text: string): Promise<boolean> {
  try {
    await navigator.clipboard.writeText(text);
    return true;
  } catch (clipboardError) {
    console.error(
      "Clipboard API failed, falling back to execCommand",
      clipboardError,
    );
  }

  try {
    const textarea = document.createElement("textarea");
    textarea.value = text;
    textarea.setAttribute("readonly", "true");
    textarea.style.position = "fixed";
    textarea.style.left = "-9999px";
    textarea.style.opacity = "0";
    document.body.appendChild(textarea);
    textarea.focus();
    textarea.select();

    const copied = document.execCommand("copy");
    document.body.removeChild(textarea);
    return copied;
  } catch (fallbackError) {
    console.error("Fallback copy failed", fallbackError);
    return false;
  }
}
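For illustration, a minimal sketch (not part of this change) of how a copy-button handler might call copyTextToClipboard; the setCopied callback is an assumption:

// Hypothetical handler: reports success whether the Clipboard API or the
// execCommand fallback performed the copy.
import { copyTextToClipboard } from "./clipboard";

export async function handleCopy(
  command: string,
  setCopied: (copied: boolean) => void,
) {
  const ok = await copyTextToClipboard(command);
  setCopied(ok);
}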
99
app/ui/app/src/utils/fileValidation.test.ts
Normal file
@@ -0,0 +1,99 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import { IMAGE_EXTENSIONS, validateFile } from "./fileValidation";
|
||||
|
||||
describe("fileValidation", () => {
|
||||
describe("IMAGE_EXTENSIONS", () => {
|
||||
it("should include all supported image formats including WebP", () => {
|
||||
expect(IMAGE_EXTENSIONS).toContain("png");
|
||||
expect(IMAGE_EXTENSIONS).toContain("jpg");
|
||||
expect(IMAGE_EXTENSIONS).toContain("jpeg");
|
||||
expect(IMAGE_EXTENSIONS).toContain("webp");
|
||||
});
|
||||
});
|
||||
|
||||
describe("validateFile", () => {
|
||||
const createMockFile = (
|
||||
name: string,
|
||||
size: number,
|
||||
type: string,
|
||||
): File => {
|
||||
const blob = new Blob(["test content"], { type });
|
||||
return new File([blob], name, { type });
|
||||
};
|
||||
|
||||
it("should accept WebP images when vision capability is enabled", () => {
|
||||
const file = createMockFile("test.webp", 1024, "image/webp");
|
||||
const result = validateFile(file, {
|
||||
hasVisionCapability: true,
|
||||
});
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it("should accept images regardless of vision capability", () => {
|
||||
// Vision capability check is handled at the UI layer (ChatForm),
|
||||
// not at validation time, so users can switch models without
|
||||
// needing to re-upload files.
|
||||
const file = createMockFile("test.webp", 1024, "image/webp");
|
||||
const result = validateFile(file, {
|
||||
hasVisionCapability: false,
|
||||
});
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it("should accept PNG images when vision capability is enabled", () => {
|
||||
const file = createMockFile("test.png", 1024, "image/png");
|
||||
const result = validateFile(file, {
|
||||
hasVisionCapability: true,
|
||||
});
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it("should accept JPEG images when vision capability is enabled", () => {
|
||||
const file = createMockFile("test.jpg", 1024, "image/jpeg");
|
||||
const result = validateFile(file, {
|
||||
hasVisionCapability: true,
|
||||
});
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it("should reject files that are too large", () => {
|
||||
// Create a file with size property set correctly
|
||||
const largeSize = 11 * 1024 * 1024; // 11MB
|
||||
const content = new Uint8Array(largeSize);
|
||||
const blob = new Blob([content], { type: "image/webp" });
|
||||
const file = new File([blob], "large.webp", { type: "image/webp" });
|
||||
|
||||
const result = validateFile(file, {
|
||||
hasVisionCapability: true,
|
||||
maxFileSize: 10, // 10MB limit
|
||||
});
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.error).toBe("File too large");
|
||||
});
|
||||
|
||||
it("should reject unsupported file types", () => {
|
||||
const file = createMockFile("test.xyz", 1024, "application/xyz");
|
||||
const result = validateFile(file, {
|
||||
hasVisionCapability: true,
|
||||
});
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.error).toBe("File type not supported");
|
||||
});
|
||||
|
||||
it("should respect custom validators", () => {
|
||||
const file = createMockFile("test.webp", 1024, "image/webp");
|
||||
const result = validateFile(file, {
|
||||
hasVisionCapability: true,
|
||||
customValidator: () => ({
|
||||
valid: false,
|
||||
error: "Custom error",
|
||||
}),
|
||||
});
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.error).toBe("Custom error");
|
||||
});
|
||||
});
|
||||
|
||||
// Note: processFiles tests are skipped because FileReader is not available in the Node.js test environment
|
||||
// These functions are tested in browser environment via integration tests
|
||||
});
|
||||
@@ -41,7 +41,7 @@ export const TEXT_FILE_EXTENSIONS = [
|
||||
"rtf",
|
||||
];
|
||||
|
||||
export const IMAGE_EXTENSIONS = ["png", "jpg", "jpeg"];
|
||||
export const IMAGE_EXTENSIONS = ["png", "jpg", "jpeg", "webp"];
|
||||
|
||||
export interface FileValidationOptions {
|
||||
maxFileSize?: number; // in MB
|
||||
@@ -63,7 +63,6 @@ export function validateFile(
|
||||
const {
|
||||
maxFileSize = 10,
|
||||
allowedExtensions = [...TEXT_FILE_EXTENSIONS, ...IMAGE_EXTENSIONS],
|
||||
hasVisionCapability = false,
|
||||
customValidator,
|
||||
} = options;
|
||||
|
||||
@@ -83,10 +82,6 @@ export function validateFile(
|
||||
return { valid: false, error: "File type not supported" };
|
||||
}
|
||||
|
||||
if (IMAGE_EXTENSIONS.includes(fileExtension) && !hasVisionCapability) {
|
||||
return { valid: false, error: "This model does not support images" };
|
||||
}
|
||||
|
||||
// File size validation
|
||||
if (file.size > MAX_FILE_SIZE) {
|
||||
return { valid: false, error: "File too large" };
|
||||
|
||||
@@ -41,14 +41,14 @@ describe("Model merging logic", () => {
|
||||
expect(merged.length).toBe(FEATURED_MODELS.length + 2);
|
||||
});
|
||||
|
||||
it("should hide cloud models in airplane mode", () => {
|
||||
it("should hide cloud models when cloud is disabled", () => {
|
||||
const localModels: Model[] = [
|
||||
new Model({ model: "gpt-oss:120b-cloud" }),
|
||||
new Model({ model: "llama3:latest" }),
|
||||
new Model({ model: "mistral:latest" }),
|
||||
];
|
||||
|
||||
const merged = mergeModels(localModels, true); // airplane mode = true
|
||||
const merged = mergeModels(localModels, true); // cloud disabled = true
|
||||
|
||||
// No cloud models should be present
|
||||
const cloudModels = merged.filter((m) => m.isCloud());
|
||||
|
||||
@@ -2,27 +2,28 @@ import { Model } from "@/gotypes";
|
||||
|
||||
// Featured models list (in priority order)
|
||||
export const FEATURED_MODELS = [
|
||||
"kimi-k2.5:cloud",
|
||||
"glm-5:cloud",
|
||||
"minimax-m2.7:cloud",
|
||||
"gemma4:31b-cloud",
|
||||
"qwen3.5:397b-cloud",
|
||||
"gpt-oss:120b-cloud",
|
||||
"gpt-oss:20b-cloud",
|
||||
"deepseek-v3.1:671b-cloud",
|
||||
"qwen3-coder:480b-cloud",
|
||||
"qwen3-vl:235b-cloud",
|
||||
"minimax-m2:cloud",
|
||||
"glm-4.6:cloud",
|
||||
"gpt-oss:120b",
|
||||
"gpt-oss:20b",
|
||||
"gemma3:27b",
|
||||
"gemma3:12b",
|
||||
"gemma3:4b",
|
||||
"gemma3:1b",
|
||||
"gemma4:31b",
|
||||
"gemma4:26b",
|
||||
"gemma4:e4b",
|
||||
"gemma4:e2b",
|
||||
"deepseek-r1:8b",
|
||||
"qwen3-coder:30b",
|
||||
"qwen3-vl:30b",
|
||||
"qwen3-vl:8b",
|
||||
"qwen3-vl:4b",
|
||||
"qwen3:30b",
|
||||
"qwen3:8b",
|
||||
"qwen3:4b",
|
||||
"qwen3.5:27b",
|
||||
"qwen3.5:9b",
|
||||
"qwen3.5:4b",
|
||||
];
|
||||
|
||||
function alphabeticalSort(a: Model, b: Model): number {
|
||||
@@ -32,7 +33,7 @@ function alphabeticalSort(a: Model, b: Model): number {
|
||||
// Merges models, sorting cloud models first, then other models
|
||||
export function mergeModels(
|
||||
localModels: Model[],
|
||||
airplaneMode: boolean = false,
|
||||
hideCloudModels: boolean = false,
|
||||
): Model[] {
|
||||
const allModels = (localModels || []).map((model) => model);
|
||||
|
||||
@@ -95,7 +96,7 @@ export function mergeModels(
|
||||
|
||||
remainingModels.sort(alphabeticalSort);
|
||||
|
||||
return airplaneMode
|
||||
return hideCloudModels
|
||||
? [...featuredModels, ...remainingModels]
|
||||
: [...cloudModels, ...featuredModels, ...remainingModels];
|
||||
}
|
||||
|
||||
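For illustration, a minimal sketch (not part of this change) of calling mergeModels with the renamed flag; the "./models" module path here is an assumption:

// Hypothetical caller: hideCloudModels=true drops cloud models from the result,
// mirroring the renamed parameter above (formerly airplaneMode).
import { Model } from "@/gotypes";
import { mergeModels } from "./models";

const local = [new Model({ model: "llama3:latest" })];
const withCloud = mergeModels(local, false); // cloud + featured + remaining models
const localOnly = mergeModels(local, true); // featured + remaining models only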
@@ -1,24 +0,0 @@
|
||||
import { remark } from "remark";
|
||||
import remarkStringify from "remark-stringify";
|
||||
import remarkStreamingMarkdown from "./remarkStreamingMarkdown";
|
||||
|
||||
/**
|
||||
* Process markdown content for streaming display using the remark plugin.
|
||||
* This is primarily used for testing the remark plugin with string inputs/outputs.
|
||||
*/
|
||||
export function processStreamingMarkdown(content: string): string {
|
||||
if (!content) return content;
|
||||
|
||||
const result = remark()
|
||||
.use(remarkStreamingMarkdown, { debug: false })
|
||||
.use(remarkStringify)
|
||||
.processSync(content);
|
||||
|
||||
// remove trailing newline to keep tests cleaner
|
||||
let output = result.toString();
|
||||
if (output.endsWith("\n")) {
|
||||
output = output.slice(0, -1);
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
@@ -1,447 +0,0 @@
|
||||
import { parents, type Proxy } from "unist-util-parents";
|
||||
import type { Plugin } from "unified";
|
||||
import type {
|
||||
Emphasis,
|
||||
Node,
|
||||
Parent,
|
||||
Root,
|
||||
RootContent,
|
||||
Text,
|
||||
Strong,
|
||||
PhrasingContent,
|
||||
Paragraph,
|
||||
} from "mdast";
|
||||
import { u } from "unist-builder";
|
||||
|
||||
declare module "unist" {
|
||||
interface Node {
|
||||
/** Added by `unist-util-parents` (or your own walk). */
|
||||
parent?: Proxy & Parent;
|
||||
}
|
||||
}
|
||||
|
||||
// interface SimpleTextRule {
|
||||
// pattern: RegExp;
|
||||
// transform: (matches: RegExpExecArray[], lastNode: Proxy) => void;
|
||||
// }
|
||||
|
||||
// const simpleTextRules: SimpleTextRule[] = [
|
||||
// // TODO(drifkin): generalize this for `__`/`_`/`~~`/`~` etc.
|
||||
// {
|
||||
// pattern: /(\*\*)(?=\S|$)/g,
|
||||
// transform: (matchesIterator, lastNode) => {
|
||||
// const textNode = lastNode.node as Text;
|
||||
|
||||
// const matches = [...matchesIterator];
|
||||
// const lastMatch = matches[matches.length - 1];
|
||||
// const origValue = textNode.value;
|
||||
// const start = lastMatch.index;
|
||||
// const sep = lastMatch[1];
|
||||
|
||||
// const before = origValue.slice(0, start);
|
||||
// const after = origValue.slice(start + sep.length);
|
||||
|
||||
// if (lastNode.parent) {
|
||||
// const index = (lastNode.parent.node as Parent).children.indexOf(
|
||||
// lastNode.node as RootContent,
|
||||
// );
|
||||
// const shouldRemove = before.length === 0;
|
||||
// if (!shouldRemove) {
|
||||
// textNode.value = before;
|
||||
// }
|
||||
|
||||
// const newNode = u("strong", {
|
||||
// children: [u("text", { value: after })],
|
||||
// });
|
||||
// (lastNode.parent.node as Parent).children.splice(
|
||||
// index + (shouldRemove ? 0 : 1),
|
||||
// shouldRemove ? 1 : 0,
|
||||
// newNode,
|
||||
// );
|
||||
// }
|
||||
// },
|
||||
// },
|
||||
// ];
|
||||
|
||||
interface Options {
|
||||
debug?: boolean;
|
||||
onLastNode?: (info: LastNodeInfo) => void;
|
||||
}
|
||||
|
||||
export interface LastNodeInfo {
|
||||
path: string[];
|
||||
type: string;
|
||||
value?: string;
|
||||
lastChars?: string;
|
||||
fullNode: Node;
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes `child` from `parent` in-place.
|
||||
* @returns `true` if the child was found and removed; `false` otherwise.
|
||||
*/
|
||||
export function removeChildFromParent(
|
||||
child: RootContent,
|
||||
parent: Node,
|
||||
): boolean {
|
||||
if (!isParent(parent)) return false; // parent isn’t a Parent → nothing to do
|
||||
|
||||
const idx = parent.children.indexOf(child);
|
||||
if (idx < 0) return false; // not a child → nothing to remove
|
||||
|
||||
parent.children.splice(idx, 1);
|
||||
return true; // removal successful
|
||||
}
|
||||
|
||||
/** Narrow a generic `Node` to a `Parent` (i.e. one that really has children). */
|
||||
function isParent(node: Node): node is Parent {
|
||||
// A `Parent` always has a `children` array; make sure it's an array first.
|
||||
return Array.isArray((node as Partial<Parent>).children);
|
||||
}
|
||||
|
||||
/**
|
||||
* Follow “last-child” pointers until you reach a leaf.
|
||||
* Returns the right-most, deepest node in source order.
|
||||
*/
|
||||
export function findRightmostDeepestNode(root: Node): Node {
|
||||
let current: Node = root;
|
||||
|
||||
// While the current node *is* a Parent and has at least one child…
|
||||
while (isParent(current) && current.children.length > 0) {
|
||||
const lastIndex = current.children.length - 1;
|
||||
current = current.children[lastIndex];
|
||||
}
|
||||
|
||||
return current; // Leaf: no further children
|
||||
}
|
||||
|
||||
const remarkStreamingMarkdown: Plugin<[Options?], Root> = () => {
|
||||
return (tree) => {
|
||||
const treeWithParents = parents(tree);
|
||||
const lastNode = findRightmostDeepestNode(treeWithParents) as Proxy;
|
||||
|
||||
const parentNode = lastNode.parent;
|
||||
const grandparentNode = parentNode?.parent;
|
||||
|
||||
let ruleMatched = false;
|
||||
|
||||
// handling `* *` -> ``
|
||||
//
|
||||
// if the last node is part of a <list item (otherwise empty)> ->
|
||||
// <list (otherwise empty)> -> <list item (last node, empty)>, then we need to
|
||||
// remove everything up to and including the first list item. This happens
|
||||
// when we have `* *`, which can become a bolded list item OR a horizontal
|
||||
// line
|
||||
if (
|
||||
lastNode.type === "listItem" &&
|
||||
parentNode &&
|
||||
grandparentNode &&
|
||||
parentNode.type === "list" &&
|
||||
grandparentNode.type === "listItem" &&
|
||||
parentNode.children.length === 1 &&
|
||||
grandparentNode.children.length === 1
|
||||
) {
|
||||
ruleMatched = true;
|
||||
if (grandparentNode.parent) {
|
||||
removeChildFromParent(
|
||||
grandparentNode.node as RootContent,
|
||||
grandparentNode.parent.node,
|
||||
);
|
||||
}
|
||||
// Handle `*` -> ``:
|
||||
//
|
||||
// if the last node is just an empty list item, we need to remove it
|
||||
// because it could become something else (e.g., a horizontal line)
|
||||
} else if (
|
||||
lastNode.type === "listItem" &&
|
||||
parentNode &&
|
||||
parentNode.type === "list"
|
||||
) {
|
||||
ruleMatched = true;
|
||||
removeChildFromParent(lastNode.node as RootContent, parentNode.node);
|
||||
} else if (lastNode.type === "thematicBreak") {
|
||||
ruleMatched = true;
|
||||
const parent = lastNode.parent;
|
||||
if (parent) {
|
||||
removeChildFromParent(lastNode.node as RootContent, parent.node);
|
||||
}
|
||||
} else if (lastNode.type === "text") {
|
||||
const textNode = lastNode.node as Text;
|
||||
if (textNode.value.endsWith("**")) {
|
||||
ruleMatched = true;
|
||||
textNode.value = textNode.value.slice(0, -2);
|
||||
// if there's a newline then a number, this is very very likely a
|
||||
// numbered list item. Let's just hide it until the period comes (or
|
||||
// other text disambiguates it)
|
||||
} else {
|
||||
const match = textNode.value.match(/^([0-9]+)$/m);
|
||||
if (match) {
|
||||
const number = match[1];
|
||||
textNode.value = textNode.value.slice(0, -number.length - 1);
|
||||
ruleMatched = true;
|
||||
// if the text node is now empty, then we might want to remove other
|
||||
// elements, like a now-empty containing paragraph, or a break that
|
||||
// might disappear once more tokens come in
|
||||
if (textNode.value.length === 0) {
|
||||
if (
|
||||
lastNode.parent?.type === "paragraph" &&
|
||||
lastNode.parent.children.length === 1
|
||||
) {
|
||||
// remove the whole paragraph if it's now empty (otherwise it'll
|
||||
// cause an extra newline that might not last)
|
||||
removeChildFromParent(
|
||||
lastNode.parent.node as Paragraph,
|
||||
lastNode.parent.parent?.node as Node,
|
||||
);
|
||||
} else {
|
||||
const prev = prevSibling(lastNode);
|
||||
if (prev?.type === "break") {
|
||||
removeChildFromParent(
|
||||
prev.node as RootContent,
|
||||
lastNode.parent?.node as Node,
|
||||
);
|
||||
removeChildFromParent(
|
||||
lastNode.node as RootContent,
|
||||
lastNode.parent?.node as Node,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ruleMatched) {
|
||||
return tree;
|
||||
}
|
||||
|
||||
// we need to
|
||||
// a case like
|
||||
// - *def `abc` [abc **def**](abc)*
|
||||
// is pretty tricky, because if we land just after def, then we actually
|
||||
// have two separate tags to process at two different parents. Maybe we
|
||||
// need to keep iterating up until we find a paragraph, but process each
|
||||
// parent on the way up. Hmm, well actually after `def` we won't even be a proper link yet
|
||||
// TODO(drifkin): it's really if the last node's parent is a paragraph, for which the following is a sub-cas where the lastNode is a text node.
|
||||
// And instead of just processing simple text rules, they need to operate on the whole paragraph
|
||||
// like `**[abc](def)` needs to become `**[abc](def)**`
|
||||
|
||||
// if we're just text at the end, then we should remove some ambiguous characters
|
||||
|
||||
if (lastNode.parent) {
|
||||
const didChange = processParent(lastNode.parent as Parent & Proxy);
|
||||
if (didChange) {
|
||||
// TODO(drifkin): need to fix up the tree, but not sure lastNode will still exist? Check all the transforms to see if it's safe to find the last node again
|
||||
//
|
||||
// need to regen the tree w/ parents since reparenting could've happened
|
||||
// treeWithParents = parents(tree);
|
||||
}
|
||||
}
|
||||
|
||||
const grandparent = lastNode.parent?.parent;
|
||||
// TODO(drifkin): let's go arbitrarily high up the tree, but limiting it
|
||||
// to 2 levels for now until I think more about the stop condition
|
||||
if (grandparent) {
|
||||
processParent(grandparent as Parent & Proxy);
|
||||
}
|
||||
|
||||
// console.log("ruleMatched", ruleMatched);
|
||||
|
||||
// } else if (lastNode.parent?.type === "paragraph") {
|
||||
// console.log("!!! paragraph");
|
||||
// console.log("lastNode.parent", lastNode.parent);
|
||||
|
||||
// // Handle `**abc*` -> `**abc**`:
|
||||
// // We detect this when the last child is an emphasis node, and it's preceded by a text node that ends with `*`
|
||||
// const paragraph = lastNode.parent as Proxy & Paragraph;
|
||||
// if (paragraph.children.length >= 2) {
|
||||
// const lastChild = paragraph.children[paragraph.children.length - 1];
|
||||
// if (lastChild.type === "emphasis") {
|
||||
// const sibling = paragraph.children[paragraph.children.length - 2];
|
||||
// if (sibling.type === "text") {
|
||||
// const siblingText = sibling as Text & Proxy;
|
||||
// if (siblingText.value.endsWith("*")) {
|
||||
// ruleMatched = true;
|
||||
// const textNode = (lastNode as Proxy).node as Text;
|
||||
// textNode.value = textNode.value.slice(0, -1);
|
||||
// paragraph.node.type = "strong";
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// } else if (lastNode.type === "text") {
|
||||
// // Handle `**abc*` -> `**abc**`:
|
||||
// //
|
||||
// // this gets parsed as a text node ending in `*` followed by an emphasis
|
||||
// // node. So if we're in text, we need to check if our parent is emphasis,
|
||||
// // and then get our parent's sibling before it and check if it ends with
|
||||
// // `*`
|
||||
// const parent = lastNode.parent;
|
||||
// if (parent && parent.type === "emphasis") {
|
||||
// const grandparent = parent.parent;
|
||||
// if (grandparent) {
|
||||
// const index = (grandparent.node as Parent).children.indexOf(
|
||||
// parent.node as RootContent,
|
||||
// );
|
||||
// if (index > 0) {
|
||||
// const prevNode = grandparent.children[index - 1];
|
||||
// if (
|
||||
// prevNode.type === "text" &&
|
||||
// (prevNode as Text).value.endsWith("*")
|
||||
// ) {
|
||||
// ruleMatched = true;
|
||||
// const textNode = (prevNode as Proxy).node as Text;
|
||||
// textNode.value = textNode.value.slice(0, -1);
|
||||
// parent.node.type = "strong";
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// if (!ruleMatched) {
|
||||
// // if the last node is just text, then we process it in order to fix up certain unclosed items
|
||||
// // e.g., `**abc` -> `**abc**`
|
||||
// const textNode = lastNode.node as Text;
|
||||
// for (const rule of simpleTextRules) {
|
||||
// const matchesIterator = textNode.value.matchAll(rule.pattern);
|
||||
// const matches = [...matchesIterator];
|
||||
// if (matches.length > 0) {
|
||||
// rule.transform(matches, lastNode);
|
||||
// ruleMatched = true;
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// } else if (!ruleMatched) {
|
||||
// // console.log("no rule matched", lastNode);
|
||||
// }
|
||||
|
||||
return tree;
|
||||
};
|
||||
};
|
||||
|
||||
function processParent(parent: Parent & Proxy): boolean {
|
||||
if (parent.type === "emphasis") {
|
||||
// Handle `**abc*` -> `**abc**`:
|
||||
// We detect this when we end with an emphasis node, and it's preceded by
|
||||
// a text node that ends with `*`
|
||||
// TODO(drifkin): the last node can be more deeply nested (e.g., a code
|
||||
// literal in a link), so we probably need to walk up the tree until we
|
||||
// find an emphasis node or a block? For now we'll just go up one layer to
|
||||
// catch the most common cases
|
||||
const emphasisNode = parent as Emphasis & Proxy;
|
||||
const grandparent = emphasisNode.parent;
|
||||
if (grandparent) {
|
||||
const indexOfEmphasisNode = (grandparent.node as Parent).children.indexOf(
|
||||
emphasisNode.node as RootContent,
|
||||
);
|
||||
if (indexOfEmphasisNode >= 0) {
|
||||
const nodeBefore = grandparent.children[indexOfEmphasisNode - 1] as
|
||||
| (Node & Proxy)
|
||||
| undefined;
|
||||
if (nodeBefore?.type === "text") {
|
||||
const textNode = nodeBefore.node as Text;
|
||||
if (textNode.value.endsWith("*")) {
|
||||
const strBefore = textNode.value.slice(0, -1);
|
||||
textNode.value = strBefore;
|
||||
const strongNode = u("strong", {
|
||||
children: emphasisNode.children,
|
||||
});
|
||||
(grandparent.node as Parent).children.splice(
|
||||
indexOfEmphasisNode,
|
||||
1,
|
||||
strongNode,
|
||||
);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Let's check if we have any bold items to close
|
||||
for (let i = parent.children.length - 1; i >= 0; i--) {
|
||||
const child = parent.children[i];
|
||||
if (child.type === "text") {
|
||||
const textNode = child as Text & Proxy;
|
||||
const sep = "**";
|
||||
const index = textNode.value.lastIndexOf(sep);
|
||||
if (index >= 0) {
|
||||
let isValidOpening = false;
|
||||
if (index + sep.length < textNode.value.length) {
|
||||
const charAfter = textNode.value[index + sep.length];
|
||||
if (!isWhitespace(charAfter)) {
|
||||
isValidOpening = true;
|
||||
}
|
||||
} else {
|
||||
if (i < parent.children.length - 1) {
|
||||
// TODO(drifkin): I'm not sure that this check is strict enough.
|
||||
// We're trying to detect cases like `**[abc]()` where the char
|
||||
// after the opening ** is indeed a non-whitespace character. We're
|
||||
// using the heuristic that there's another item after the current
|
||||
// one, but I'm not sure if that is good enough. In a well
|
||||
// constructed tree, there aren't two text nodes in a row, so this
|
||||
// _seems_ good, but I should think through it more
|
||||
isValidOpening = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (isValidOpening) {
|
||||
// TODO(drifkin): close the bold
|
||||
const strBefore = textNode.value.slice(0, index);
|
||||
const strAfter = textNode.value.slice(index + sep.length);
|
||||
(textNode.node as Text).value = strBefore;
|
||||
// TODO(drifkin): the node above could be empty in which case we probably want to delete it
|
||||
const children: PhrasingContent[] = [
|
||||
...(strAfter.length > 0 ? [u("text", { value: strAfter })] : []),
|
||||
];
|
||||
const strongNode: Strong = u("strong", {
|
||||
children,
|
||||
});
|
||||
const nodesAfter = (parent.node as Parent).children.splice(
|
||||
i + 1,
|
||||
parent.children.length - i - 1,
|
||||
strongNode,
|
||||
);
|
||||
// TODO(drifkin): this cast seems iffy, should see if we can cast the
|
||||
// parent instead, which would also help us check some of our
|
||||
// assumptions
|
||||
strongNode.children.push(...(nodesAfter as PhrasingContent[]));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
function prevSibling(node: Node & Proxy): (Node & Proxy) | null {
|
||||
const parent = node.parent;
|
||||
if (parent) {
|
||||
const index = parent.children.indexOf(node);
|
||||
return parent.children[index - 1] as Node & Proxy;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function isWhitespace(str: string) {
|
||||
return str.trim() === "";
|
||||
}
|
||||
|
||||
// function debugPrintTreeNoPos(tree: Node) {
|
||||
// console.log(
|
||||
// JSON.stringify(
|
||||
// tree,
|
||||
// (key, value) => {
|
||||
// if (key === "position") {
|
||||
// return undefined;
|
||||
// }
|
||||
// return value;
|
||||
// },
|
||||
// 2,
|
||||
// ),
|
||||
// );
|
||||
// }
|
||||
|
||||
export default remarkStreamingMarkdown;
|
||||
@@ -45,7 +45,8 @@ type InferenceCompute struct {
|
||||
}
|
||||
|
||||
type InferenceComputeResponse struct {
|
||||
InferenceComputes []InferenceCompute `json:"inferenceComputes"`
|
||||
InferenceComputes []InferenceCompute `json:"inferenceComputes"`
|
||||
DefaultContextLength int `json:"defaultContextLength"`
|
||||
}
|
||||
|
||||
type ModelCapabilitiesResponse struct {
|
||||
@@ -101,15 +102,14 @@ type HealthResponse struct {
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Email string `json:"email"`
|
||||
AvatarURL string `json:"avatarURL"`
|
||||
Plan string `json:"plan"`
|
||||
Bio string `json:"bio"`
|
||||
FirstName string `json:"firstName"`
|
||||
LastName string `json:"lastName"`
|
||||
OverThreshold bool `json:"overThreshold"`
|
||||
ID string `json:"id"`
|
||||
Email string `json:"email"`
|
||||
Name string `json:"name"`
|
||||
Bio string `json:"bio,omitempty"`
|
||||
AvatarURL string `json:"avatarurl,omitempty"`
|
||||
FirstName string `json:"firstname,omitempty"`
|
||||
LastName string `json:"lastname,omitempty"`
|
||||
Plan string `json:"plan,omitempty"`
|
||||
}
|
||||
|
||||
type Attachment struct {
|
||||
@@ -133,9 +133,8 @@ type Error struct {
|
||||
}
|
||||
|
||||
type ModelUpstreamResponse struct {
|
||||
Digest string `json:"digest,omitempty"`
|
||||
PushTime int64 `json:"pushTime"`
|
||||
Error string `json:"error,omitempty"`
|
||||
Stale bool `json:"stale"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// Serializable data for the browser state
|
||||
|
||||
361
app/ui/ui.go
@@ -12,26 +12,27 @@ import (
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/app/auth"
|
||||
"github.com/ollama/ollama/app/server"
|
||||
"github.com/ollama/ollama/app/store"
|
||||
"github.com/ollama/ollama/app/tools"
|
||||
"github.com/ollama/ollama/app/types/not"
|
||||
"github.com/ollama/ollama/app/ui/responses"
|
||||
"github.com/ollama/ollama/app/updater"
|
||||
"github.com/ollama/ollama/app/version"
|
||||
ollamaAuth "github.com/ollama/ollama/auth"
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/manifest"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
_ "github.com/tkrajina/typescriptify-golang-structs/typescriptify"
|
||||
)
|
||||
@@ -107,6 +108,10 @@ type Server struct {
|
||||
|
||||
// Dev is true if the server is running in development mode
|
||||
Dev bool
|
||||
|
||||
// Updater for checking and downloading updates
|
||||
Updater *updater.Updater
|
||||
UpdateAvailableFunc func()
|
||||
}
|
||||
|
||||
func (s *Server) log() *slog.Logger {
|
||||
@@ -118,40 +123,66 @@ func (s *Server) log() *slog.Logger {
|
||||
|
||||
// ollamaProxy creates a reverse proxy handler to the Ollama server
|
||||
func (s *Server) ollamaProxy() http.Handler {
|
||||
ollamaHost := os.Getenv("OLLAMA_HOST")
|
||||
if ollamaHost == "" {
|
||||
ollamaHost = "http://127.0.0.1:11434"
|
||||
}
|
||||
var (
|
||||
proxy http.Handler
|
||||
proxyMu sync.Mutex
|
||||
)
|
||||
|
||||
if !strings.HasPrefix(ollamaHost, "http://") && !strings.HasPrefix(ollamaHost, "https://") {
|
||||
ollamaHost = "http://" + ollamaHost
|
||||
}
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
proxyMu.Lock()
|
||||
p := proxy
|
||||
proxyMu.Unlock()
|
||||
|
||||
target, err := url.Parse(ollamaHost)
|
||||
if err != nil {
|
||||
s.log().Error("failed to parse OLLAMA_HOST", "error", err, "host", ollamaHost)
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "failed to configure proxy", http.StatusInternalServerError)
|
||||
})
|
||||
}
|
||||
if p == nil {
|
||||
proxyMu.Lock()
|
||||
if proxy == nil {
|
||||
var err error
|
||||
for i := range 2 {
|
||||
if i > 0 {
|
||||
s.log().Warn("ollama server not ready, retrying", "attempt", i+1)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
s.log().Info("configuring ollama proxy", "target", target.String())
|
||||
err = WaitForServer(context.Background(), 10*time.Second)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
proxy := httputil.NewSingleHostReverseProxy(target)
|
||||
if err != nil {
|
||||
proxyMu.Unlock()
|
||||
s.log().Error("ollama server not ready after retries", "error", err)
|
||||
http.Error(w, "Ollama server is not ready", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
originalDirector := proxy.Director
|
||||
proxy.Director = func(req *http.Request) {
|
||||
originalDirector(req)
|
||||
req.Host = target.Host
|
||||
s.log().Debug("proxying request", "method", req.Method, "path", req.URL.Path, "target", target.Host)
|
||||
}
|
||||
target := envconfig.ConnectableHost()
|
||||
s.log().Info("configuring ollama proxy", "target", target.String())
|
||||
|
||||
proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
|
||||
s.log().Error("proxy error", "error", err, "path", r.URL.Path, "target", target.String())
|
||||
http.Error(w, "proxy error: "+err.Error(), http.StatusBadGateway)
|
||||
}
|
||||
newProxy := httputil.NewSingleHostReverseProxy(target)
|
||||
|
||||
return proxy
|
||||
originalDirector := newProxy.Director
|
||||
newProxy.Director = func(req *http.Request) {
|
||||
originalDirector(req)
|
||||
req.Host = target.Host
|
||||
s.log().Debug("proxying request", "method", req.Method, "path", req.URL.Path, "target", target.Host)
|
||||
}
|
||||
|
||||
newProxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
|
||||
s.log().Error("proxy error", "error", err, "path", r.URL.Path, "target", target.String())
|
||||
http.Error(w, "proxy error: "+err.Error(), http.StatusBadGateway)
|
||||
}
|
||||
|
||||
proxy = newProxy
|
||||
p = newProxy
|
||||
} else {
|
||||
p = proxy
|
||||
}
|
||||
proxyMu.Unlock()
|
||||
}
|
||||
|
||||
p.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
type errHandlerFunc func(http.ResponseWriter, *http.Request) error
|
||||
@@ -163,7 +194,7 @@ func (s *Server) Handler() http.Handler {
|
||||
if CORS() {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-Requested-With")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, User-Agent, Accept, X-Requested-With")
|
||||
w.Header().Set("Access-Control-Allow-Credentials", "true")
|
||||
|
||||
// Handle preflight requests
|
||||
@@ -259,16 +290,18 @@ func (s *Server) Handler() http.Handler {
|
||||
mux.Handle("POST /api/v1/model/upstream", handle(s.modelUpstream))
|
||||
mux.Handle("GET /api/v1/settings", handle(s.getSettings))
|
||||
mux.Handle("POST /api/v1/settings", handle(s.settings))
|
||||
mux.Handle("GET /api/v1/cloud", handle(s.getCloudSetting))
|
||||
mux.Handle("POST /api/v1/cloud", handle(s.cloudSetting))
|
||||
|
||||
// Ollama proxy endpoints
|
||||
ollamaProxy := s.ollamaProxy()
|
||||
mux.Handle("GET /api/tags", ollamaProxy)
|
||||
mux.Handle("POST /api/show", ollamaProxy)
|
||||
|
||||
mux.Handle("GET /api/v1/me", handle(s.me))
|
||||
mux.Handle("POST /api/v1/disconnect", handle(s.disconnect))
|
||||
mux.Handle("GET /api/v1/connect", handle(s.connectURL))
|
||||
mux.Handle("GET /api/v1/health", handle(s.health))
|
||||
mux.Handle("GET /api/version", ollamaProxy)
|
||||
mux.Handle("GET /api/status", ollamaProxy)
|
||||
mux.Handle("HEAD /api/version", ollamaProxy)
|
||||
mux.Handle("POST /api/me", ollamaProxy)
|
||||
mux.Handle("POST /api/signout", ollamaProxy)
|
||||
|
||||
// React app - catch all non-API routes and serve the React app
|
||||
mux.Handle("GET /", s.appHandler())
|
||||
@@ -286,7 +319,7 @@ func (s *Server) handleError(w http.ResponseWriter, e error) {
|
||||
if CORS() {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-Requested-With")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, User-Agent, Accept, X-Requested-With")
|
||||
w.Header().Set("Access-Control-Allow-Credentials", "true")
|
||||
}
|
||||
|
||||
@@ -309,8 +342,18 @@ func (t *userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error
|
||||
|
||||
// httpClient returns an HTTP client that automatically adds the User-Agent header
|
||||
func (s *Server) httpClient() *http.Client {
|
||||
return userAgentHTTPClient(10 * time.Second)
|
||||
}
|
||||
|
||||
// inferenceClient uses almost the same HTTP client, but without a timeout so
|
||||
// long requests aren't truncated
|
||||
func (s *Server) inferenceClient() *api.Client {
|
||||
return api.NewClient(envconfig.Host(), userAgentHTTPClient(0))
|
||||
}
|
||||
|
||||
func userAgentHTTPClient(timeout time.Duration) *http.Client {
|
||||
return &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
Timeout: timeout,
|
||||
Transport: &userAgentTransport{
|
||||
base: http.DefaultTransport,
|
||||
},
|
||||
@@ -338,7 +381,7 @@ func (s *Server) doSelfSigned(ctx context.Context, method, path string) (*http.R
|
||||
}
|
||||
|
||||
// UserData fetches user data from ollama.com API for the current ollama key
|
||||
func (s *Server) UserData(ctx context.Context) (*responses.User, error) {
|
||||
func (s *Server) UserData(ctx context.Context) (*api.UserResponse, error) {
|
||||
resp, err := s.doSelfSigned(ctx, http.MethodPost, "/api/me")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to call ollama.com/api/me: %w", err)
|
||||
@@ -349,7 +392,7 @@ func (s *Server) UserData(ctx context.Context) (*responses.User, error) {
|
||||
return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var user responses.User
|
||||
var user api.UserResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&user); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse user response: %w", err)
|
||||
}
|
||||
@@ -368,29 +411,27 @@ func (s *Server) UserData(ctx context.Context) (*responses.User, error) {
|
||||
return &user, nil
|
||||
}
|
||||
|
||||
func waitForServer(ctx context.Context) error {
|
||||
timeout := time.Now().Add(10 * time.Second)
|
||||
// TODO: this avoids an error on first load of the app
|
||||
// however we should either show a loading state or
|
||||
// wait for the Ollama server to be ready before redirecting
|
||||
for {
|
||||
// WaitForServer waits for the Ollama server to be ready
|
||||
func WaitForServer(ctx context.Context, timeout time.Duration) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
c, err := api.ClientFromEnvironment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := c.Version(ctx); err == nil {
|
||||
break
|
||||
}
|
||||
if time.Now().After(timeout) {
|
||||
return fmt.Errorf("timeout waiting for Ollama server to be ready")
|
||||
slog.Debug("ollama server is ready")
|
||||
return nil
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
return errors.New("timeout waiting for Ollama server to be ready")
|
||||
}
|
||||
|
||||
func (s *Server) createChat(w http.ResponseWriter, r *http.Request) error {
|
||||
waitForServer(r.Context())
|
||||
if err := WaitForServer(r.Context(), 10*time.Second); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err := uuid.NewV7()
|
||||
if err != nil {
|
||||
@@ -690,11 +731,7 @@ func (s *Server) chat(w http.ResponseWriter, r *http.Request) error {
|
||||
_, cancelLoading := context.WithCancel(ctx)
|
||||
loading := false
|
||||
|
||||
c, err := api.ClientFromEnvironment()
|
||||
if err != nil {
|
||||
cancelLoading()
|
||||
return err
|
||||
}
|
||||
c := s.inferenceClient()
|
||||
|
||||
// Check if the model exists locally by trying to show it
|
||||
// TODO (jmorganca): skip this round trip and instead just act
|
||||
@@ -804,8 +841,9 @@ func (s *Server) chat(w http.ResponseWriter, r *http.Request) error {
|
||||
|
||||
if !hasAttachments {
|
||||
WebSearchEnabled := req.WebSearch != nil && *req.WebSearch
|
||||
hasToolsCapability := slices.Contains(details.Capabilities, model.CapabilityTools)
|
||||
|
||||
if WebSearchEnabled {
|
||||
if WebSearchEnabled && hasToolsCapability {
|
||||
if supportsBrowserTools(req.Model) {
|
||||
browserState, ok := s.browserState(chat)
|
||||
if !ok {
|
||||
@@ -815,7 +853,7 @@ func (s *Server) chat(w http.ResponseWriter, r *http.Request) error {
|
||||
registry.Register(tools.NewBrowserSearch(browser))
|
||||
registry.Register(tools.NewBrowserOpen(browser))
|
||||
registry.Register(tools.NewBrowserFind(browser))
|
||||
} else if supportsWebSearchTools(req.Model) {
|
||||
} else {
|
||||
registry.Register(&tools.WebSearch{})
|
||||
registry.Register(&tools.WebFetch{})
|
||||
}
|
||||
@@ -975,7 +1013,7 @@ func (s *Server) chat(w http.ResponseWriter, r *http.Request) error {
|
||||
for _, toolCall := range res.Message.ToolCalls {
|
||||
// continues loop as tools were executed
|
||||
toolsExecuted = true
|
||||
result, content, err := registry.Execute(ctx, toolCall.Function.Name, toolCall.Function.Arguments)
|
||||
result, content, err := registry.Execute(ctx, toolCall.Function.Name, toolCall.Function.Arguments.ToMap())
|
||||
if err != nil {
|
||||
errContent := fmt.Sprintf("Error: %v", err)
|
||||
toolErrMsg := store.NewMessage("tool", errContent, nil)
|
||||
@@ -1395,11 +1433,6 @@ func (s *Server) getSettings(w http.ResponseWriter, r *http.Request) error {
|
||||
settings.Models = envconfig.Models()
|
||||
}
|
||||
|
||||
// set default context length if not set
|
||||
if settings.ContextLength == 0 {
|
||||
settings.ContextLength = 4096
|
||||
}
|
||||
|
||||
// Include current runtime settings
|
||||
settings.Agent = s.Agent
|
||||
settings.Tools = s.Tools
|
||||
@@ -1426,6 +1459,24 @@ func (s *Server) settings(w http.ResponseWriter, r *http.Request) error {
|
||||
return fmt.Errorf("failed to save settings: %w", err)
|
||||
}
|
||||
|
||||
// Handle auto-update toggle changes
|
||||
if old.AutoUpdateEnabled != settings.AutoUpdateEnabled {
|
||||
if !settings.AutoUpdateEnabled {
|
||||
// Auto-update disabled: cancel any ongoing download
|
||||
if s.Updater != nil {
|
||||
s.Updater.CancelOngoingDownload()
|
||||
}
|
||||
} else {
|
||||
// Auto-update re-enabled: show notification if update is already staged, or trigger immediate check
|
||||
if (updater.IsUpdatePending() || updater.UpdateDownloaded) && s.UpdateAvailableFunc != nil {
|
||||
s.UpdateAvailableFunc()
|
||||
} else if s.Updater != nil {
|
||||
// Trigger the background checker to run immediately
|
||||
s.Updater.TriggerImmediateCheck()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if old.ContextLength != settings.ContextLength ||
|
||||
old.Models != settings.Models ||
|
||||
old.Expose != settings.Expose {
|
||||
@@ -1438,140 +1489,51 @@ func (s *Server) settings(w http.ResponseWriter, r *http.Request) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Server) me(w http.ResponseWriter, r *http.Request) error {
|
||||
if r.Method != http.MethodGet {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return nil
|
||||
func (s *Server) cloudSetting(w http.ResponseWriter, r *http.Request) error {
|
||||
var req struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
return fmt.Errorf("invalid request body: %w", err)
|
||||
}
|
||||
|
||||
user, err := s.UserData(r.Context())
|
||||
if err != nil {
|
||||
// If fetching from API fails, try to return cached user data if available
|
||||
if cachedUser, cacheErr := s.Store.User(); cacheErr == nil && cachedUser != nil {
|
||||
s.log().Info("API request failed, returning cached user data", "error", err)
|
||||
responseUser := &responses.User{
|
||||
Name: cachedUser.Name,
|
||||
Email: cachedUser.Email,
|
||||
Plan: cachedUser.Plan,
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return json.NewEncoder(w).Encode(responseUser)
|
||||
}
|
||||
|
||||
s.log().Error("failed to get user data", "error", err)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return json.NewEncoder(w).Encode(responses.Error{
|
||||
Error: "failed to get user data",
|
||||
})
|
||||
if err := s.Store.SetCloudEnabled(req.Enabled); err != nil {
|
||||
return fmt.Errorf("failed to persist cloud setting: %w", err)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return json.NewEncoder(w).Encode(user)
|
||||
s.Restart()
|
||||
|
||||
return s.writeCloudStatus(w)
|
||||
}
|
||||
|
||||
-func (s *Server) disconnect(w http.ResponseWriter, r *http.Request) error {
-	if r.Method != http.MethodPost {
-		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
-		return nil
-	}

-	if err := s.Store.ClearUser(); err != nil {
-		s.log().Warn("failed to clear cached user data", "error", err)
-	}

-	// Get the SSH public key to encode for the delete request
-	pubKey, err := ollamaAuth.GetPublicKey()
-	if err != nil {
-		s.log().Error("failed to get public key", "error", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		return json.NewEncoder(w).Encode(responses.Error{
-			Error: "failed to get public key",
-		})
-	}

-	// Encode the key using base64 URL encoding
-	encodedKey := base64.RawURLEncoding.EncodeToString([]byte(pubKey))

-	// Call the /api/user/keys/{encodedKey} endpoint with DELETE
-	resp, err := s.doSelfSigned(r.Context(), http.MethodDelete, fmt.Sprintf("/api/user/keys/%s", encodedKey))
-	if err != nil {
-		s.log().Error("failed to call ollama.com/api/user/keys", "error", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		return json.NewEncoder(w).Encode(responses.Error{
-			Error: "failed to disconnect from ollama.com",
-		})
-	}
-	defer resp.Body.Close()

-	if resp.StatusCode != http.StatusOK {
-		s.log().Error("disconnect request failed", "status", resp.StatusCode)
-		w.WriteHeader(http.StatusInternalServerError)
-		return json.NewEncoder(w).Encode(responses.Error{
-			Error: "failed to disconnect from ollama.com",
-		})
-	}

-	w.Header().Set("Content-Type", "application/json")
-	w.WriteHeader(http.StatusOK)
-	return json.NewEncoder(w).Encode(map[string]string{"status": "disconnected"})
+func (s *Server) getCloudSetting(w http.ResponseWriter, r *http.Request) error {
+	return s.writeCloudStatus(w)
}

-func (s *Server) connectURL(w http.ResponseWriter, r *http.Request) error {
-	if r.Method != http.MethodGet {
-		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
-		return nil
-	}

-	connectURL, err := auth.BuildConnectURL(OllamaDotCom)
+func (s *Server) writeCloudStatus(w http.ResponseWriter) error {
+	disabled, source, err := s.Store.CloudStatus()
	if err != nil {
-		s.log().Error("failed to build connect URL", "error", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		return json.NewEncoder(w).Encode(responses.Error{
-			Error: "failed to build connect URL",
-		})
+		return fmt.Errorf("failed to load cloud status: %w", err)
	}

-	w.Header().Set("Content-Type", "application/json")
-	w.WriteHeader(http.StatusOK)
-	return json.NewEncoder(w).Encode(map[string]string{
-		"connect_url": connectURL,
-	})
-}

-func (s *Server) health(w http.ResponseWriter, r *http.Request) error {
-	if r.Method != http.MethodGet {
-		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
-		return nil
-	}

-	healthy := false
-	c, err := api.ClientFromEnvironment()
-	if err == nil {
-		if _, err := c.Version(r.Context()); err == nil {
-			healthy = true
-		}
-	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
-	return json.NewEncoder(w).Encode(responses.HealthResponse{
-		Healthy: healthy,
+	return json.NewEncoder(w).Encode(map[string]any{
+		"disabled": disabled,
+		"source":   source,
	})
}

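The status payload written by `writeCloudStatus` is a two-field JSON object. A small sketch of how a client might decode it into a typed value is shown below; the struct, its field meanings (whether cloud features are disabled and where that decision came from), and the example `"env"` source value are inferences from the field names in the diff, not documented API.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// cloudStatus mirrors the JSON object writeCloudStatus emits above.
// Field semantics are inferred from the names in the diff.
type cloudStatus struct {
	Disabled bool   `json:"disabled"`
	Source   string `json:"source"`
}

func main() {
	// Example payload in the shape produced by writeCloudStatus; the "env"
	// source value is hypothetical.
	raw := []byte(`{"disabled": true, "source": "env"}`)

	var st cloudStatus
	if err := json.Unmarshal(raw, &st); err != nil {
		panic(err)
	}
	fmt.Printf("cloud disabled=%v (source=%q)\n", st.Disabled, st.Source)
}
```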
func (s *Server) getInferenceCompute(w http.ResponseWriter, r *http.Request) error {
	ctx, cancel := context.WithTimeout(r.Context(), 500*time.Millisecond)
	defer cancel()
-	serverInferenceComputes, err := server.GetInferenceComputer(ctx)
+	info, err := server.GetInferenceInfo(ctx)
	if err != nil {
-		s.log().Error("failed to get inference compute", "error", err)
-		return fmt.Errorf("failed to get inference compute: %w", err)
+		s.log().Error("failed to get inference info", "error", err)
+		return fmt.Errorf("failed to get inference info: %w", err)
	}

-	inferenceComputes := make([]responses.InferenceCompute, len(serverInferenceComputes))
-	for i, ic := range serverInferenceComputes {
+	inferenceComputes := make([]responses.InferenceCompute, len(info.Computes))
+	for i, ic := range info.Computes {
		inferenceComputes[i] = responses.InferenceCompute{
			Library: ic.Library,
			Variant: ic.Variant,
@@ -1583,7 +1545,8 @@ func (s *Server) getInferenceCompute(w http.ResponseWriter, r *http.Request) err
	}

	response := responses.InferenceComputeResponse{
-		InferenceComputes: inferenceComputes,
+		InferenceComputes:    inferenceComputes,
+		DefaultContextLength: info.DefaultContextLength,
	}

	w.Header().Set("Content-Type", "application/json")
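After this change the compute response carries a default context length alongside the list of compute devices. The sketch below shows that assembly step in isolation, using local stand-in types; the real `server` and `responses` types carry more fields, and the `"cuda"`/`"v12"` values and the 4096 default are purely illustrative.

```go
package main

import "fmt"

// Stand-in types mirroring the shapes used above; the real ones live in the
// server and responses packages.
type inferenceCompute struct {
	Library string
	Variant string
}

type inferenceInfo struct {
	Computes             []inferenceCompute
	DefaultContextLength int
}

type inferenceComputeResponse struct {
	InferenceComputes    []inferenceCompute
	DefaultContextLength int
}

// buildResponse mirrors the handler above: copy each compute entry and pass the
// default context length through alongside the list.
func buildResponse(info inferenceInfo) inferenceComputeResponse {
	out := make([]inferenceCompute, len(info.Computes))
	copy(out, info.Computes)
	return inferenceComputeResponse{
		InferenceComputes:    out,
		DefaultContextLength: info.DefaultContextLength,
	}
}

func main() {
	info := inferenceInfo{
		Computes:             []inferenceCompute{{Library: "cuda", Variant: "v12"}},
		DefaultContextLength: 4096,
	}
	fmt.Printf("%+v\n", buildResponse(info))
}
```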
@@ -1616,9 +1579,18 @@ func (s *Server) modelUpstream(w http.ResponseWriter, r *http.Request) error {
		return json.NewEncoder(w).Encode(response)
	}

	n := model.ParseName(req.Model)
+	stale := true
+	if m, err := manifest.ParseNamedManifest(n); err == nil {
+		if m.Digest() == digest {
+			stale = false
+		} else if pushTime > 0 && m.FileInfo().ModTime().Unix() >= pushTime {
+			stale = false
+		}
+	}

	response := responses.ModelUpstreamResponse{
		Digest:   digest,
		PushTime: pushTime,
+		Stale:    stale,
	}

	w.Header().Set("Content-Type", "application/json")
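The added staleness rule reads as: the local model is stale unless its digest matches upstream, or it was modified at or after the upstream push time. A small pure-function sketch of that rule follows, with the manifest lookup replaced by plain parameters so it runs on its own; the digests and timestamps in `main` are made up.

```go
package main

import (
	"fmt"
	"time"
)

// isStale mirrors the rule added above: the local copy is stale unless its digest
// matches upstream, or it was modified at or after the upstream push time.
func isStale(localDigest, upstreamDigest string, localModTime time.Time, pushTime int64) bool {
	if localDigest == upstreamDigest {
		return false
	}
	if pushTime > 0 && localModTime.Unix() >= pushTime {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	fmt.Println(isStale("sha256:aaa", "sha256:aaa", now, 0))                            // false: digests match
	fmt.Println(isStale("sha256:aaa", "sha256:bbb", now, now.Add(-time.Hour).Unix()))   // false: modified after push
	fmt.Println(isStale("sha256:aaa", "sha256:bbb", now.Add(-2*time.Hour), now.Unix())) // true: older than push
}
```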
@@ -1659,13 +1631,13 @@ func convertToOllamaTool(toolSchema map[string]any) api.Tool {

	tool.Function.Parameters.Type = "object"
	tool.Function.Parameters.Required = []string{}
-	tool.Function.Parameters.Properties = make(map[string]api.ToolProperty)
+	tool.Function.Parameters.Properties = api.NewToolPropertiesMap()

	if schemaProps, ok := toolSchema["schema"].(map[string]any); ok {
		tool.Function.Parameters.Type = getStringFromMap(schemaProps, "type", "object")

		if props, ok := schemaProps["properties"].(map[string]any); ok {
-			tool.Function.Parameters.Properties = make(map[string]api.ToolProperty)
+			tool.Function.Parameters.Properties = api.NewToolPropertiesMap()

			for propName, propDef := range props {
				if propMap, ok := propDef.(map[string]any); ok {
@@ -1673,7 +1645,7 @@ func convertToOllamaTool(toolSchema map[string]any) api.Tool {
					Type:        api.PropertyType{getStringFromMap(propMap, "type", "string")},
					Description: getStringFromMap(propMap, "description", ""),
				}
-				tool.Function.Parameters.Properties[propName] = prop
+				tool.Function.Parameters.Properties.Set(propName, prop)
			}
		}
	}
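The change swaps a plain `map[string]api.ToolProperty` for `api.NewToolPropertiesMap()` with a `Set` method. The real type is not shown in this diff; the sketch below is a minimal wrapper with the same constructor/`Set` surface. One common reason for wrapping a raw map like this is to preserve insertion order, but whether that is the actual motivation in the `api` package is an assumption here.

```go
package main

import "fmt"

// toolProperty stands in for api.ToolProperty.
type toolProperty struct {
	Type        string
	Description string
}

// toolPropertiesMap is a minimal, hypothetical wrapper with the same
// constructor/Set surface the diff switches to. It keeps keys in insertion
// order, which a plain Go map does not guarantee.
type toolPropertiesMap struct {
	keys   []string
	values map[string]toolProperty
}

func newToolPropertiesMap() *toolPropertiesMap {
	return &toolPropertiesMap{values: make(map[string]toolProperty)}
}

func (m *toolPropertiesMap) Set(name string, p toolProperty) {
	if _, exists := m.values[name]; !exists {
		m.keys = append(m.keys, name)
	}
	m.values[name] = p
}

func main() {
	props := newToolPropertiesMap()
	props.Set("query", toolProperty{Type: "string", Description: "search query"})
	props.Set("limit", toolProperty{Type: "integer", Description: "max results"})
	for _, k := range props.keys {
		fmt.Println(k, props.values[k])
	}
}
```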
@@ -1705,7 +1677,7 @@ func getStringFromMap(m map[string]any, key, defaultValue string) string {
// isImageAttachment checks if a filename is an image file
func isImageAttachment(filename string) bool {
	ext := strings.ToLower(filename)
-	return strings.HasSuffix(ext, ".png") || strings.HasSuffix(ext, ".jpg") || strings.HasSuffix(ext, ".jpeg")
+	return strings.HasSuffix(ext, ".png") || strings.HasSuffix(ext, ".jpg") || strings.HasSuffix(ext, ".jpeg") || strings.HasSuffix(ext, ".webp")
}

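Since `isImageAttachment` lowercases the whole filename before suffix-checking, uppercase extensions also pass, and this change adds `.webp` to the accepted set. A quick table-driven check of that behavior is sketched below; the function is copied verbatim from the diff so the example is self-contained, and the filenames are made up.

```go
package main

import (
	"fmt"
	"strings"
)

// isImageAttachment is copied from the handler code above so the example compiles on its own.
func isImageAttachment(filename string) bool {
	ext := strings.ToLower(filename)
	return strings.HasSuffix(ext, ".png") || strings.HasSuffix(ext, ".jpg") || strings.HasSuffix(ext, ".jpeg") || strings.HasSuffix(ext, ".webp")
}

func main() {
	cases := map[string]bool{
		"photo.PNG":    true, // case-insensitive: the whole name is lowercased first
		"scan.jpeg":    true,
		"sticker.webp": true, // newly accepted by the change above
		"notes.pdf":    false,
		"archive.tar":  false,
	}
	for name, want := range cases {
		if got := isImageAttachment(name); got != want {
			fmt.Printf("FAIL %s: got %v, want %v\n", name, got, want)
			continue
		}
		fmt.Printf("ok   %s -> %v\n", name, want)
	}
}
```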
// ptr is a convenience function for &literal
@@ -1716,18 +1688,6 @@ func supportsBrowserTools(model string) bool {
	return strings.HasPrefix(strings.ToLower(model), "gpt-oss")
}

-// Web search tools are simpler, providing only basic web search and fetch capabilities (e.g., "web_search", "web_fetch") without simulating a browser. Currently only qwen3 and deepseek-v3 support web search tools.
-func supportsWebSearchTools(model string) bool {
-	model = strings.ToLower(model)
-	prefixes := []string{"qwen3", "deepseek-v3"}
-	for _, p := range prefixes {
-		if strings.HasPrefix(model, p) {
-			return true
-		}
-	}
-	return false
-}

// buildChatRequest converts store.Chat to api.ChatRequest
func (s *Server) buildChatRequest(chat *store.Chat, model string, think any, availableTools []map[string]any) (*api.ChatRequest, error) {
	var msgs []api.Message
@@ -1794,13 +1754,14 @@ func (s *Server) buildChatRequest(chat *store.Chat, model string, think any, ava

	var thinkValue *api.ThinkValue
	if think != nil {
		// Only set Think if it's actually requesting thinking
		if boolValue, ok := think.(bool); ok {
-			thinkValue = &api.ThinkValue{
-				Value: boolValue,
+			if boolValue {
+				thinkValue = &api.ThinkValue{Value: boolValue}
			}
		} else if stringValue, ok := think.(string); ok {
-			thinkValue = &api.ThinkValue{
-				Value: stringValue,
+			if stringValue != "" && stringValue != "none" {
+				thinkValue = &api.ThinkValue{Value: stringValue}
			}
		}
	}

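The reworked branch only sets `Think` when the caller actually asked for it: `true`, or a non-empty string other than `"none"`. The sketch below restates that normalization as a standalone helper so the accepted and rejected inputs are easy to see; the `thinkValue` struct is a local stand-in for `api.ThinkValue`, and the helper name is invented for illustration.

```go
package main

import "fmt"

// thinkValue stands in for api.ThinkValue.
type thinkValue struct {
	Value any
}

// normalizeThink mirrors the branch above: a false bool, an empty string, or the
// literal "none" all mean "no thinking requested" and yield nil.
func normalizeThink(think any) *thinkValue {
	switch v := think.(type) {
	case bool:
		if v {
			return &thinkValue{Value: v}
		}
	case string:
		if v != "" && v != "none" {
			return &thinkValue{Value: v}
		}
	}
	return nil
}

func main() {
	for _, in := range []any{true, false, "high", "none", "", nil} {
		fmt.Printf("%#v -> %#v\n", in, normalizeThink(in))
	}
}
```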