Compare commits
1660 Commits
19 .dockerignore Normal file
@@ -0,0 +1,19 @@
.idea
# Langchain-Chatchat
docs
.github
tests
Dockerfile
.dockerignore
.gitignore
.gitmodules
README.md
README_en.md
README_ja.md
LICENSE
requirements_api.txt
requirements_lite.txt
requirements_webui.txt
# bge-large-zh-v1.5
bge-large-zh-v1.5/README.md
# chatglm3-6b
36 .github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
@@ -0,0 +1,36 @@
---
name: Bug 报告 / Bug Report
about: 报告项目中的错误或问题 / Report errors or issues in the project
title: "[BUG] 简洁阐述问题 / Concise description of the issue"
labels: bug
assignees: ''

---

**问题描述 / Problem Description**
用简洁明了的语言描述这个问题 / Describe the problem in a clear and concise manner.

**复现问题的步骤 / Steps to Reproduce**
1. 执行 '...' / Run '...'
2. 点击 '...' / Click '...'
3. 滚动到 '...' / Scroll to '...'
4. 问题出现 / Problem occurs

**预期的结果 / Expected Result**
描述应该出现的结果 / Describe the expected result.

**实际结果 / Actual Result**
描述实际发生的结果 / Describe the actual result.

**环境信息 / Environment Information**
- langchain-ChatGLM 版本/commit 号:(例如:v2.0.1 或 commit 123456) / langchain-ChatGLM version/commit number: (e.g., v2.0.1 or commit 123456)
- 是否使用 Docker 部署(是/否):是 / Is Docker deployment used (yes/no): yes
- 使用的模型(ChatGLM2-6B / Qwen-7B 等):ChatGLM-6B / Model used (ChatGLM2-6B / Qwen-7B, etc.): ChatGLM2-6B
- 使用的 Embedding 模型(moka-ai/m3e-base 等):moka-ai/m3e-base / Embedding model used (moka-ai/m3e-base, etc.): moka-ai/m3e-base
- 使用的向量库类型 (faiss / milvus / pg_vector 等): faiss / Vector library used (faiss, milvus, pg_vector, etc.): faiss
- 操作系统及版本 / Operating system and version:
- Python 版本 / Python version:
- 其他相关环境信息 / Other relevant environment information:

**附加信息 / Additional Information**
添加与问题相关的任何其他信息 / Add any other information related to the issue.
23 .github/ISSUE_TEMPLATE/feature_request.md vendored Normal file
@@ -0,0 +1,23 @@
---
name: 功能请求 / Feature Request
about: 为项目提出新功能或建议 / Propose new features or suggestions for the project
title: "[FEATURE] 简洁阐述功能 / Concise description of the feature"
labels: enhancement
assignees: ''

---

**功能描述 / Feature Description**
用简洁明了的语言描述所需的功能 / Describe the desired feature in a clear and concise manner.

**解决的问题 / Problem Solved**
解释此功能如何解决现有问题或改进项目 / Explain how this feature solves existing problems or improves the project.

**实现建议 / Implementation Suggestions**
如果可能,请提供关于如何实现此功能的建议 / If possible, provide suggestions on how to implement this feature.

**替代方案 / Alternative Solutions**
描述您考虑过的替代方案 / Describe alternative solutions you have considered.

**其他信息 / Additional Information**
添加与功能请求相关的任何其他信息 / Add any other information related to the feature request.
22 .github/workflows/close-issue.yml vendored Normal file
@@ -0,0 +1,22 @@
name: Close inactive issues
on:
  schedule:
    - cron: "30 21 * * *"

jobs:
  close-issues:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@v5
        with:
          days-before-issue-stale: 30
          days-before-issue-close: 14
          stale-issue-label: "stale"
          stale-issue-message: "这个问题已经被标记为 `stale` ,因为它已经超过 30 天没有任何活动。"
          close-issue-message: "这个问题已经被自动关闭,因为它被标为 `stale` 后超过 14 天没有任何活动。"
          days-before-pr-stale: -1
          days-before-pr-close: -1
          repo-token: ${{ secrets.GITHUB_TOKEN }}
155 .github/workflows/docker-build.yaml vendored Normal file
@@ -0,0 +1,155 @@
name: docker-build
on:
  push:
    branches:
      - master
    paths-ignore:
      - 'README.md'
      - 'README_en.md'
      - 'README_ja.md'
env:
  TZ: Asia/Shanghai
jobs:
  docker-build:
    runs-on: ubuntu-latest
    # if: github.event.pull_request.merged == true
    steps:
      - name: Optimize Disk Space
        uses: hugoalh/disk-space-optimizer-ghaction@v0.8.0
        with:
          operate_sudo: "True"
          general_include: ".+"
          general_exclude: |-
            ^GCC$
            ^G\+\+$
            Clang
            LLVM
          docker_include: ".+"
          docker_prune: "True"
          docker_clean: "True"
          apt_prune: "True"
          apt_clean: "True"
          homebrew_prune: "True"
          homebrew_clean: "True"
          npm_prune: "True"
          npm_clean: "True"
          os_swap: "True"
      - name: Remove Unnecessary Tools And Files
        env:
          DEBIAN_FRONTEND: noninteractive
        run: |
          sudo apt-get remove -y '^dotnet-.*' '^llvm-.*' 'php.*' azure-cli google-chrome-stable firefox powershell mono-devel
          sudo apt-get autoremove --purge -y
          sudo find /var/log -name "*.gz" -type f -delete
          sudo rm -rf /var/cache/apt/archives
          sudo rm -rf /tmp/*
          sudo rm -rf /etc/apt/sources.list.d/* /usr/share/dotnet /usr/local/lib/android /opt/ghc /etc/mysql /etc/php
          sudo -E apt-get -y purge azure-cli* docker* ghc* zulu* hhvm* llvm* firefox* google* dotnet* aspnetcore* powershell* openjdk* adoptopenjdk* mysql* php* mongodb* moby* snap* || true
          sudo rm -rf /etc/apt/sources.list.d/* /usr/local/lib/android /opt/ghc /usr/share/dotnet /usr/local/graalvm /usr/local/.ghcup \
            /usr/local/share/powershell /usr/local/share/chromium /usr/local/lib/node_modules
          sudo rm -rf /etc/apt/sources.list.d/* /usr/share/dotnet /usr/local/lib/android /opt/ghc /etc/mysql /etc/php
          sudo -E apt-get -y purge azure-cli* docker* ghc* zulu* hhvm* llvm* firefox* google* dotnet* aspnetcore* powershell* openjdk* adoptopenjdk* mysql* php* mongodb* moby* snap* || true
          sudo -E apt-get -qq update
          sudo -E apt-get -qq install libfuse-dev $(curl -fsSL git.io/depends-ubuntu-2204)
          sudo -E apt-get -qq autoremove --purge
          sudo -E apt-get -qq clean
          sudo apt-get clean
          rm -rf /opt/hostedtoolcache
          sudo timedatectl set-timezone "$TZ"
      - name: Free Up Disk Space
        uses: easimon/maximize-build-space@master
        with:
          root-reserve-mb: 62464 # 给 / 预留 61GiB 空间( docker 预留)
          swap-size-mb: 1
          remove-dotnet: 'true'
          remove-android: 'true'
          remove-haskell: 'true'
          remove-codeql: 'true'
          remove-docker-images: 'true'
      - name: Checkout Repository
        uses: actions/checkout@v4
      - name: Get Latest Release
        id: get_version
        run: |
          VERSION=$(curl --silent "https://api.github.com/repos/${{ github.repository }}/releases/latest" | jq -r .tag_name)
          echo "RELEASE_VERSION=${VERSION}" >> $GITHUB_ENV
      - name: Set Image Tag
        id: imageTag
        run: echo "::set-output name=image_tag::$RELEASE_VERSION-$(date +%Y%m%d)-$(git rev-parse --short HEAD)"
      - name: Set Up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set Up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Clone Model
        run: |
          sudo mkdir -p $GITHUB_WORKSPACE/bge-large-zh-v1.5
          cd $GITHUB_WORKSPACE/bge-large-zh-v1.5
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/.gitattributes &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/config.json &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/config_sentence_transformers.json &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/modules.json &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/pytorch_model.bin &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/sentence_bert_config.json &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/special_tokens_map.json &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/tokenizer.json &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/tokenizer_config.json &> /dev/null
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/vocab.txt &> /dev/null
          sudo mkdir -p $GITHUB_WORKSPACE/bge-large-zh-v1.5/1_Pooling
          cd $GITHUB_WORKSPACE/bge-large-zh-v1.5/1_Pooling
          sudo wget https://huggingface.co/BAAI/bge-large-zh-v1.5/resolve/main/1_Pooling/config.json &> /dev/null
          sudo mkdir -p $GITHUB_WORKSPACE/chatglm3-6b
          cd $GITHUB_WORKSPACE/chatglm3-6b
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/config.json &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/configuration_chatglm.py &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model-00001-of-00007.safetensors &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model-00002-of-00007.safetensors &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model-00003-of-00007.safetensors &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model-00004-of-00007.safetensors &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model-00005-of-00007.safetensors &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model-00006-of-00007.safetensors &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model-00007-of-00007.safetensors &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/model.safetensors.index.json &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/modeling_chatglm.py &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/pytorch_model.bin.index.json &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/quantization.py &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/special_tokens_map.json &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/tokenization_chatglm.py &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/tokenizer.model &> /dev/null
          sudo wget https://huggingface.co/THUDM/chatglm3-6b/resolve/main/tokenizer_config.json &> /dev/null
          du -sh $GITHUB_WORKSPACE
          du -sh $GITHUB_WORKSPACE/*
          du -sh $GITHUB_WORKSPACE/bge-large-zh-v1.5/*
          du -sh $GITHUB_WORKSPACE/chatglm3-6b/*
      - name: Show Runner Disk
        run: df -hT
      - name: Docker Build
        run: |
          docker build -t uswccr.ccs.tencentyun.com/chatchat/chatchat:${{ steps.imageTag.outputs.image_tag }} -f Dockerfile .
      - name: Show Images Size
        run: docker images
      - name: Login To Tencent CCR
        uses: docker/login-action@v2
        with:
          registry: uswccr.ccs.tencentyun.com
          username: ${{ secrets.CCR_REGISTRY_USERNAME }}
          password: ${{ secrets.CCR_REGISTRY_PASSWORD }}
      - name: Docker Push
        run: docker push uswccr.ccs.tencentyun.com/chatchat/chatchat:${{ steps.imageTag.outputs.image_tag }}
      # - name: Login to Docker Hub
      #   uses: docker/login-action@v2
      #   with:
      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Update README.md
        run: |
          sed -i "s|uswccr.ccs.tencentyun.com/chatchat/chatchat:[^ ]*|uswccr.ccs.tencentyun.com/chatchat/chatchat:${{ steps.imageTag.outputs.image_tag }}|g" README.md
          sed -i "s|uswccr.ccs.tencentyun.com/chatchat/chatchat:[^ ]*|uswccr.ccs.tencentyun.com/chatchat/chatchat:${{ steps.imageTag.outputs.image_tag }}|g" README_en.md
          sed -i "s|uswccr.ccs.tencentyun.com/chatchat/chatchat:[^ ]*|uswccr.ccs.tencentyun.com/chatchat/chatchat:${{ steps.imageTag.outputs.image_tag }}|g" README_ja.md
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git commit -am "feat:update docker image:tag"
      - name: Push README.md
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.GH_PAT }}
          branch: ${{ github.ref }}
32 .gitignore vendored
@@ -1,6 +1,17 @@
*.log
*.log.*
*.bak
logs
/knowledge_base/*
!/knowledge_base/samples
/knowledge_base/samples/vector_store

/configs/*.py
.vscode/

# below are standard python ignore files
# Byte-compiled / optimized / DLL files
__pycache__/
*/**/__pycache__/
*.py[cod]
*$py.class

@@ -86,21 +97,21 @@ ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
@@ -159,12 +170,11 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.pytest_cache
.DS_Store

# Test File
test.py
configs/*.py

# Other files
output/*
log/*
.chroma
vector_store/*

llm/*
embedding/*
3 .gitmodules vendored Normal file
@@ -0,0 +1,3 @@
[submodule "knowledge_base/samples/content/wiki"]
	path = knowledge_base/samples/content/wiki
	url = https://github.com/chatchat-space/Langchain-Chatchat.wiki.git
@@ -1,22 +0,0 @@
# 贡献指南

欢迎!我们是一个非常友好的社区,非常高兴您想要帮助我们让这个应用程序变得更好。但是,请您遵循一些通用准则以保持组织有序。

1. 确保为您要修复的错误或要添加的功能创建了一个[问题](https://github.com/imClumsyPanda/langchain-ChatGLM/issues),尽可能保持它们小。
2. 请使用 `git pull --rebase` 来拉取和衍合上游的更新。
3. 将提交合并为格式良好的提交。在提交说明中单独一行提到要解决的问题,如`Fix #<bug>`(有关更多可以使用的关键字,请参见[将拉取请求链接到问题](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))。
4. 推送到`dev`。在说明中提到正在解决的问题。

---

# Contribution Guide

Welcome! We're a pretty friendly community, and we're thrilled that you want to help make this app even better. However, we ask that you follow some general guidelines to keep things organized around here.

1. Make sure an [issue](https://github.com/imClumsyPanda/langchain-ChatGLM/issues) is created for the bug you're about to fix, or feature you're about to add. Keep them as small as possible.

2. Please use `git pull --rebase` to fetch and merge updates from the upstream.

3. Rebase commits into well-formatted commits. Mention the issue being resolved in the commit message on a line all by itself like `Fixes #<bug>` (refer to [Linking a pull request to an issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) for more keywords you can use).

4. Push into `dev`. Mention which bug is being resolved in the description.
63 Dockerfile
@@ -1,36 +1,27 @@
FROM python:3.8

MAINTAINER "chatGLM"

COPY agent /chatGLM/agent

COPY chains /chatGLM/chains

COPY configs /chatGLM/configs

COPY content /chatGLM/content

COPY models /chatGLM/models

COPY nltk_data /chatGLM/content

COPY requirements.txt /chatGLM/

COPY cli_demo.py /chatGLM/

COPY textsplitter /chatGLM/

COPY webui.py /chatGLM/

WORKDIR /chatGLM

RUN pip install --user torch torchvision tensorboard cython -i https://pypi.tuna.tsinghua.edu.cn/simple
# RUN pip install --user 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

# RUN pip install --user 'git+https://github.com/facebookresearch/fvcore'
# install detectron2
# RUN git clone https://github.com/facebookresearch/detectron2

RUN pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple/ --trusted-host pypi.tuna.tsinghua.edu.cn

CMD ["python","-u", "webui.py"]
# Base Image
FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04
# Labels
LABEL maintainer=chatchat
# Environment Variables
ENV HOME=/Langchain-Chatchat
# Commands
WORKDIR /
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    echo "Asia/Shanghai" > /etc/timezone && \
    apt-get update -y && \
    apt-get install -y --no-install-recommends python3.11 python3-pip curl libgl1 libglib2.0-0 jq && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    rm -f /usr/bin/python3 && \
    ln -s /usr/bin/python3.11 /usr/bin/python3 && \
    mkdir -p $HOME
# Copy the application files
COPY . $HOME
WORKDIR $HOME
# Install dependencies from requirements.txt
RUN pip3 install -r requirements.txt -i https://pypi.org/simple && \
    python3 copy_config_example.py && \
    sed -i 's|MODEL_ROOT_PATH = ""|MODEL_ROOT_PATH = "/Langchain-Chatchat"|' configs/model_config.py && \
    python3 init_database.py --recreate-vs
EXPOSE 22 7861 8501
ENTRYPOINT ["python3", "startup.py", "-a"]
303 README.md
@@ -1,161 +1,206 @@
# 基于本地知识的 ChatGLM 应用实现


🌍 [READ THIS IN ENGLISH](README_en.md)
🌍 [日本語で読む](README_ja.md)

📃 **LangChain-Chatchat** (原 Langchain-ChatGLM)

基于 ChatGLM 等大语言模型与 Langchain 等应用框架实现,开源、可离线部署的检索增强生成(RAG)大模型知识库项目。

### ⚠️ 重要提示

`0.2.10`将会是`0.2.x`系列的最后一个版本,`0.2.x`系列版本将会停止更新和技术支持,全力研发具有更强应用性的 `Langchain-Chatchat 0.3.x`。
`0.2.10` 的后续 bug 修复将会直接推送到`master`分支,而不再进行版本更新。

---

## 目录

* [介绍](README.md#介绍)
* [解决的痛点](README.md#解决的痛点)
* [快速上手](README.md#快速上手)
* [1. 环境配置](README.md#1-环境配置)
* [2. 模型下载](README.md#2-模型下载)
* [3. 初始化知识库和配置文件](README.md#3-初始化知识库和配置文件)
* [4. 一键启动](README.md#4-一键启动)
* [5. 启动界面示例](README.md#5-启动界面示例)
* [联系我们](README.md#联系我们)

## 介绍

🌍 [_READ THIS IN ENGLISH_](README_en.md)
🤖️ 一种利用 [langchain](https://github.com/langchain-ai/langchain)
思想实现的基于本地知识库的问答应用,目标期望建立一套对中文场景与开源模型支持友好、可离线运行的知识库问答解决方案。

🤖️ 一种利用 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) + [langchain](https://github.com/hwchase17/langchain) 实现的基于本地知识的 ChatGLM 应用。增加 [clue-ai/ChatYuan](https://github.com/clue-ai/ChatYuan) 项目的模型 [ClueAI/ChatYuan-large-v2](https://huggingface.co/ClueAI/ChatYuan-large-v2) 的支持。
💡 受 [GanymedeNil](https://github.com/GanymedeNil) 的项目 [document.ai](https://github.com/GanymedeNil/document.ai)
和 [AlexZhangji](https://github.com/AlexZhangji)
创建的 [ChatGLM-6B Pull Request](https://github.com/THUDM/ChatGLM-6B/pull/216)
启发,建立了全流程可使用开源模型实现的本地知识库问答应用。本项目的最新版本中通过使用 [FastChat](https://github.com/lm-sys/FastChat)
接入 Vicuna, Alpaca, LLaMA, Koala, RWKV 等模型,依托于 [langchain](https://github.com/langchain-ai/langchain)
框架支持通过基于 [FastAPI](https://github.com/tiangolo/fastapi) 提供的 API
调用服务,或使用基于 [Streamlit](https://github.com/streamlit/streamlit) 的 WebUI 进行操作。

💡 受 [GanymedeNil](https://github.com/GanymedeNil) 的项目 [document.ai](https://github.com/GanymedeNil/document.ai) 和 [AlexZhangji](https://github.com/AlexZhangji) 创建的 [ChatGLM-6B Pull Request](https://github.com/THUDM/ChatGLM-6B/pull/216) 启发,建立了全部基于开源模型实现的本地知识问答应用。
✅ 依托于本项目支持的开源 LLM 与 Embedding 模型,本项目可实现全部使用**开源**模型**离线私有部署**。与此同时,本项目也支持
OpenAI GPT API 的调用,并将在后续持续扩充对各类模型及模型 API 的接入。

✅ 本项目中 Embedding 默认选用的是 [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main),LLM 默认选用的是 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B)。依托上述模型,本项目可实现全部使用**开源**模型**离线私有部署**。
⛓️ 本项目实现原理如下图所示,过程包括加载文件 -> 读取文本 -> 文本分割 -> 文本向量化 -> 问句向量化 ->
在文本向量中匹配出与问句向量最相似的 `top k`个 -> 匹配出的文本作为上下文和问题一起添加到 `prompt`中 -> 提交给 `LLM`生成回答。

⛓️ 本项目实现原理如下图所示,过程包括加载文件 -> 读取文本 -> 文本分割 -> 文本向量化 -> 问句向量化 -> 在文本向量中匹配出与问句向量最相似的`top k`个 -> 匹配出的文本作为上下文和问题一起添加到`prompt`中 -> 提交给`LLM`生成回答。
📺 [原理介绍视频](https://www.bilibili.com/video/BV13M4y1e7cN/?share_source=copy_web&vd_source=e6c5aafe684f30fbe41925d61ca6d514)



从文档处理角度来看,实现流程如下:


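To make the flow described above concrete, here is a minimal, illustrative sketch of the same load → split → embed → top-k retrieve → prompt steps, written against the langchain APIs the project builds on. It is not the project's actual code: the file path, chunk sizes, embedding model name, and `k` value below are placeholder assumptions, and the final LLM call is left abstract.

```python
# Minimal sketch of the retrieval flow described above -- illustrative only, not this
# project's actual implementation. Path, chunk sizes, model name and k are placeholders.
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# 加载文件 -> 读取文本 -> 文本分割
docs = TextLoader("./knowledge_base/samples/content/sample.txt", encoding="utf-8").load()
chunks = RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=50).split_documents(docs)

# 文本向量化(实际使用的 Embedding 模型在 configs/model_config.py 中配置)
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-zh-v1.5")
vector_store = FAISS.from_documents(chunks, embeddings)

# 问句向量化 -> 在文本向量中匹配出最相似的 top k 个文本
query = "本项目支持哪些文件格式?"
context = "\n".join(d.page_content for d in vector_store.similarity_search(query, k=3))

# 匹配出的文本作为上下文和问题一起添加到 prompt 中,提交给 LLM 生成回答
prompt = f"已知信息:\n{context}\n\n请根据上述已知信息回答问题:{query}"
# answer = llm(prompt)  # 任意本地部署的 LLM(例如通过 FastChat 接入)或 OpenAI 兼容 API
```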
🚩 本项目未涉及微调、训练过程,但可利用微调或训练对本项目效果进行优化。

📓 [ModelWhale 在线运行项目](https://www.heywhale.com/mw/project/643977aa446c45f4592a1e59)
🌐 [AutoDL 镜像](https://www.codewithgpu.com/i/chatchat-space/Langchain-Chatchat/Langchain-Chatchat) 中 `0.2.10`

## 变更日志
版本所使用代码已更新至本项目 `v0.2.10` 版本。

参见 [变更日志](docs/CHANGELOG.md)。
🐳 [Docker 镜像](isafetech/chatchat:0.2.10) 已经更新到 ```0.2.10``` 版本。

## 硬件需求

- ChatGLM-6B 模型硬件需求

| **量化等级** | **最低 GPU 显存**(推理) | **最低 GPU 显存**(高效参数微调) |
| -------------- | ------------------------- | --------------------------------- |
| FP16(无量化) | 13 GB | 14 GB |
| INT8 | 8 GB | 9 GB |
| INT4 | 6 GB | 7 GB |

- Embedding 模型硬件需求

本项目中默认选用的 Embedding 模型 [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main) 约占用显存 3GB,也可修改为在 CPU 中运行。
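The note above says the default Embedding model can be switched from GPU to CPU. As a rough illustration of what that device switch looks like with langchain's generic `HuggingFaceEmbeddings` wrapper (the project itself selects the device through `configs/model_config.py`, so treat this as a sketch rather than the project's exact code path):

```python
# Illustrative only: loading the default Embedding model on CPU instead of GPU.
from langchain.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(
    model_name="GanymedeNil/text2vec-large-chinese",
    model_kwargs={"device": "cpu"},  # use "cuda" when ~3 GB of GPU memory is available
)
vector = embeddings.embed_query("本项目支持哪些向量库?")
print(len(vector))  # dimensionality of the resulting embedding
```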
## Docker 部署
|
||||
|
||||
```commandline
|
||||
$ docker build -t chatglm:v1.0 .
|
||||
|
||||
$ docker run -d --restart=always --name chatglm -p 7860:7860 -v /www/wwwroot/code/langchain-ChatGLM:/chatGLM chatglm
|
||||
```
|
||||
|
||||
## 开发部署
|
||||
|
||||
### 软件需求
|
||||
|
||||
本项目已在 Python 3.8 - 3.10,CUDA 11.7 环境下完成测试。已在 Windows、ARM 架构的 macOS、Linux 系统中完成测试。
|
||||
|
||||
### 从本地加载模型
|
||||
|
||||
请参考 [THUDM/ChatGLM-6B#从本地加载模型](https://github.com/THUDM/ChatGLM-6B#从本地加载模型)
|
||||
|
||||
### 1. 安装环境
|
||||
|
||||
参见 [安装指南](docs/INSTALL.md)。
|
||||
|
||||
### 2. 设置模型默认参数
|
||||
|
||||
在开始执行 Web UI 或命令行交互前,请先检查 [configs/model_config.py](configs/model_config.py) 中的各项模型参数设计是否符合需求。
|
||||
|
||||
### 3. 执行脚本体验 Web UI 或命令行交互
|
||||
|
||||
> 注:鉴于环境部署过程中可能遇到问题,建议首先测试命令行脚本。建议命令行脚本测试可正常运行后再运行 Web UI。
|
||||
|
||||
执行 [cli_demo.py](cli_demo.py) 脚本体验**命令行交互**:
|
||||
```shell
|
||||
$ python cli_demo.py
|
||||
```
|
||||
|
||||
或执行 [webui.py](webui.py) 脚本体验 **Web 交互**:

```shell
$ python webui.py
```

🌲 本次更新后同时支持 DockerHub、阿里云、腾讯云镜像源:

```shell
docker run -d --gpus all -p 80:8501 isafetech/chatchat:0.2.10
docker run -d --gpus all -p 80:8501 uswccr.ccs.tencentyun.com/chatchat/chatchat:0.2.10
docker run -d --gpus all -p 80:8501 registry.cn-beijing.aliyuncs.com/chatchat/chatchat:0.2.10
```
|
||||
|
||||
注:如未将模型下载至本地,请执行前检查`$HOME/.cache/huggingface/`文件夹剩余空间,至少15G。
|
||||
🧩 本项目有一个非常完整的 [Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/),README 只是一个简单的介绍,_仅仅是入门教程,能够基础运行_。
如果你想要更深入地了解本项目,或者想对本项目做出贡献,请移步 [Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/) 界面。
|
||||
|
||||
执行后效果如下图所示:
|
||||

|
||||
Web UI 可以实现如下功能:

1. 运行前自动读取 `configs/model_config.py` 中 `LLM` 及 `Embedding` 模型枚举及默认模型设置运行模型,如需重新加载模型,可在 `模型配置` 标签页重新选择后点击 `重新加载模型` 进行模型加载;
2. 可手动调节保留对话历史长度、匹配知识库文段数量,可根据显存大小自行调节;
3. 具备模式选择功能,可选择 `LLM对话` 与 `知识库问答` 模式进行对话,支持流式对话;
4. 添加 `配置知识库` 功能,支持选择已有知识库或新建知识库,并可向知识库中**新增**上传文件/文件夹,使用文件上传组件选择好文件后点击 `上传文件并加载知识库`,会将所选上传文档数据加载至知识库中,并基于更新后知识库进行问答;
5. 后续版本中将会增加对知识库的修改或删除,及知识库中已导入文件的查看。

## 解决的痛点

该项目是一个可以实现 **完全本地化** 推理的知识库增强方案,重点解决数据安全保护、私域化部署的企业痛点。
本开源方案采用 `Apache License`,可以免费商用,无需付费。
我们支持市面上主流的本地大语言模型和 Embedding 模型,支持开源的本地向量数据库。
支持列表详见 [Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/)。

### 常见问题

参见 [常见问题](docs/FAQ.md)。
|
||||
## 快速上手
|
||||
|
||||
## Demo
|
||||
### 1. 环境配置
|
||||
|
||||
以问题`chatglm-6b 的局限性具体体现在哪里,如何实现改进`为例:
|
||||
+ 首先,确保你的机器安装了 Python 3.8 - 3.11 (我们强烈推荐使用 Python3.11)。
|
||||
|
||||
未使用 langchain 接入本地文档时:
|
||||
```
|
||||
$ python --version
|
||||
Python 3.11.7
|
||||
```
|
||||
|
||||
>ChatGLM-6B 是一个基于 GPT-3 的聊天机器人,它的局限性主要体现在以下几个方面:
|
||||
>1. 语言模型的精度:GPT-3 是一种大型的语言模型,它的精度比较高,但是仍然有一些局限性。例如,它可能无法准确地理解某些方言、口音或者复杂的句子结构。
|
||||
>2. 知识图谱的完整性:GPT-3 可以生成一些知识图谱,但是其完整性和准确性可能不够。例如,它可能无法准确地理解某些领域的专业知识和术语。
|
||||
>3. 对话生成的连贯性:GPT-3 可以生成一些连贯的对话,但是其生成的对话可能不够自然。例如,它可能会生成一些虚假的、不连贯的对话或者误解用户的意图。
|
||||
>
|
||||
>为了改进 ChatGLM-6B,可以考虑以下几个方面:
|
||||
>1. 提高语言模型的精度:可以通过增加训练数据、使用更好的语言模型架构、优化模型训练过程等方式来提高语言模型的精度。
|
||||
>2. 增强知识图谱的完整性:可以通过增加知识图谱的维度、使用更好的知识图谱构建方法、优化知识图谱的匹配算法等方式来增强知识图谱的完整性。
|
||||
>3. 提高对话生成的连贯性:可以通过增加对话生成模型的上下文信息、使用更好的上下文信息提取方法、优化对话生成模型的算法等方式来提高对话生成的连贯性。
|
||||
>4. 引入更多的人类反馈:人类反馈可以帮助 ChatGLM-6B 更好地理解用户的意图和需求,从而提高对话生成的质量和准确性。可以通过引入更多的人类客服、使用人类反馈收集器等方式来引入更多的人类反馈。
|
||||
接着,创建一个虚拟环境,并在虚拟环境内安装项目的依赖
|
||||
|
||||
使用 langchain 接入 ChatGLM-6B 项目的 README.md 文件后 ChatGLM 的回答:
|
||||
>ChatGLM-6B 是一个基于深度学习的自然语言处理模型,它在回答问题方面表现出色。但是,它也有一些局限性,具体如下:
|
||||
>1. 语言理解有限:ChatGLM-6B 只学习了中文自然语言,对于其他语言可能会存在理解上的局限性。
|
||||
>2. 知识储备不足:ChatGLM-6B 的训练数据集只包含了中文语料,因此它可能无法回答一些非中文的问题或者某些特定领域的问题。
|
||||
>3. 数据量有限:ChatGLM-6B 的训练数据集只有几百万条记录,因此它可能无法回答一些非常具体或者复杂的问题。
|
||||
>
|
||||
>为了改进 ChatGLM-6B,可以考虑以下几个方面:
|
||||
>1. 学习更多的语言知识:可以学习其他语言的自然语言处理技术,扩大语言理解的范围。
|
||||
>2. 扩大知识储备:可以收集更多的中文语料,或者使用其他语言的数据集来扩充知识储备。
|
||||
>3. 增加数据量:可以使用更大的数据集来训练 ChatGLM-6B,提高模型的表现。
|
||||
>4. 引入更多的评估指标:可以引入更多的评估指标来评估模型的表现,从而发现 ChatGLM-6B 存在的不足和局限性。
|
||||
>5. 改进模型架构:可以改进 ChatGLM-6B 的模型架构,提高模型的性能和表现。例如,可以使用更大的神经网络或者改进的卷积神经网络结构。
|
||||
## 路线图

- [ ] Langchain 应用
  - [x] 接入非结构化文档(已支持 md、pdf、docx、txt 文件格式)
  - [ ] 搜索引擎与本地网页接入
  - [ ] 结构化数据接入(如 csv、Excel、SQL 等)
  - [ ] 知识图谱/图数据库接入
  - [ ] Agent 实现
- [ ] 增加更多 LLM 模型支持
  - [x] [THUDM/chatglm-6b](https://huggingface.co/THUDM/chatglm-6b)
  - [x] [THUDM/chatglm-6b-int8](https://huggingface.co/THUDM/chatglm-6b-int8)
  - [x] [THUDM/chatglm-6b-int4](https://huggingface.co/THUDM/chatglm-6b-int4)
  - [x] [THUDM/chatglm-6b-int4-qe](https://huggingface.co/THUDM/chatglm-6b-int4-qe)
  - [x] [ClueAI/ChatYuan-large-v2](https://huggingface.co/ClueAI/ChatYuan-large-v2)
- [ ] 增加更多 Embedding 模型支持
  - [x] [nghuyong/ernie-3.0-nano-zh](https://huggingface.co/nghuyong/ernie-3.0-nano-zh)
  - [x] [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh)
  - [x] [shibing624/text2vec-base-chinese](https://huggingface.co/shibing624/text2vec-base-chinese)
  - [x] [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese)
- [ ] Web UI
  - [x] 利用 gradio 实现 Web UI DEMO
  - [x] 添加输出内容及错误提示
  - [x] 引用标注
  - [ ] 增加知识库管理
    - [x] 选择知识库开始问答
    - [x] 上传文件/文件夹至知识库
    - [ ] 删除知识库中文件
  - [ ] 利用 streamlit 实现 Web UI Demo
- [ ] 增加 API 支持
  - [ ] 利用 fastapi 实现 API 部署方式
  - [ ] 实现调用 API 的 Web UI Demo

## 项目交流群

![二维码](img/qr_code_17.jpg)

🎉 langchain-ChatGLM 项目交流群,如果你也对本项目感兴趣,欢迎加入群聊参与讨论交流。

```shell
# 拉取仓库
$ git clone https://github.com/chatchat-space/Langchain-Chatchat.git

# 进入目录
$ cd Langchain-Chatchat

# 安装全部依赖
$ pip install -r requirements.txt
$ pip install -r requirements_api.txt
$ pip install -r requirements_webui.txt

# 默认依赖包括基本运行环境(FAISS向量库)。如果要使用 milvus/pg_vector 等向量库,请将 requirements.txt 中相应依赖取消注释再安装。
```
|
||||
|
||||
请注意,LangChain-Chatchat `0.2.x` 系列是针对 Langchain `0.0.x` 系列版本的,如果你使用的是 Langchain `0.1.x`
|
||||
系列版本,需要降级您的`Langchain`版本。
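
A quick way to confirm which Langchain series is installed before launching (an illustrative check, not part of the project):

```python
# Print the installed Langchain version; Langchain-Chatchat 0.2.x expects a 0.0.x release here.
from importlib.metadata import version

print(version("langchain"))
```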
|
||||
|
||||
### 2. 模型下载
|
||||
|
||||
如需在本地或离线环境下运行本项目,需要首先将项目所需的模型下载至本地,通常开源 LLM 与 Embedding
|
||||
模型可以从 [HuggingFace](https://huggingface.co/models) 下载。
|
||||
|
||||
以本项目中默认使用的 LLM 模型 [THUDM/ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b) 与 Embedding
|
||||
模型 [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) 为例:
|
||||
|
||||
下载模型需要先[安装 Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage)
|
||||
,然后运行
|
||||
|
||||
```Shell
|
||||
$ git lfs install
|
||||
$ git clone https://huggingface.co/THUDM/chatglm3-6b
|
||||
$ git clone https://huggingface.co/BAAI/bge-large-zh
|
||||
```
|
||||
|
||||
### 3. 初始化知识库和配置文件
|
||||
|
||||
按照下列方式初始化自己的知识库,并复制配置文件:
|
||||
|
||||
```shell
|
||||
$ python copy_config_example.py
|
||||
$ python init_database.py --recreate-vs
|
||||
```
|
||||
|
||||
### 4. 一键启动
|
||||
|
||||
按照以下命令启动项目
|
||||
|
||||
```shell
|
||||
$ python startup.py -a
|
||||
```
|
||||
|
||||
### 5. 启动界面示例
|
||||
|
||||
如果正常启动,你将能看到以下界面
|
||||
|
||||
1. FastAPI Docs 界面
|
||||
|
||||

|
||||
|
||||
2. Web UI 启动界面示例:
|
||||
|
||||
- Web UI 对话界面:
|
||||
|
||||

|
||||
|
||||
- Web UI 知识库管理页面:
|
||||
|
||||

|
||||
|
||||
### 注意
|
||||
|
||||
以上方式只是为了快速上手,如果需要更多的功能和自定义启动方式,请参考 [Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/)。
|
||||
|
||||
|
||||
---
|
||||
|
||||
## 项目里程碑
|
||||
|
||||
+ `2023年4月`: `Langchain-ChatGLM 0.1.0` 发布,支持基于 ChatGLM-6B 模型的本地知识库问答。
|
||||
+ `2023年8月`: `Langchain-ChatGLM` 改名为 `Langchain-Chatchat`,`0.2.0` 发布,使用 `fastchat` 作为模型加载方案,支持更多的模型和数据库。
|
||||
+ `2023年10月`: `Langchain-Chatchat 0.2.5` 发布,推出 Agent 内容,开源项目在`Founder Park & Zhipu AI & Zilliz`
|
||||
举办的黑客马拉松获得三等奖。
|
||||
+ `2023年12月`: `Langchain-Chatchat` 开源项目获得超过 **20K** stars.
|
||||
+ `2024年1月`: `LangChain 0.1.x` 推出,`Langchain-Chatchat 0.2.x` 发布稳定版本`0.2.10`
|
||||
后将停止更新和技术支持,全力研发具有更强应用性的 `Langchain-Chatchat 0.3.x`。
|
||||
|
||||
+ 🔥 让我们一起期待未来 Chatchat 的故事 ···
|
||||
|
||||
---
|
||||
|
||||
## 联系我们
|
||||
|
||||
### Telegram
|
||||
|
||||
[](https://t.me/+RjliQ3jnJ1YyN2E9)
|
||||
|
||||
### 项目交流群
|
||||
<img src="img/qr_code_108.jpg" alt="二维码" width="300" />
|
||||
|
||||
🎉 Langchain-Chatchat 项目微信交流群,如果你也对本项目感兴趣,欢迎加入群聊参与讨论交流。
|
||||
|
||||
### 公众号
|
||||
|
||||
<img src="img/official_wechat_mp_account.png" alt="二维码" width="300" />
|
||||
|
||||
🎉 Langchain-Chatchat 项目官方公众号,欢迎扫码关注。
|
||||
|
||||
410
README_en.md
@ -1,219 +1,207 @@
|
||||
# ChatGLM Application with Local Knowledge Implementation
|
||||

|
||||
|
||||
🌍 [中文文档](README.md)
|
||||
🌍 [日本語で読む](README_ja.md)
|
||||
|
||||
📃 **LangChain-Chatchat** (formerly Langchain-ChatGLM):
|
||||
|
||||
An LLM application that implements knowledge-base and search-engine based QA using Langchain and open-source or remote LLM APIs.
|
||||
|
||||
⚠️ `0.2.10` will be the last release of the `0.2.x` series. Updates and technical support for the `0.2.x` series will stop, and effort will shift to developing the more broadly applicable `Langchain-Chatchat 0.3.x`.
Subsequent bug fixes for `0.2.10` will be pushed directly to the `master` branch without version updates.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Introduction](README.md#Introduction)
|
||||
- [Pain Points Addressed](README.md#Pain-Points-Addressed)
|
||||
- [Quick Start](README.md#Quick-Start)
|
||||
- [1. Environment Setup](README.md#1-Environment-Setup)
|
||||
- [2. Model Download](README.md#2-Model-Download)
|
||||
- [3. Initialize Knowledge Base and Configuration Files](README.md#3-Initialize-Knowledge-Base-and-Configuration-Files)
|
||||
- [4. One-Click Startup](README.md#4-One-Click-Startup)
|
||||
- [5. Startup Interface Examples](README.md#5-Startup-Interface-Examples)
|
||||
- [Contact Us](README.md#Contact-Us)
|
||||
|
||||
## Introduction
|
||||
|
||||
🤖️ A Q&A application built on a local knowledge base and implemented with the ideas of [langchain](https://github.com/langchain-ai/langchain). The goal is to build a KBQA (knowledge-based Q&A) solution that is friendly to Chinese scenarios and open-source models and can run both offline and online.
|
||||
|
||||
💡 Inspired by [document.ai](https://github.com/GanymedeNil/document.ai)
|
||||
and [ChatGLM-6B Pull Request](https://github.com/THUDM/ChatGLM-6B/pull/216) , we build a local knowledge base question
|
||||
answering application that can be implemented using an open source model or remote LLM api throughout the process. In
|
||||
the latest version of this project, [FastChat](https://github.com/lm-sys/FastChat) is used to access Vicuna, Alpaca,
|
||||
LLaMA, Koala, RWKV and many other models. Relying on [langchain](https://github.com/langchain-ai/langchain) , this
|
||||
project supports calling services through the API provided based on [FastAPI](https://github.com/tiangolo/fastapi), or
|
||||
using the WebUI based on [Streamlit](https://github.com/streamlit/streamlit).
|
||||
|
||||
✅ Relying on open-source LLM and Embedding models, this project can realize full-process **offline private deployment**. At the same time, it also supports calling the OpenAI GPT API and the Zhipu API, and will continue to expand access to various models and remote APIs in the future.
|
||||
|
||||
⛓️ The implementation principle of this project is shown in the graph below. The main process includes: loading files ->
|
||||
reading text -> text segmentation -> text vectorization -> question vectorization -> matching the `top-k` most similar
|
||||
to the question vector among the text vectors -> the matched text is added to the `prompt` as context together with the question -> submitted to the `LLM` to generate an answer.
|
||||
|
||||
📺[video introduction](https://www.bilibili.com/video/BV13M4y1e7cN/?share_source=copy_web&vd_source=e6c5aafe684f30fbe41925d61ca6d514)
|
||||
|
||||

|
||||
|
||||
The main process, analyzed from the document-processing perspective:
|
||||
|
||||

|
||||
|
||||
🚩 Training and fine-tuning are not part of this project, but performance can still be improved by applying them.
|
||||
|
||||
🌐 An [AutoDL image](https://www.codewithgpu.com/i/chatchat-space/Langchain-Chatchat/Langchain-Chatchat) is available, and in `0.2.10` its code has been updated to `v0.2.10`.
|
||||
|
||||
🐳 A [Docker image](isafetech/chatchat:0.2.10) is available and has been updated to `0.2.10`.
|
||||
|
||||
🌲 The latest update also provides support for image sources from DockerHub, Ali Cloud, and Tencent Cloud:
|
||||
|
||||
```shell
|
||||
docker run -d --gpus all -p 80:8501 isafetech/chatchat:0.2.10
|
||||
docker run -d --gpus all -p 80:8501 uswccr.ccs.tencentyun.com/chatchat/chatchat:0.2.10
|
||||
docker run -d --gpus all -p 80:8501 registry.cn-beijing.aliyuncs.com/chatchat/chatchat:0.2.10
|
||||
```
|
||||
|
||||
## Pain Points Addressed
|
||||
|
||||
This project is a solution for enhancing knowledge bases with fully localized inference, specifically addressing the
|
||||
pain points of data security and private deployments for businesses.
|
||||
This open-source solution is under the Apache License and can be used for commercial purposes for free, with no fees
|
||||
required.
|
||||
We support mainstream local large language models and Embedding models available on the market, as well as open-source local vector databases. For a detailed list of supported models and databases, please refer to our [Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/).
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Environment Setup
|
||||
|
||||
First, make sure your machine has Python 3.10 installed.
|
||||
|
||||
```
|
||||
$ python --version
|
||||
Python 3.10.12
|
||||
```
|
||||
|
||||
Then, create a virtual environment and install the project's dependencies within the virtual environment.
|
||||
|
||||
```shell
|
||||
|
||||
# 拉取仓库
|
||||
$ git clone https://github.com/chatchat-space/Langchain-Chatchat.git
|
||||
|
||||
# 进入目录
|
||||
$ cd Langchain-Chatchat
|
||||
|
||||
# 安装全部依赖
|
||||
$ pip install -r requirements.txt
|
||||
$ pip install -r requirements_api.txt
|
||||
$ pip install -r requirements_webui.txt
|
||||
|
||||
# 默认依赖包括基本运行环境(FAISS向量库)。如果要使用 milvus/pg_vector 等向量库,请将 requirements.txt 中相应依赖取消注释再安装。
|
||||
```
|
||||
|
||||
Please note that the LangChain-Chatchat `0.2.x` series targets the Langchain `0.0.x` series. If you are using a Langchain `0.1.x` release, you need to downgrade it.
|
||||
|
||||
### Model Download
|
||||
|
||||
If you need to run this project locally or in an offline environment, you must first download the required models for
|
||||
the project. Typically, open-source LLM and Embedding models can be downloaded from HuggingFace.
|
||||
|
||||
Taking the default LLM model used in this project, [THUDM/chatglm2-6b](https://huggingface.co/THUDM/chatglm2-6b), and
|
||||
the Embedding model [moka-ai/m3e-base](https://huggingface.co/moka-ai/m3e-base) as examples:
|
||||
|
||||
To download the models, you need to first
|
||||
install [Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage)
|
||||
and then run:
|
||||
|
||||
```Shell
|
||||
$ git lfs install
|
||||
$ git clone https://huggingface.co/THUDM/chatglm2-6b
|
||||
$ git clone https://huggingface.co/moka-ai/m3e-base
|
||||
```
|
||||
|
||||
### Initializing the Knowledge Base and Config File
|
||||
|
||||
Follow the steps below to initialize your own knowledge base and config file:
|
||||
|
||||
```shell
|
||||
$ python copy_config_example.py
|
||||
$ python init_database.py --recreate-vs
|
||||
```
|
||||
|
||||
### One-Click Launch
|
||||
|
||||
To start the project, run the following command:
|
||||
|
||||
```shell
|
||||
$ python startup.py -a
|
||||
```
|
||||
|
||||
### Example of Launch Interface
|
||||
|
||||
1. FastAPI docs interface
|
||||
|
||||

|
||||
|
||||
2. webui page
|
||||
|
||||
- Web UI dialog page:
|
||||
|
||||

|
||||
|
||||
- Web UI knowledge base management page:
|
||||
|
||||

|
||||
|
||||
### Note
|
||||
|
||||
The above instructions are provided for a quick start. If you need more features or want to customize the launch method,
|
||||
please refer to the [Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/).
|
||||
|
||||
---
|
||||
|
||||
## Project Milestones
|
||||
|
||||
+ `April 2023`: `Langchain-ChatGLM 0.1.0` released, supporting local knowledge base question and answer based on the
|
||||
ChatGLM-6B model.
|
||||
+ `August 2023`: `Langchain-ChatGLM` was renamed to `Langchain-Chatchat`, `0.2.0` was released, using `fastchat` as the
|
||||
model loading solution, supporting more models and databases.
|
||||
+ `October 2023`: `Langchain-Chatchat 0.2.5` was released, introducing Agent support, and the open-source project won third prize in the hackathon held by `Founder Park & Zhipu AI & Zilliz`.
+ `December 2023`: the `Langchain-Chatchat` open-source project received more than **20K** stars.
|
||||
+ `January 2024`: `LangChain 0.1.x` was launched. After the stable `Langchain-Chatchat 0.2.10` release, updates and technical support for the `0.2.x` series will stop, and all efforts will go into developing the more broadly applicable `Langchain-Chatchat 0.3.x`.
|
||||
|
||||
|
||||
+ 🔥 Let’s look forward to the future Chatchat stories together···
|
||||
|
||||
---
|
||||
|
||||
## Contact Us
|
||||
|
||||
### Telegram
|
||||
|
||||
[](https://t.me/+RjliQ3jnJ1YyN2E9)
|
||||
|
||||
🌍 [_中文文档_](README.md)
|
||||
### WeChat Group
|
||||
|
||||
🤖️ This is a ChatGLM application based on local knowledge, implemented using [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) and [langchain](https://github.com/hwchase17/langchain).
|
||||
<img src="img/qr_code_90.jpg" alt="二维码" width="300" height="300" />
|
||||
|
||||
💡 Inspired by [document.ai](https://github.com/GanymedeNil/document.ai) and [Alex Zhangji](https://github.com/AlexZhangji)'s [ChatGLM-6B Pull Request](https://github.com/THUDM/ChatGLM-6B/pull/216), this project establishes a local knowledge question-answering application using open-source models.
|
||||
### WeChat Official Account
|
||||
|
||||
✅ The embeddings used in this project are [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main), and the LLM is [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B). Relying on these models, this project enables the use of **open-source** models for **offline private deployment**.
|
||||
|
||||
⛓️ The implementation principle of this project is illustrated in the figure below. The process includes loading files -> reading text -> text segmentation -> text vectorization -> question vectorization -> matching the top k most similar text vectors to the question vector -> adding the matched text to `prompt` along with the question as context -> submitting to `LLM` to generate an answer.
|
||||
|
||||

|
||||
|
||||
🚩 This project does not involve fine-tuning or training; however, fine-tuning or training can be employed to optimize the effectiveness of this project.
|
||||
|
||||
📓 [ModelWhale online notebook](https://www.heywhale.com/mw/project/643977aa446c45f4592a1e59)
|
||||
|
||||
## Changelog
|
||||
|
||||
**[2023/04/15]**
|
||||
|
||||
1. refactor the project structure to keep the command line demo [cli_demo.py](cli_demo.py) and the Web UI demo [webui.py](webui.py) in the root directory.
|
||||
2. Improve the Web UI by modifying it to first load the model according to the default option of [configs/model_config.py](configs/model_config.py) after running the Web UI, and adding error messages, etc.
|
||||
3. Update FAQ.
|
||||
|
||||
**[2023/04/12]**
|
||||
|
||||
1. Replaced the sample files in the Web UI to avoid issues with unreadable files due to encoding problems in Ubuntu;
|
||||
2. Replaced the prompt template in `knowledge_based_chatglm.py` to prevent confusion in the content returned by ChatGLM, which may arise from the prompt template containing Chinese and English bilingual text.
|
||||
|
||||
**[2023/04/11]**
|
||||
|
||||
1. Added Web UI V0.1 version (thanks to [@liangtongt](https://github.com/liangtongt));
|
||||
2. Added Frequently Asked Questions in `README.md` (thanks to [@calcitem](https://github.com/calcitem) and [@bolongliu](https://github.com/bolongliu));
|
||||
3. Enhanced automatic detection for the availability of `cuda`, `mps`, and `cpu` for LLM and Embedding model running devices;
|
||||
4. Added a check for `filepath` in `knowledge_based_chatglm.py`. In addition to supporting single file import, it now supports a single folder path as input. After input, it will traverse each file in the folder and display a command-line message indicating the success of each file load.
|
||||
|
||||
**[2023/04/09]**
|
||||
|
||||
1. Replaced the previously selected `ChatVectorDBChain` with `RetrievalQA` in `langchain`, effectively reducing the issue of stopping due to insufficient video memory after asking 2-3 times;
|
||||
2. Added `EMBEDDING_MODEL`, `VECTOR_SEARCH_TOP_K`, `LLM_MODEL`, `LLM_HISTORY_LEN`, `REPLY_WITH_SOURCE` parameter value settings in `knowledge_based_chatglm.py`;
|
||||
3. Added `chatglm-6b-int4` and `chatglm-6b-int4-qe`, which require less GPU memory, as LLM model options;
|
||||
4. Corrected code errors in `README.md` (thanks to [@calcitem](https://github.com/calcitem)).
|
||||
|
||||
**[2023/04/07]**
|
||||
|
||||
1. Resolved the issue of doubled video memory usage when loading the ChatGLM model (thanks to [@suc16](https://github.com/suc16) and [@myml](https://github.com/myml));
|
||||
2. Added a mechanism to clear video memory;
|
||||
3. Added `nghuyong/ernie-3.0-nano-zh` and `nghuyong/ernie-3.0-base-zh` as Embedding model options, which consume less video memory resources than `GanymedeNil/text2vec-large-chinese` (thanks to [@lastrei](https://github.com/lastrei)).
|
||||
|
||||
## How to Use
|
||||
|
||||
### Hardware Requirements
|
||||
|
||||
- ChatGLM-6B Model Hardware Requirements
|
||||
|
||||
| **Quantization Level** | **Minimum GPU Memory** (inference) | **Minimum GPU Memory** (efficient parameter fine-tuning) |
|
||||
| -------------- | ------------------------- | --------------------------------- |
|
||||
| FP16 (no quantization) | 13 GB | 14 GB |
|
||||
| INT8 | 8 GB | 9 GB |
|
||||
| INT4 | 6 GB | 7 GB |
|
||||
|
||||
- Embedding Model Hardware Requirements
|
||||
|
||||
The default Embedding model [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main) in this project occupies around 3GB of video memory and can also be configured to run on a CPU.
|
||||
### Software Requirements
|
||||
|
||||
This repository has been tested with Python 3.8 and CUDA 11.7 environments.
|
||||
|
||||
### 1. Setting up the environment
|
||||
|
||||
* Environment check
|
||||
|
||||
```shell
|
||||
# First, make sure your machine has Python 3.8 or higher installed
|
||||
$ python --version
|
||||
Python 3.8.13
|
||||
|
||||
# If your version is lower, you can use conda to install the environment
|
||||
$ conda create -p /your_path/env_name python=3.8
|
||||
|
||||
# Activate the environment
|
||||
$ source activate /your_path/env_name
|
||||
|
||||
# Deactivate the environment
|
||||
$ source deactivate /your_path/env_name
|
||||
|
||||
# Remove the environment
|
||||
$ conda env remove -p /your_path/env_name
|
||||
```
|
||||
|
||||
* Project dependencies
|
||||
|
||||
```shell
|
||||
|
||||
# Clone the repository
|
||||
$ git clone https://github.com/imClumsyPanda/langchain-ChatGLM.git
|
||||
|
||||
# Install dependencies
|
||||
$ pip install -r requirements.txt
|
||||
```
|
||||
|
||||
Note: When using langchain.document_loaders.UnstructuredFileLoader for unstructured file integration, you may need to install other dependency packages according to the documentation. Please refer to [langchain documentation](https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/unstructured_file.html).
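
For instance, a single unstructured file can be loaded roughly as follows. This is a minimal sketch using the langchain 0.0.x loader API with a placeholder file path; extra system packages (e.g. for PDFs) may still be required as noted above.

```python
# Minimal sketch of loading one unstructured file with langchain 0.0.x.
from langchain.document_loaders import UnstructuredFileLoader

loader = UnstructuredFileLoader("docs/sample.md", mode="elements")
docs = loader.load()
print(f"loaded {len(docs)} document elements")
```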
|
||||
|
||||
### 2. Run Scripts to Experience Web UI or Command Line Interaction
|
||||
|
||||
Execute [webui.py](webui.py) script to experience **Web interaction** <img src="https://img.shields.io/badge/Version-0.1-brightgreen">
|
||||
```commandline
|
||||
python webui.py
|
||||
```
|
||||
Note: Before executing, check the remaining space in the `$HOME/.cache/huggingface/` folder, at least 15G.
|
||||
|
||||
The resulting interface is shown below:
|
||||

|
||||
The Web UI supports the following features:
|
||||
|
||||
1. Automatically reads the `LLM` and `embedding` model enumerations in `configs/model_config.py`, allowing you to select and reload the model by clicking `重新加载模型`.
|
||||
2. The length of retained dialogue history can be manually adjusted according to the available video memory.
|
||||
3. Adds a file upload function. Select the uploaded file through the drop-down box, click `加载文件` to load the file, and change the loaded file at any time during the process.
|
||||
|
||||
Alternatively, execute the [knowledge_based_chatglm.py](knowledge_based_chatglm.py) script to experience **command line interaction**:
|
||||
|
||||
```commandline
|
||||
python knowledge_based_chatglm.py
|
||||
```
|
||||
|
||||
### FAQ
|
||||
|
||||
Q1: What file formats does this project support?
|
||||
|
||||
A1: Currently, this project has been tested with txt, docx, and md file formats. For more file formats, please refer to the [langchain documentation](https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/unstructured_file.html). It is known that if the document contains special characters, there might be issues with loading the file.
|
||||
|
||||
Q2: How can I resolve the `detectron2` dependency issue when reading specific file formats?
|
||||
|
||||
A2: As the installation process for this package can be problematic and it is only required for some file formats, it is not included in `requirements.txt`. You can install it with the following command:
|
||||
|
||||
```commandline
|
||||
pip install "detectron2@git+https://github.com/facebookresearch/detectron2.git@v0.6#egg=detectron2"
|
||||
```
|
||||
|
||||
Q3: How can I solve the `Resource punkt not found.` error?
|
||||
|
||||
A3: Unzip the `packages/tokenizers` folder from https://github.com/nltk/nltk_data/raw/gh-pages/packages/tokenizers/punkt.zip, and place it in the `nltk_data/tokenizers` storage path.
|
||||
|
||||
The `nltk_data` storage path can be found using `nltk.data.path`.
|
||||
|
||||
Q4: How can I solve the `Resource averaged_perceptron_tagger not found.` error?
|
||||
|
||||
A4: Download https://github.com/nltk/nltk_data/blob/gh-pages/packages/taggers/averaged_perceptron_tagger.zip, extract it, and place it in the `nltk_data/taggers` storage path.
|
||||
|
||||
The `nltk_data` storage path can be found using `nltk.data.path`.
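
For both of the errors above, the search path can be inspected and the missing resources fetched automatically when network access is available; the snippet below is a small illustrative helper, not part of the project.

```python
# Inspect where NLTK looks for data, then fetch the missing resources if the machine is online.
import nltk

print(nltk.data.path)                        # directories searched for nltk_data
nltk.download("punkt")                       # tokenizers/punkt
nltk.download("averaged_perceptron_tagger")  # taggers/averaged_perceptron_tagger
```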
|
||||
|
||||
Q5: Can this project run in Google Colab?
|
||||
|
||||
A5: You can try running the chatglm-6b-int4 model in Google Colab. Please note that if you want to run the Web UI in Colab, you need to set the `share` parameter in `demo.queue(concurrency_count=3).launch(server_name='0.0.0.0', share=False, inbrowser=False)` to `True`.
|
||||
|
||||
This issue is related to the system environment. For more details, please refer to [Issues with installing packages using pip in Anaconda](docs/Issue-with-Installing-Packages-Using-pip-in-Anaconda.md).
|
||||
|
||||
## DEMO
|
||||
|
||||
Using the question "What are the limitations of ChatGLM-6B and how can they be improved?" as an example:
|
||||
|
||||
Without using langchain to access local documents:
|
||||
|
||||
> ChatGLM-6B is a chatbot based on GPT-3, and its limitations are mainly reflected in the following aspects:
|
||||
>
|
||||
> 1. Language model accuracy: Although GPT-3 is a large language model with relatively high accuracy, it still has some limitations. For instance, it may not be able to accurately understand certain dialects, accents, or complex sentence structures.
|
||||
> 2. Completeness of the knowledge graph: GPT-3 can generate some knowledge graphs, but its completeness and accuracy may not be sufficient. For example, it may struggle to accurately understand specific domain expertise and terminology.
|
||||
> 3. Coherence of dialogue generation: While GPT-3 can generate somewhat coherent dialogues, they may not always be natural. For instance, it may generate false, incoherent dialogue or misinterpret the user's intent.
|
||||
>
|
||||
> To improve ChatGLM-6B, consider the following aspects:
|
||||
>
|
||||
> 1. Enhance the accuracy of the language model: The accuracy of the language model can be improved by increasing the training data, using better language model architectures, and optimizing the model training process.
|
||||
> 2. Strengthen the integrity of the knowledge graph: The integrity of the knowledge graph can be enhanced by increasing its dimensions, employing better knowledge graph construction methods, and optimizing the knowledge graph's matching algorithm.
|
||||
> 3. Boost the coherence of dialogue generation: The coherence of dialogue generation can be improved by augmenting the context information of the dialogue generation model, utilizing better context information extraction methods, and optimizing the dialogue generation model's algorithm.
|
||||
> 4. Incorporate more human feedback: Human feedback can help ChatGLM-6B better understand users' intentions and needs, thereby improving the quality and accuracy of dialogue generation. More human feedback can be introduced by involving more human agents and using human feedback collectors.
|
||||
|
||||
ChatGLM's answer after using LangChain to access the README.md file of the ChatGLM-6B project:
|
||||
>ChatGLM-6B is a deep learning-based natural language processing model that excels at answering questions. However, it also has some limitations, as follows:
|
||||
>1. Limited language understanding: ChatGLM-6B has been primarily trained on Chinese natural language, and its understanding of other languages may be limited.
|
||||
>2. Insufficient knowledge base: The training dataset of ChatGLM-6B contains only a Chinese corpus, so it may not be able to answer non-Chinese questions or queries in specific domains.
|
||||
>3. Limited data volume: ChatGLM-6B's training dataset has only a few million records, which may hinder its ability to answer very specific or complex questions.
|
||||
>
|
||||
>To improve ChatGLM-6B, consider the following aspects:
|
||||
>1. Expand language knowledge: Learn natural language processing techniques in other languages to broaden the model's language understanding capabilities.
|
||||
>2. Broaden the knowledge base: Collect more Chinese corpora or use datasets in other languages to expand the model's knowledge base.
|
||||
>3. Increase data volume: Use larger datasets to train ChatGLM-6B, which can improve the model's performance.
|
||||
>4. Introduce more evaluation metrics: Incorporate additional evaluation metrics to assess the model's performance, which can help identify the shortcomings and limitations of ChatGLM-6B.
|
||||
>5. Enhance the model architecture: Improve ChatGLM-6B's model architecture to boost its performance and capabilities. For example, employ larger neural networks or refined convolutional neural network structures.
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [x] Implement LangChain + ChatGLM-6B for local knowledge application
|
||||
- [x] Unstructured file access based on langchain
|
||||
    - [x] .md
    - [x] .pdf
    - [x] .docx
    - [x] .txt
|
||||
- [ ] Add support for more LLM models
|
||||
- [x] THUDM/chatglm-6b
|
||||
- [x] THUDM/chatglm-6b-int4
|
||||
- [x] THUDM/chatglm-6b-int4-qe
|
||||
- [ ] Add Web UI DEMO
|
||||
- [x] Implement Web UI DEMO using Gradio
|
||||
- [x] Add output and error messages
|
||||
- [x] Citation callout
|
||||
- [ ] Knowledge base management
|
||||
- [x] QA based on selected knowledge base
|
||||
- [x] Add files/folder to knowledge base
|
||||
  - [ ] Delete files from knowledge base
|
||||
- [ ] Implement Web UI DEMO using Streamlit
|
||||
- [ ] Add support for API deployment
|
||||
- [x] Use fastapi to implement API
|
||||
- [ ] Implement Web UI DEMO for API calls
|
||||
<img src="img/official_wechat_mp_account.png" alt="图片" width="900" height="300" />
|
||||
|
||||
200
README_ja.md
Normal file
@ -0,0 +1,200 @@
|
||||

|
||||
|
||||
🌍 [中文文档](README.md)
|
||||
🌍 [READ THIS IN ENGLISH](README_en.md)
|
||||
|
||||
📃 **LangChain-Chatchat** (旧名 Langchain-ChatGLM)
|
||||
|
||||
ChatGLM などの大規模な言語モデルや Langchain などのアプリケーション フレームワークに基づいた、オープン
|
||||
ソースのオフライン展開可能な検索拡張生成 (RAG) 大規模モデル ナレッジ ベース プロジェクトです。
|
||||
|
||||
⚠️`0.2.10` は `0.2.x` シリーズの最終バージョンとなり、`0.2.x`
|
||||
シリーズはアップデートと技術サポートを終了し、より適用性の高い `Langchain-Chatchat 0.3.x` の開発に努めます。
|
||||
`0.2.10` のその後のバグ修正は、バージョン更新なしで `master` ブランチに直接プッシュされます。
|
||||
|
||||
---
|
||||
|
||||
## 目次
|
||||
|
||||
- [イントロ](README_ja.md#イントロ)
|
||||
- [ペインポイントへの対応](README_ja.md#ペインポイントへの対応)
|
||||
- [クイックスタート](README_ja.md#クイックスタート)
|
||||
- [1. 環境セットアップ](README_ja.md#環境セットアップ)
|
||||
- [2. モデルをダウンロード](README_ja.md#モデルをダウンロード)
|
||||
- [3. ナレッジベースと設定ファイルの初期化](README_ja.md#ナレッジベースと設定ファイルの初期化)
|
||||
- [4. ワンクリック起動](README_ja.md#ワンクリック起動)
|
||||
- [5. 起動インターフェースの例](README_ja.md#起動インターフェースの例)
|
||||
- [お問い合わせ](README_ja.md#お問い合わせ)
|
||||
|
||||
## イントロ
|
||||
|
||||
🤖️ [langchain](https://github.com/hwchase17/langchain) のアイデアを用いて実装された、ローカルナレッジベースに基づく Q&A
|
||||
アプリケーション。
|
||||
目標は、中国のシナリオとオープンソースモデルに親和性があり、オフラインとオンラインの両方で実行可能な KBQA(ナレッジベースの
|
||||
Q&A)ソリューションを構築することです。
|
||||
|
||||
💡 [document.ai](https://github.com/GanymedeNil/document.ai)
|
||||
と [ChatGLM-6B Pull Request](https://github.com/THUDM/ChatGLM-6B/pull/216) に触発され、
|
||||
プロセス全体を通してオープンソースモデルまたはリモート LLM api を使用して実装することができるローカルナレッジベースの質問応答アプリケーションを構築します。
|
||||
このプロジェクトの最新バージョンでは、[FastChat](https://github.com/lm-sys/FastChat)
|
||||
を使用して、Vicuna、Alpaca、LLaMA、Koala、RWKV、その他多くのモデルにアクセスしています。
|
||||
このプロジェクトは [langchain](https://github.com/langchain-ai/langchain)
|
||||
に依存し、[FastAPI](https://github.com/tiangolo/fastapi) に基づいて提供されるAPIを通してサービスを呼び出したり、
|
||||
[Streamlit](https://github.com/streamlit/streamlit) に基づいて WebUI を使ったりすることをサポートしています。
|
||||
|
||||
✅ オープンソースの LLM と Embedding モデルに依存して、このプロジェクトはフルプロセスの **オフラインプライベートデプロイメント
|
||||
** を実現することができます。
|
||||
同時に、本プロジェクトは OpenAI GPT API や Zhipu API の呼び出しにも対応しており、今後も様々な機種やリモート API
|
||||
へのアクセスを拡大していきます。
|
||||
|
||||
⛓️ このプロジェクトの実施原則を下のグラフに示します。主なプロセスは以下の通りです:
|
||||
ファイルの読み込み -> テキストの読み込み -> テキストのセグメンテーション -> テキストのベクトル化 -> 質問のベクトル化 ->
|
||||
質問ベクトルと最も似ている `top-k` をテキストベクトルでマッチング ->
|
||||
マッチしたテキストをコンテキストと質問として `prompt` に追加 -> 回答を生成するために `LLM` に送信。
|
||||
|
||||
📺[video introduction](https://www.bilibili.com/video/BV13M4y1e7cN/?share_source=copy_web&vd_source=e6c5aafe684f30fbe41925d61ca6d514)
|
||||
|
||||

|
||||
|
||||
文書プロセスの側面からの主なプロセス分析:
|
||||
|
||||

|
||||
|
||||
🚩 トレーニングやファインチューニングはプロジェクトには含まれないが、これらを行うことで必ずパフォーマンスを向上させることができます。
|
||||
|
||||
🌐 [AutoDL イメージ](registry.cn-beijing.aliyuncs.com/chatchat/chatchat:0.2.5)がサポートされ、`0.2.10` では v0.2.10
|
||||
にアップデートされました。
|
||||
|
||||
🐳 [Docker イメージ](isafetech/chatchat:0.2.10)
|
||||
|
||||
🌲 今回のアップデートにより、DockerHub、阿里雲、騰訊のクラウドにも対応しました。より広範なクラウド環境で利用可能となりました。
|
||||
|
||||
```shell
|
||||
docker run -d --gpus all -p 80:8501 isafetech/chatchat:0.2.10
|
||||
docker run -d --gpus all -p 80:8501 uswccr.ccs.tencentyun.com/chatchat/chatchat:0.2.10
|
||||
docker run -d --gpus all -p 80:8501 registry.cn-beijing.aliyuncs.com/chatchat/chatchat:0.2.10
|
||||
```
|
||||
|
||||
## ペインポイントへの対応
|
||||
|
||||
このプロジェクトは、完全にローカライズされた推論によってナレッジベースを強化するソリューションであり、特にデータセキュリティと企業向けのプライベートな展開の問題に取り組んでいます。
|
||||
このオープンソースソリューションは Apache ライセンスに基づき、無償で商用利用できます。
|
||||
私たちは、市場で入手可能な主流のローカル大規模言語モデルや Embedding モデル、オープンソースのローカルベクターデータベースをサポートしています。
|
||||
対応機種とデータベースの詳細については、[Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/) をご参照ください。
|
||||
|
||||
## クイックスタート
|
||||
|
||||
### 環境セットアップ
|
||||
|
||||
まず、マシンにPython 3.10がインストールされていることを確認してください。
|
||||
|
||||
```
|
||||
$ python --version
|
||||
Python 3.11.7
|
||||
```
|
||||
|
||||
次に、仮想環境を作成し、プロジェクトの依存関係を仮想環境内にインストールする。
|
||||
|
||||
```shell
|
||||
|
||||
# リポジトリをクローン
|
||||
$ git clone https://github.com/chatchat-space/Langchain-Chatchat.git
|
||||
|
||||
# ディレクトリに移動
|
||||
$ cd Langchain-Chatchat
|
||||
|
||||
# すべての依存関係をインストール
|
||||
$ pip install -r requirements.txt
|
||||
$ pip install -r requirements_api.txt
|
||||
$ pip install -r requirements_webui.txt
|
||||
|
||||
# デフォルトの依存関係には、基本的な実行環境(FAISS ベクターライブラリ)が含まれます。milvus/pg_vector などのベクターライブラリを使用する場合は、requirements.txt 内の対応する依存関係のコメントを解除してからインストールしてください。
|
||||
```
|
||||
|
||||
LangChain-Chatchat `0.2.x` シリーズは Langchain `0.0.x` シリーズ用です。Langchain `0.1.x` シリーズをお使いの場合は、ダウングレードする必要があります。
|
||||
|
||||
### モデルをダウンロード
|
||||
|
||||
このプロジェクトをローカルまたはオフライン環境で実行する必要がある場合は、まずプロジェクトに必要なモデルをダウンロードする必要があります。
|
||||
通常、オープンソースの LLM と Embedding モデルは Hugging Face からダウンロードできる。
|
||||
|
||||
このプロジェクトで使用されているデフォルトの LLM
|
||||
モデルである [THUDM/chatglm2-6b](https://huggingface.co/THUDM/chatglm2-6b)と、Embedding
|
||||
モデル [moka-ai/m3e-base](https://huggingface.co/moka-ai/m3e-base) を例にとると、次のようになります:
|
||||
|
||||
モデルをダウンロードするには、まず [Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage)
|
||||
をインストールし、次のように実行する必要があります:
|
||||
|
||||
```Shell
|
||||
$ git lfs install
|
||||
$ git clone https://huggingface.co/THUDM/chatglm2-6b
|
||||
$ git clone https://huggingface.co/moka-ai/m3e-base
|
||||
```
|
||||
|
||||
### ナレッジベースと設定ファイルの初期化
|
||||
|
||||
以下の手順に従って、ナレッジベースと設定ファイルを初期化してください:
|
||||
|
||||
```shell
|
||||
$ python copy_config_example.py
|
||||
$ python init_database.py --recreate-vs
|
||||
```
|
||||
|
||||
### ワンクリック起動
|
||||
|
||||
プロジェクトを開始するには、次のコマンドを実行します:
|
||||
|
||||
```shell
|
||||
$ python startup.py -a
|
||||
```
|
||||
|
||||
### 起動インターフェースの例
|
||||
|
||||
1. FastAPI docs インターフェース
|
||||
|
||||

|
||||
|
||||
2. webui ページ
|
||||
|
||||
- Web UI ダイアログページ:
|
||||
|
||||

|
||||
|
||||
- Web UI ナレッジベースマネジメントページ:
|
||||
|
||||

|
||||
|
||||
### 注
|
||||
|
||||
上記の手順はクイックスタートのために提供されています。より多くの機能が必要な場合や、起動方法をカスタマイズしたい場合は、[Wiki](https://github.com/chatchat-space/Langchain-Chatchat/wiki/)
|
||||
を参照してください。
|
||||
|
||||
---
|
||||
|
||||
## プロジェクトのマイルストーン
|
||||
|
||||
+ `2023 年 4 月`: `Langchain-ChatGLM 0.1.0` がリリースされ、ChatGLM-6B モデルに基づくローカル ナレッジ ベースの質問と回答がサポートされました。
|
||||
+ `2023 年 8 月`: `Langchain-ChatGLM` は `Langchain-Chatchat` に名前変更され、モデル読み込みソリューションとして `fastchat` を使用し、より多くのモデルとデータベースをサポートする `0.2.0` がリリースされました。
|
||||
+ `2023 年 10 月`: `Langchain-Chatchat 0.2.5` がリリースされ、Agent 機能が追加されました。また、本オープンソース プロジェクトは `Founder Park & Zhipu AI & Zilliz` が開催したハッカソンで三等賞を受賞しました。
|
||||
+ `2023 年 12 月`: `Langchain-Chatchat` オープンソース プロジェクトは **20,000** 以上のスターを獲得しました。
|
||||
+ `2024 年 1 月`: `LangChain 0.1.x` がリリースされ、`Langchain-Chatchat 0.2.x` は安定版 `0.2.10` をリリースしました。今後はアップデートと技術サポートを停止し、より適用性の高い `Langchain-Chatchat 0.3.x` の開発に努める予定です。
|
||||
|
||||
+ 🔥 これからのChatchatストーリーを一緒に楽しみにしましょう···
|
||||
|
||||
---
|
||||
|
||||
## お問い合わせ
|
||||
|
||||
### Telegram
|
||||
|
||||
[](https://t.me/+RjliQ3jnJ1YyN2E9)
|
||||
|
||||
### WeChat グループ
|
||||
|
||||
<img src="img/qr_code_90.jpg" alt="二维码" width="300" height="300" />
|
||||
|
||||
### WeChat 公式アカウント
|
||||
|
||||
<img src="img/official_wechat_mp_account.png" alt="图片" width="900" height="300" />
|
||||
@ -1 +0,0 @@
|
||||
from .chatglm_with_shared_memory_openai_llm import *
|
||||
22
chains/llmchain_with_history.py
Normal file
@ -0,0 +1,22 @@
|
||||
from server.utils import get_ChatOpenAI
|
||||
from configs.model_config import LLM_MODELS, TEMPERATURE
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.prompts.chat import (
|
||||
ChatPromptTemplate,
|
||||
HumanMessagePromptTemplate,
|
||||
)
|
||||
|
||||
model = get_ChatOpenAI(model_name=LLM_MODELS[0], temperature=TEMPERATURE)
|
||||
|
||||
|
||||
human_prompt = "{input}"
|
||||
human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)
|
||||
|
||||
chat_prompt = ChatPromptTemplate.from_messages(
|
||||
[("human", "我们来玩成语接龙,我先来,生龙活虎"),
|
||||
("ai", "虎头虎脑"),
|
||||
("human", "{input}")])
|
||||
|
||||
|
||||
chain = LLMChain(prompt=chat_prompt, llm=model, verbose=True)
|
||||
print(chain({"input": "恼羞成怒"}))
|
||||
@ -1,215 +0,0 @@
|
||||
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
from langchain.vectorstores import FAISS
|
||||
from langchain.document_loaders import UnstructuredFileLoader
|
||||
from models.chatglm_llm import ChatGLM
|
||||
from configs.model_config import *
|
||||
import datetime
|
||||
from textsplitter import ChineseTextSplitter
|
||||
from typing import List, Tuple
|
||||
from langchain.docstore.document import Document
|
||||
import numpy as np
|
||||
|
||||
# return top-k text chunk from vector store
|
||||
VECTOR_SEARCH_TOP_K = 6
|
||||
|
||||
# LLM input history length
|
||||
LLM_HISTORY_LEN = 3
|
||||
|
||||
|
||||
def load_file(filepath):
|
||||
if filepath.lower().endswith(".md"):
|
||||
loader = UnstructuredFileLoader(filepath, mode="elements")
|
||||
docs = loader.load()
|
||||
elif filepath.lower().endswith(".pdf"):
|
||||
loader = UnstructuredFileLoader(filepath)
|
||||
textsplitter = ChineseTextSplitter(pdf=True)
|
||||
docs = loader.load_and_split(textsplitter)
|
||||
else:
|
||||
loader = UnstructuredFileLoader(filepath, mode="elements")
|
||||
textsplitter = ChineseTextSplitter(pdf=False)
|
||||
docs = loader.load_and_split(text_splitter=textsplitter)
|
||||
return docs
|
||||
|
||||
def generate_prompt(related_docs: List[str],
|
||||
query: str,
|
||||
prompt_template=PROMPT_TEMPLATE) -> str:
|
||||
context = "\n".join([doc.page_content for doc in related_docs])
|
||||
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
|
||||
return prompt
|
||||
|
||||
|
||||
def get_docs_with_score(docs_with_score):
|
||||
docs=[]
|
||||
for doc, score in docs_with_score:
|
||||
doc.metadata["score"] = score
|
||||
docs.append(doc)
|
||||
return docs
|
||||
|
||||
|
||||
def seperate_list(ls: List[int]) -> List[List[int]]:
|
||||
lists = []
|
||||
ls1 = [ls[0]]
|
||||
for i in range(1, len(ls)):
|
||||
if ls[i-1] + 1 == ls[i]:
|
||||
ls1.append(ls[i])
|
||||
else:
|
||||
lists.append(ls1)
|
||||
ls1 = [ls[i]]
|
||||
lists.append(ls1)
|
||||
return lists
|
||||
|
||||
|
||||
|
||||
def similarity_search_with_score_by_vector(
|
||||
self,
|
||||
embedding: List[float],
|
||||
k: int = 4,
|
||||
) -> List[Tuple[Document, float]]:
|
||||
scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
|
||||
docs = []
|
||||
id_set = set()
|
||||
for j, i in enumerate(indices[0]):
|
||||
if i == -1:
|
||||
# This happens when not enough docs are returned.
|
||||
continue
|
||||
_id = self.index_to_docstore_id[i]
|
||||
doc = self.docstore.search(_id)
|
||||
id_set.add(i)
|
||||
docs_len = len(doc.page_content)
|
||||
for k in range(1, max(i, len(docs)-i)):
|
||||
for l in [i+k, i-k]:
|
||||
if 0 <= l < len(self.index_to_docstore_id):
|
||||
_id0 = self.index_to_docstore_id[l]
|
||||
doc0 = self.docstore.search(_id0)
|
||||
if docs_len + len(doc0.page_content) > self.chunk_size:
|
||||
break
|
||||
elif doc0.metadata["source"] == doc.metadata["source"]:
|
||||
docs_len += len(doc0.page_content)
|
||||
id_set.add(l)
|
||||
id_list = sorted(list(id_set))
|
||||
id_lists = seperate_list(id_list)
|
||||
for id_seq in id_lists:
|
||||
for id in id_seq:
|
||||
if id == id_seq[0]:
|
||||
_id = self.index_to_docstore_id[id]
|
||||
doc = self.docstore.search(_id)
|
||||
else:
|
||||
_id0 = self.index_to_docstore_id[id]
|
||||
doc0 = self.docstore.search(_id0)
|
||||
doc.page_content += doc0.page_content
|
||||
if not isinstance(doc, Document):
|
||||
raise ValueError(f"Could not find document for id {_id}, got {doc}")
|
||||
docs.append((doc, scores[0][j]))
|
||||
return docs
|
||||
|
||||
|
||||
|
||||
class LocalDocQA:
|
||||
llm: object = None
|
||||
embeddings: object = None
|
||||
top_k: int = VECTOR_SEARCH_TOP_K
|
||||
chunk_size: int = CHUNK_SIZE
|
||||
|
||||
def init_cfg(self,
|
||||
embedding_model: str = EMBEDDING_MODEL,
|
||||
embedding_device=EMBEDDING_DEVICE,
|
||||
llm_history_len: int = LLM_HISTORY_LEN,
|
||||
llm_model: str = LLM_MODEL,
|
||||
llm_device=LLM_DEVICE,
|
||||
top_k=VECTOR_SEARCH_TOP_K,
|
||||
use_ptuning_v2: bool = USE_PTUNING_V2
|
||||
):
|
||||
self.llm = ChatGLM()
|
||||
self.llm.load_model(model_name_or_path=llm_model_dict[llm_model],
|
||||
llm_device=llm_device,
|
||||
use_ptuning_v2=use_ptuning_v2)
|
||||
self.llm.history_len = llm_history_len
|
||||
|
||||
self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[embedding_model],
|
||||
model_kwargs={'device': embedding_device})
|
||||
self.top_k = top_k
|
||||
|
||||
def init_knowledge_vector_store(self,
|
||||
filepath: str or List[str],
|
||||
vs_path: str or os.PathLike = None):
|
||||
loaded_files = []
|
||||
if isinstance(filepath, str):
|
||||
if not os.path.exists(filepath):
|
||||
print("路径不存在")
|
||||
return None
|
||||
elif os.path.isfile(filepath):
|
||||
file = os.path.split(filepath)[-1]
|
||||
try:
|
||||
docs = load_file(filepath)
|
||||
print(f"{file} 已成功加载")
|
||||
loaded_files.append(filepath)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print(f"{file} 未能成功加载")
|
||||
return None
|
||||
elif os.path.isdir(filepath):
|
||||
docs = []
|
||||
for file in os.listdir(filepath):
|
||||
fullfilepath = os.path.join(filepath, file)
|
||||
try:
|
||||
docs += load_file(fullfilepath)
|
||||
print(f"{file} 已成功加载")
|
||||
loaded_files.append(fullfilepath)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print(f"{file} 未能成功加载")
|
||||
else:
|
||||
docs = []
|
||||
for file in filepath:
|
||||
try:
|
||||
docs += load_file(file)
|
||||
print(f"{file} 已成功加载")
|
||||
loaded_files.append(file)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print(f"{file} 未能成功加载")
|
||||
if len(docs) > 0:
|
||||
if vs_path and os.path.isdir(vs_path):
|
||||
vector_store = FAISS.load_local(vs_path, self.embeddings)
|
||||
vector_store.add_documents(docs)
|
||||
else:
|
||||
if not vs_path:
|
||||
vs_path = f"""{VS_ROOT_PATH}{os.path.splitext(file)[0]}_FAISS_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}"""
|
||||
vector_store = FAISS.from_documents(docs, self.embeddings)
|
||||
|
||||
vector_store.save_local(vs_path)
|
||||
return vs_path, loaded_files
|
||||
else:
|
||||
print("文件均未成功加载,请检查依赖包或替换为其他文件再次上传。")
|
||||
return None, loaded_files
|
||||
|
||||
def get_knowledge_based_answer(self,
|
||||
query,
|
||||
vs_path,
|
||||
chat_history=[],
|
||||
streaming=True):
|
||||
self.llm.streaming = streaming
|
||||
vector_store = FAISS.load_local(vs_path, self.embeddings)
|
||||
FAISS.similarity_search_with_score_by_vector = similarity_search_with_score_by_vector
|
||||
vector_store.chunk_size=self.chunk_size
|
||||
related_docs_with_score = vector_store.similarity_search_with_score(query,
|
||||
k=self.top_k)
|
||||
related_docs = get_docs_with_score(related_docs_with_score)
|
||||
prompt = generate_prompt(related_docs, query)
|
||||
|
||||
if streaming:
|
||||
for result, history in self.llm._call(prompt=prompt,
|
||||
history=chat_history):
|
||||
history[-1][0] = query
|
||||
response = {"query": query,
|
||||
"result": result,
|
||||
"source_documents": related_docs}
|
||||
yield response, history
|
||||
else:
|
||||
result, history = self.llm._call(prompt=prompt,
|
||||
history=chat_history)
|
||||
history[-1][0] = query
|
||||
response = {"query": query,
|
||||
"result": result,
|
||||
"source_documents": related_docs}
|
||||
return response, history
|
||||
@ -1,34 +0,0 @@
|
||||
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
|
||||
from typing import Any, List
|
||||
|
||||
|
||||
class MyEmbeddings(HuggingFaceEmbeddings):
|
||||
def __init__(self, **kwargs: Any):
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def embed_documents(self, texts: List[str]) -> List[List[float]]:
|
||||
"""Compute doc embeddings using a HuggingFace transformer model.
|
||||
|
||||
Args:
|
||||
texts: The list of texts to embed.
|
||||
|
||||
Returns:
|
||||
List of embeddings, one for each text.
|
||||
"""
|
||||
texts = list(map(lambda x: x.replace("\n", " "), texts))
|
||||
embeddings = self.client.encode(texts, normalize_embeddings=True)
|
||||
return embeddings.tolist()
|
||||
|
||||
def embed_query(self, text: str) -> List[float]:
|
||||
"""Compute query embeddings using a HuggingFace transformer model.
|
||||
|
||||
Args:
|
||||
text: The text to embed.
|
||||
|
||||
Returns:
|
||||
Embeddings for the text.
|
||||
"""
|
||||
text = text.replace("\n", " ")
|
||||
embedding = self.client.encode(text, normalize_embeddings=True)
|
||||
return embedding.tolist()
|
||||
@ -1,121 +0,0 @@
|
||||
from langchain.vectorstores import FAISS
|
||||
from typing import Any, Callable, List, Optional, Tuple, Dict
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.docstore.base import Docstore
|
||||
|
||||
from langchain.vectorstores.utils import maximal_marginal_relevance
|
||||
from langchain.embeddings.base import Embeddings
|
||||
import uuid
|
||||
from langchain.docstore.in_memory import InMemoryDocstore
|
||||
|
||||
import numpy as np
|
||||
|
||||
def dependable_faiss_import() -> Any:
|
||||
"""Import faiss if available, otherwise raise error."""
|
||||
try:
|
||||
import faiss
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import faiss python package. "
|
||||
"Please install it with `pip install faiss` "
|
||||
"or `pip install faiss-cpu` (depending on Python version)."
|
||||
)
|
||||
return faiss
|
||||
|
||||
class FAISSVS(FAISS):
|
||||
def __init__(self,
|
||||
embedding_function: Callable[..., Any],
|
||||
index: Any,
|
||||
docstore: Docstore,
|
||||
index_to_docstore_id: Dict[int, str]):
|
||||
super().__init__(embedding_function, index, docstore, index_to_docstore_id)
|
||||
|
||||
def max_marginal_relevance_search_by_vector(
|
||||
self, embedding: List[float], k: int = 4, fetch_k: int = 20, **kwargs: Any
|
||||
) -> List[Tuple[Document, float]]:
|
||||
"""Return docs selected using the maximal marginal relevance.
|
||||
|
||||
Maximal marginal relevance optimizes for similarity to query AND diversity
|
||||
among selected documents.
|
||||
|
||||
Args:
|
||||
embedding: Embedding to look up documents similar to.
|
||||
k: Number of Documents to return. Defaults to 4.
|
||||
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
|
||||
|
||||
Returns:
|
||||
List of Documents with scores selected by maximal marginal relevance.
|
||||
"""
|
||||
scores, indices = self.index.search(np.array([embedding], dtype=np.float32), fetch_k)
|
||||
# -1 happens when not enough docs are returned.
|
||||
embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
|
||||
mmr_selected = maximal_marginal_relevance(
|
||||
np.array([embedding], dtype=np.float32), embeddings, k=k
|
||||
)
|
||||
selected_indices = [indices[0][i] for i in mmr_selected]
|
||||
selected_scores = [scores[0][i] for i in mmr_selected]
|
||||
docs = []
|
||||
for i, score in zip(selected_indices, selected_scores):
|
||||
if i == -1:
|
||||
# This happens when not enough docs are returned.
|
||||
continue
|
||||
_id = self.index_to_docstore_id[i]
|
||||
doc = self.docstore.search(_id)
|
||||
if not isinstance(doc, Document):
|
||||
raise ValueError(f"Could not find document for id {_id}, got {doc}")
|
||||
docs.append((doc, score))
|
||||
return docs
|
||||
|
||||
def max_marginal_relevance_search(
|
||||
self,
|
||||
query: str,
|
||||
k: int = 4,
|
||||
fetch_k: int = 20,
|
||||
**kwargs: Any,
|
||||
) -> List[Tuple[Document, float]]:
|
||||
"""Return docs selected using the maximal marginal relevance.
|
||||
|
||||
Maximal marginal relevance optimizes for similarity to query AND diversity
|
||||
among selected documents.
|
||||
|
||||
Args:
|
||||
query: Text to look up documents similar to.
|
||||
k: Number of Documents to return. Defaults to 4.
|
||||
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
|
||||
|
||||
Returns:
|
||||
List of Documents with scores selected by maximal marginal relevance.
|
||||
"""
|
||||
embedding = self.embedding_function(query)
|
||||
docs = self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k)
|
||||
return docs
|
||||
|
||||
@classmethod
|
||||
def __from(
|
||||
cls,
|
||||
texts: List[str],
|
||||
embeddings: List[List[float]],
|
||||
embedding: Embeddings,
|
||||
metadatas: Optional[List[dict]] = None,
|
||||
**kwargs: Any,
|
||||
) -> FAISS:
|
||||
faiss = dependable_faiss_import()
|
||||
index = faiss.IndexFlatIP(len(embeddings[0]))
|
||||
index.add(np.array(embeddings, dtype=np.float32))
|
||||
|
||||
# # my code, for speeding up search
|
||||
# quantizer = faiss.IndexFlatL2(len(embeddings[0]))
|
||||
# index = faiss.IndexIVFFlat(quantizer, len(embeddings[0]), 100)
|
||||
# index.train(np.array(embeddings, dtype=np.float32))
|
||||
# index.add(np.array(embeddings, dtype=np.float32))
|
||||
|
||||
documents = []
|
||||
for i, text in enumerate(texts):
|
||||
metadata = metadatas[i] if metadatas else {}
|
||||
documents.append(Document(page_content=text, metadata=metadata))
|
||||
index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
|
||||
docstore = InMemoryDocstore(
|
||||
{index_to_id[i]: doc for i, doc in enumerate(documents)}
|
||||
)
|
||||
return cls(embedding.embed_query, index, docstore, index_to_id)
|
||||
|
||||
@ -1,52 +0,0 @@
|
||||
import os
|
||||
import pinecone
|
||||
from tqdm import tqdm
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.text_splitter import SpacyTextSplitter
|
||||
from langchain.document_loaders import TextLoader
|
||||
from langchain.document_loaders import DirectoryLoader
|
||||
from langchain.indexes import VectorstoreIndexCreator
|
||||
from langchain.embeddings.openai import OpenAIEmbeddings
|
||||
from langchain.vectorstores import Pinecone
|
||||
|
||||
#一些配置文件
|
||||
openai_key="你的key" # 注册 openai.com 后获得
|
||||
pinecone_key="你的key" # 注册 app.pinecone.io 后获得
|
||||
pinecone_index="你的库" #app.pinecone.io 获得
|
||||
pinecone_environment="你的Environment" # 登录pinecone后,在indexes页面 查看Environment
|
||||
pinecone_namespace="你的Namespace" #如果不存在自动创建
|
||||
|
||||
#科学上网你懂得
|
||||
os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'
|
||||
os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'
|
||||
|
||||
#初始化pinecone
|
||||
pinecone.init(
|
||||
api_key=pinecone_key,
|
||||
environment=pinecone_environment
|
||||
)
|
||||
index = pinecone.Index(pinecone_index)
|
||||
|
||||
#初始化OpenAI的embeddings
|
||||
embeddings = OpenAIEmbeddings(openai_api_key=openai_key)
|
||||
|
||||
#初始化text_splitter
|
||||
text_splitter = SpacyTextSplitter(pipeline='zh_core_web_sm',chunk_size=1000,chunk_overlap=200)
|
||||
|
||||
# 读取目录下所有后缀是txt的文件
|
||||
loader = DirectoryLoader('../docs', glob="**/*.txt", loader_cls=TextLoader)
|
||||
|
||||
#读取文本文件
|
||||
documents = loader.load()
|
||||
|
||||
# 使用text_splitter对文档进行分割
|
||||
split_text = text_splitter.split_documents(documents)
|
||||
try:
|
||||
for document in tqdm(split_text):
|
||||
# 获取向量并储存到pinecone
|
||||
Pinecone.from_documents([document], embeddings, index_name=pinecone_index)
|
||||
except Exception as e:
|
||||
print(f"Error: {e}")
|
||||
quit()
|
||||
|
||||
|
||||
43
cli_demo.py
@ -1,43 +0,0 @@
|
||||
from configs.model_config import *
|
||||
from chains.local_doc_qa import LocalDocQA
|
||||
import os
|
||||
import nltk
|
||||
|
||||
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")] + nltk.data.path
|
||||
|
||||
# return top-k text chunk from vector store
|
||||
VECTOR_SEARCH_TOP_K = 6
|
||||
|
||||
# LLM input history length
|
||||
LLM_HISTORY_LEN = 3
|
||||
|
||||
# Show reply with source text from input document
|
||||
REPLY_WITH_SOURCE = True
|
||||
|
||||
if __name__ == "__main__":
|
||||
local_doc_qa = LocalDocQA()
|
||||
local_doc_qa.init_cfg(llm_model=LLM_MODEL,
|
||||
embedding_model=EMBEDDING_MODEL,
|
||||
embedding_device=EMBEDDING_DEVICE,
|
||||
llm_history_len=LLM_HISTORY_LEN,
|
||||
top_k=VECTOR_SEARCH_TOP_K)
|
||||
vs_path = None
|
||||
while not vs_path:
|
||||
filepath = input("Input your local knowledge file path 请输入本地知识文件路径:")
|
||||
vs_path, _ = local_doc_qa.init_knowledge_vector_store(filepath)
|
||||
history = []
|
||||
while True:
|
||||
query = input("Input your question 请输入问题:")
|
||||
last_print_len = 0
|
||||
for resp, history in local_doc_qa.get_knowledge_based_answer(query=query,
|
||||
vs_path=vs_path,
|
||||
chat_history=history,
|
||||
streaming=True):
|
||||
print(resp["result"][last_print_len:], end="", flush=True)
|
||||
last_print_len = len(resp["result"])
|
||||
if REPLY_WITH_SOURCE:
|
||||
source_text = [f"""出处 [{inum + 1}] {os.path.split(doc.metadata['source'])[-1]}:\n\n{doc.page_content}\n\n"""
|
||||
# f"""相关度:{doc.metadata['score']}\n\n"""
|
||||
for inum, doc in
|
||||
enumerate(resp["source_documents"])]
|
||||
print("\n\n" + "\n\n".join(source_text))
|
||||
0
common/__init__.py
Normal file
8
configs/__init__.py
Normal file
@@ -0,0 +1,8 @@
from .basic_config import *
from .model_config import *
from .kb_config import *
from .server_config import *
from .prompt_config import *


VERSION = "v0.2.10"
32
configs/basic_config.py.example
Normal file
@@ -0,0 +1,32 @@
import logging
import os
import langchain
import tempfile
import shutil


# Whether to show verbose logs
log_verbose = False
langchain.verbose = False

# Normally the settings below do not need to be changed

# Log format
LOG_FORMAT = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(format=LOG_FORMAT)


# Log storage path
LOG_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "logs")
if not os.path.exists(LOG_PATH):
    os.mkdir(LOG_PATH)

# Temporary file directory, mainly used for file-based chat
BASE_TEMP_DIR = os.path.join(tempfile.gettempdir(), "chatchat")
try:
    shutil.rmtree(BASE_TEMP_DIR)
except Exception:
    pass
os.makedirs(BASE_TEMP_DIR, exist_ok=True)
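Other modules are expected to pick up these shared objects via the configs package rather than configuring logging themselves. A minimal sketch of that usage; the consuming module is hypothetical, and the import path assumes configs/__init__.py re-exports basic_config as shown above:

```python
# Hypothetical consumer module: illustrates how the shared logger is meant to be used.
from configs import logger, log_verbose, BASE_TEMP_DIR

logger.info("temporary files go to %s", BASE_TEMP_DIR)
if log_verbose:
    logger.debug("verbose logging is enabled")
```
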
146
configs/kb_config.py.example
Normal file
@@ -0,0 +1,146 @@
import os

# Default knowledge base
DEFAULT_KNOWLEDGE_BASE = "samples"

# Default vector store / full-text search engine type. Options: faiss, milvus (offline) & zilliz (online), pgvector, chromadb; full-text search engine: es
DEFAULT_VS_TYPE = "faiss"

# Number of cached vector stores (for FAISS)
CACHED_VS_NUM = 1

# Number of cached temporary vector stores (for FAISS), used for file-based chat
CACHED_MEMO_VS_NUM = 10

# Length of a single text chunk in the knowledge base (does not apply to MarkdownHeaderTextSplitter)
CHUNK_SIZE = 250

# Overlap length between adjacent text chunks in the knowledge base (does not apply to MarkdownHeaderTextSplitter)
OVERLAP_SIZE = 50

# Number of matched vectors returned from the knowledge base
VECTOR_SEARCH_TOP_K = 3

# Distance threshold for knowledge base matching, usually between 0 and 1; a smaller SCORE means a smaller distance and therefore higher relevance.
# Some users have reported match scores above 1, so the default is 1 for compatibility; the adjustable range in the WebUI is 0-2.
SCORE_THRESHOLD = 1.0

# Default search engine. Options: bing, duckduckgo, metaphor
DEFAULT_SEARCH_ENGINE = "duckduckgo"

# Number of search engine results to use
SEARCH_ENGINE_TOP_K = 3


# Required variables for Bing search
# Bing search requires a Bing Subscription Key; apply for a Bing Search trial in the Azure portal.
# For details on how to apply, see
# https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/create-bing-search-service-resource
# For creating a Bing search instance with Python, see:
# https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/quickstarts/rest/python
BING_SEARCH_URL = "https://api.bing.microsoft.com/v7.0/search"
# Note: this is not the Bing Webmaster Tools API key.

# Also, if you are on a server and get "Failed to establish a new connection: [Errno 110] Connection timed out",
# the server is behind a firewall; ask the administrator to whitelist the address (on a corporate server this may not be possible).
BING_SUBSCRIPTION_KEY = ""

# metaphor search requires a KEY
METAPHOR_API_KEY = ""

# Seniverse weather API KEY, used by the weather Agent. Apply at: https://www.seniverse.com/
SENIVERSE_API_KEY = ""

# Whether to enable Chinese title enhancement, and its related settings.
# Title detection marks which text segments are titles in the metadata,
# then concatenates each segment with the title one level above it to enrich the text.
ZH_TITLE_ENHANCE = False

# PDF OCR control: only run OCR on images whose size exceeds the given ratios of the page (image width / page width, image height / page height).
# This avoids noise from small images in a PDF and speeds up processing of non-scanned PDFs.
PDF_OCR_THRESHOLD = (0.6, 0.6)

# Initial description of each knowledge base, shown at initialization and used for Agent calls.
# If not set, the knowledge base has no description and will not be called by an Agent.
KB_INFO = {
    "知识库名称": "知识库介绍",
    "samples": "关于本项目issue的解答",
}


# Normally the settings below do not need to be changed

# Default storage path for knowledge bases
KB_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "knowledge_base")
if not os.path.exists(KB_ROOT_PATH):
    os.mkdir(KB_ROOT_PATH)
# Default storage path for the database.
# If you use sqlite, you can modify DB_ROOT_PATH directly; for other databases, modify SQLALCHEMY_DATABASE_URI instead.
DB_ROOT_PATH = os.path.join(KB_ROOT_PATH, "info.db")
SQLALCHEMY_DATABASE_URI = f"sqlite:///{DB_ROOT_PATH}"

# Available vector store types and their settings
kbs_config = {
    "faiss": {
    },
    "milvus": {
        "host": "127.0.0.1",
        "port": "19530",
        "user": "",
        "password": "",
        "secure": False,
    },
    "zilliz": {
        "host": "in01-a7ce524e41e3935.ali-cn-hangzhou.vectordb.zilliz.com.cn",
        "port": "19530",
        "user": "",
        "password": "",
        "secure": True,
    },
    "pg": {
        "connection_uri": "postgresql://postgres:postgres@127.0.0.1:5432/langchain_chatchat",
    },

    "es": {
        "host": "127.0.0.1",
        "port": "9200",
        "index_name": "test_index",
        "user": "",
        "password": ""
    },
    "milvus_kwargs": {
        "search_params": {"metric_type": "L2"},  # add search_params here
        "index_params": {"metric_type": "L2", "index_type": "HNSW"}  # add index_params here
    },
    "chromadb": {}
}

# TextSplitter settings; do not modify them if you are unsure what they mean.
text_splitter_dict = {
    "ChineseRecursiveTextSplitter": {
        "source": "huggingface",  # choose tiktoken to use OpenAI's tokenization instead
        "tokenizer_name_or_path": "",
    },
    "SpacyTextSplitter": {
        "source": "huggingface",
        "tokenizer_name_or_path": "gpt2",
    },
    "RecursiveCharacterTextSplitter": {
        "source": "tiktoken",
        "tokenizer_name_or_path": "cl100k_base",
    },
    "MarkdownHeaderTextSplitter": {
        "headers_to_split_on":
            [
                ("#", "head1"),
                ("##", "head2"),
                ("###", "head3"),
                ("####", "head4"),
            ]
    },
}

# Name of the TEXT_SPLITTER to use
TEXT_SPLITTER_NAME = "ChineseRecursiveTextSplitter"

# Vocabulary file for custom keywords in the Embedding model
EMBEDDING_KEYWORD_FILE = "embedding_keywords.txt"

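For reference, the MarkdownHeaderTextSplitter entry above maps onto langchain's splitter of the same name; a minimal sketch of what the headers_to_split_on tuples do, with an illustrative markdown snippet:

```python
from langchain.text_splitter import MarkdownHeaderTextSplitter

headers_to_split_on = [("#", "head1"), ("##", "head2"), ("###", "head3"), ("####", "head4")]
splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)

docs = splitter.split_text("# 标题\n正文一\n## 小节\n正文二")
for doc in docs:
    # Each chunk carries its enclosing headers in metadata, e.g. {"head1": "标题", "head2": "小节"}.
    print(doc.metadata, doc.page_content)
```
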
@@ -1,45 +0,0 @@
import torch.cuda
import torch.backends
import os

embedding_model_dict = {
    "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
    "ernie-base": "nghuyong/ernie-3.0-base-zh",
    "text2vec-base": "shibing624/text2vec-base-chinese",
    "text2vec": "GanymedeNil/text2vec-large-chinese",
}

# Embedding model name
EMBEDDING_MODEL = "text2vec"

# Embedding running device
EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

# supported LLM models
llm_model_dict = {
    "chatyuan": "ClueAI/ChatYuan-large-v2",
    "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
    "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
    "chatglm-6b-int8": "THUDM/chatglm-6b-int8",
    "chatglm-6b": "THUDM/chatglm-6b",
}

# LLM model name
LLM_MODEL = "chatglm-6b"

# Use p-tuning-v2 PrefixEncoder
USE_PTUNING_V2 = False

# LLM running device
LLM_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

VS_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "vector_store", "")

UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "content", "")

# Context-based prompt template; be sure to keep "{question}" and "{context}"
PROMPT_TEMPLATE = """基于以下已知信息,简洁和专业的来回答用户的问题,问题是"{question}"。如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。已知内容如下:
{context} """

# Length of a single context chunk after matching
CHUNK_SIZE = 500

327
configs/model_config.py.example
Normal file
@@ -0,0 +1,327 @@
import os

# You can specify an absolute path here to keep all Embedding and LLM models in one place.
# Each model can be a separate directory, or a second-level subdirectory under some directory.
# If a model directory name matches a key or value in MODEL_PATH, the program detects and loads it automatically; the paths in MODEL_PATH need not be changed.
MODEL_ROOT_PATH = ""

# Name of the selected Embedding model
EMBEDDING_MODEL = "bge-large-zh-v1.5"

# Device the Embedding model runs on. "auto" detects automatically (with a warning); you can also set "cuda", "mps", "cpu" or "xpu" explicitly.
EMBEDDING_DEVICE = "auto"

# Selected reranker model
RERANKER_MODEL = "bge-reranker-large"
# Whether to enable the reranker model
USE_RERANKER = False
RERANKER_MAX_LENGTH = 1024

# Configure this if you need to add custom keywords to the EMBEDDING_MODEL
EMBEDDING_KEYWORD_FILE = "keywords.txt"
EMBEDDING_MODEL_OUTPUT_PATH = "output"

# Names of the LLMs to run; both local and online models may be listed. All local models in the list are loaded when the project starts.
# The first model in the list is the default model for the API and the WebUI.
# Here we use two mainstream offline models, with chatglm3-6b as the default model to load.
# If you are short on GPU memory, you can use Qwen-1_8B-Chat, which needs only about 3.8 GB of GPU memory in FP16.

LLM_MODELS = ["chatglm3-6b", "zhipu-api", "openai-api"]
Agent_MODEL = None

# Device the LLM runs on. "auto" detects automatically (with a warning); you can also set "cuda", "mps", "cpu" or "xpu" explicitly.
LLM_DEVICE = "auto"

HISTORY_LEN = 3

MAX_TOKENS = 2048

TEMPERATURE = 0.7

ONLINE_LLM_MODEL = {
|
||||
"openai-api": {
|
||||
"model_name": "gpt-4",
|
||||
"api_base_url": "https://api.openai.com/v1",
|
||||
"api_key": "",
|
||||
"openai_proxy": "",
|
||||
},
|
||||
|
||||
# 智谱AI API,具体注册及api key获取请前往 http://open.bigmodel.cn
|
||||
"zhipu-api": {
|
||||
"api_key": "",
|
||||
"version": "glm-4",
|
||||
"provider": "ChatGLMWorker",
|
||||
},
|
||||
|
||||
# 具体注册及api key获取请前往 https://api.minimax.chat/
|
||||
"minimax-api": {
|
||||
"group_id": "",
|
||||
"api_key": "",
|
||||
"is_pro": False,
|
||||
"provider": "MiniMaxWorker",
|
||||
},
|
||||
|
||||
# 具体注册及api key获取请前往 https://xinghuo.xfyun.cn/
|
||||
"xinghuo-api": {
|
||||
"APPID": "",
|
||||
"APISecret": "",
|
||||
"api_key": "",
|
||||
"version": "v3.5", # 你使用的讯飞星火大模型版本,可选包括 "v3.5","v3.0", "v2.0", "v1.5"
|
||||
"provider": "XingHuoWorker",
|
||||
},
|
||||
|
||||
# 百度千帆 API,申请方式请参考 https://cloud.baidu.com/doc/WENXINWORKSHOP/s/4lilb2lpf
|
||||
"qianfan-api": {
|
||||
"version": "ERNIE-Bot", # 注意大小写。当前支持 "ERNIE-Bot" 或 "ERNIE-Bot-turbo", 更多的见官方文档。
|
||||
"version_url": "", # 也可以不填写version,直接填写在千帆申请模型发布的API地址
|
||||
"api_key": "",
|
||||
"secret_key": "",
|
||||
"provider": "QianFanWorker",
|
||||
},
|
||||
|
||||
# 火山方舟 API,文档参考 https://www.volcengine.com/docs/82379
|
||||
"fangzhou-api": {
|
||||
"version": "", # 对应火山方舟的 endpoint_id
|
||||
"version_url": "",
|
||||
"api_key": "",
|
||||
"secret_key": "",
|
||||
"provider": "FangZhouWorker",
|
||||
},
|
||||
|
||||
# 阿里云通义千问 API,文档参考 https://help.aliyun.com/zh/dashscope/developer-reference/api-details
|
||||
"qwen-api": {
|
||||
"version": "qwen-max",
|
||||
"api_key": "",
|
||||
"provider": "QwenWorker",
|
||||
"embed_model": "text-embedding-v1" # embedding 模型名称
|
||||
},
|
||||
|
||||
# 百川 API,申请方式请参考 https://www.baichuan-ai.com/home#api-enter
|
||||
"baichuan-api": {
|
||||
"version": "Baichuan2-53B",
|
||||
"api_key": "",
|
||||
"secret_key": "",
|
||||
"provider": "BaiChuanWorker",
|
||||
},
|
||||
|
||||
# Azure API
|
||||
"azure-api": {
|
||||
"deployment_name": "", # 部署容器的名字
|
||||
"resource_name": "", # https://{resource_name}.openai.azure.com/openai/ 填写resource_name的部分,其他部分不要填写
|
||||
"api_version": "", # API的版本,不是模型版本
|
||||
"api_key": "",
|
||||
"provider": "AzureWorker",
|
||||
},
|
||||
|
||||
# 昆仑万维天工 API https://model-platform.tiangong.cn/
|
||||
"tiangong-api": {
|
||||
"version": "SkyChat-MegaVerse",
|
||||
"api_key": "",
|
||||
"secret_key": "",
|
||||
"provider": "TianGongWorker",
|
||||
},
|
||||
|
||||
# Gemini API https://makersuite.google.com/app/apikey
|
||||
"gemini-api": {
|
||||
"api_key": "",
|
||||
"provider": "GeminiWorker",
|
||||
},
|
||||
|
||||
# Claude API : https://www.anthropic.com/api
|
||||
# Available models:
|
||||
# Claude 3 Opus: claude-3-opus-20240229
|
||||
# Claude 3 Sonnet claude-3-sonnet-20240229
|
||||
# Claude 3 Haiku claude-3-haiku-20240307
|
||||
"claude-api": {
|
||||
"api_key": "",
|
||||
"version": "2023-06-01",
|
||||
"model_name":"claude-3-opus-20240229",
|
||||
"provider": "ClaudeWorker",
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
# 在以下字典中修改属性值,以指定本地embedding模型存储位置。支持3种设置方法:
|
||||
# 1、将对应的值修改为模型绝对路径
|
||||
# 2、不修改此处的值(以 text2vec 为例):
|
||||
# 2.1 如果{MODEL_ROOT_PATH}下存在如下任一子目录:
|
||||
# - text2vec
|
||||
# - GanymedeNil/text2vec-large-chinese
|
||||
# - text2vec-large-chinese
|
||||
# 2.2 如果以上本地路径不存在,则使用huggingface模型
|
||||
|
||||
MODEL_PATH = {
|
||||
"embed_model": {
|
||||
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
|
||||
"ernie-base": "nghuyong/ernie-3.0-base-zh",
|
||||
"text2vec-base": "shibing624/text2vec-base-chinese",
|
||||
"text2vec": "GanymedeNil/text2vec-large-chinese",
|
||||
"text2vec-paraphrase": "shibing624/text2vec-base-chinese-paraphrase",
|
||||
"text2vec-sentence": "shibing624/text2vec-base-chinese-sentence",
|
||||
"text2vec-multilingual": "shibing624/text2vec-base-multilingual",
|
||||
"text2vec-bge-large-chinese": "shibing624/text2vec-bge-large-chinese",
|
||||
"m3e-small": "moka-ai/m3e-small",
|
||||
"m3e-base": "moka-ai/m3e-base",
|
||||
"m3e-large": "moka-ai/m3e-large",
|
||||
|
||||
"bge-small-zh": "BAAI/bge-small-zh",
|
||||
"bge-base-zh": "BAAI/bge-base-zh",
|
||||
"bge-large-zh": "BAAI/bge-large-zh",
|
||||
"bge-large-zh-noinstruct": "BAAI/bge-large-zh-noinstruct",
|
||||
"bge-base-zh-v1.5": "BAAI/bge-base-zh-v1.5",
|
||||
"bge-large-zh-v1.5": "BAAI/bge-large-zh-v1.5",
|
||||
|
||||
"bge-m3": "BAAI/bge-m3",
|
||||
|
||||
"piccolo-base-zh": "sensenova/piccolo-base-zh",
|
||||
"piccolo-large-zh": "sensenova/piccolo-large-zh",
|
||||
"nlp_gte_sentence-embedding_chinese-large": "damo/nlp_gte_sentence-embedding_chinese-large",
|
||||
"text-embedding-ada-002": "your OPENAI_API_KEY",
|
||||
},
|
||||
|
||||
"llm_model": {
|
||||
"chatglm2-6b": "THUDM/chatglm2-6b",
|
||||
"chatglm2-6b-32k": "THUDM/chatglm2-6b-32k",
|
||||
"chatglm3-6b": "THUDM/chatglm3-6b",
|
||||
"chatglm3-6b-32k": "THUDM/chatglm3-6b-32k",
|
||||
|
||||
"Orion-14B-Chat": "OrionStarAI/Orion-14B-Chat",
|
||||
"Orion-14B-Chat-Plugin": "OrionStarAI/Orion-14B-Chat-Plugin",
|
||||
"Orion-14B-LongChat": "OrionStarAI/Orion-14B-LongChat",
|
||||
|
||||
"Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
|
||||
"Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
|
||||
"Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf",
|
||||
|
||||
"Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat",
|
||||
"Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
|
||||
"Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
|
||||
"Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",
|
||||
|
||||
# Qwen1.5 模型 VLLM可能出现问题
|
||||
"Qwen1.5-0.5B-Chat": "Qwen/Qwen1.5-0.5B-Chat",
|
||||
"Qwen1.5-1.8B-Chat": "Qwen/Qwen1.5-1.8B-Chat",
|
||||
"Qwen1.5-4B-Chat": "Qwen/Qwen1.5-4B-Chat",
|
||||
"Qwen1.5-7B-Chat": "Qwen/Qwen1.5-7B-Chat",
|
||||
"Qwen1.5-14B-Chat": "Qwen/Qwen1.5-14B-Chat",
|
||||
"Qwen1.5-72B-Chat": "Qwen/Qwen1.5-72B-Chat",
|
||||
|
||||
"baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat",
|
||||
"baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
|
||||
"baichuan2-7b-chat": "baichuan-inc/Baichuan2-7B-Chat",
|
||||
"baichuan2-13b-chat": "baichuan-inc/Baichuan2-13B-Chat",
|
||||
|
||||
"internlm-7b": "internlm/internlm-7b",
|
||||
"internlm-chat-7b": "internlm/internlm-chat-7b",
|
||||
"internlm2-chat-7b": "internlm/internlm2-chat-7b",
|
||||
"internlm2-chat-20b": "internlm/internlm2-chat-20b",
|
||||
|
||||
"BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat",
|
||||
"BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k",
|
||||
|
||||
"Yi-34B-Chat": "https://huggingface.co/01-ai/Yi-34B-Chat",
|
||||
|
||||
"agentlm-7b": "THUDM/agentlm-7b",
|
||||
"agentlm-13b": "THUDM/agentlm-13b",
|
||||
"agentlm-70b": "THUDM/agentlm-70b",
|
||||
|
||||
"falcon-7b": "tiiuae/falcon-7b",
|
||||
"falcon-40b": "tiiuae/falcon-40b",
|
||||
"falcon-rw-7b": "tiiuae/falcon-rw-7b",
|
||||
|
||||
"aquila-7b": "BAAI/Aquila-7B",
|
||||
"aquilachat-7b": "BAAI/AquilaChat-7B",
|
||||
"open_llama_13b": "openlm-research/open_llama_13b",
|
||||
"vicuna-13b-v1.5": "lmsys/vicuna-13b-v1.5",
|
||||
"koala": "young-geng/koala",
|
||||
"mpt-7b": "mosaicml/mpt-7b",
|
||||
"mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter",
|
||||
"mpt-30b": "mosaicml/mpt-30b",
|
||||
"opt-66b": "facebook/opt-66b",
|
||||
"opt-iml-max-30b": "facebook/opt-iml-max-30b",
|
||||
"gpt2": "gpt2",
|
||||
"gpt2-xl": "gpt2-xl",
|
||||
"gpt-j-6b": "EleutherAI/gpt-j-6b",
|
||||
"gpt4all-j": "nomic-ai/gpt4all-j",
|
||||
"gpt-neox-20b": "EleutherAI/gpt-neox-20b",
|
||||
"pythia-12b": "EleutherAI/pythia-12b",
|
||||
"oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
|
||||
"dolly-v2-12b": "databricks/dolly-v2-12b",
|
||||
"stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b",
|
||||
},
|
||||
|
||||
"reranker": {
|
||||
"bge-reranker-large": "BAAI/bge-reranker-large",
|
||||
"bge-reranker-base": "BAAI/bge-reranker-base",
|
||||
}
|
||||
}
|
||||
|
||||
# 通常情况下不需要更改以下内容
|
||||
|
||||
# nltk 模型存储路径
|
||||
NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data")
|
||||
|
||||
# 使用VLLM可能导致模型推理能力下降,无法完成Agent任务
|
||||
VLLM_MODEL_DICT = {
|
||||
"chatglm2-6b": "THUDM/chatglm2-6b",
|
||||
"chatglm2-6b-32k": "THUDM/chatglm2-6b-32k",
|
||||
"chatglm3-6b": "THUDM/chatglm3-6b",
|
||||
"chatglm3-6b-32k": "THUDM/chatglm3-6b-32k",
|
||||
|
||||
"Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
|
||||
"Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
|
||||
"Llama-2-70b-chat-hf": "meta-llama/Llama-2-70b-chat-hf",
|
||||
|
||||
"Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat",
|
||||
"Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
|
||||
"Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
|
||||
"Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",
|
||||
|
||||
"baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat",
|
||||
"baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
|
||||
"baichuan2-7b-chat": "baichuan-inc/Baichuan-7B-Chat",
|
||||
"baichuan2-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
|
||||
|
||||
"BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat",
|
||||
"BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k",
|
||||
|
||||
"internlm-7b": "internlm/internlm-7b",
|
||||
"internlm-chat-7b": "internlm/internlm-chat-7b",
|
||||
"internlm2-chat-7b": "internlm/Models/internlm2-chat-7b",
|
||||
"internlm2-chat-20b": "internlm/Models/internlm2-chat-20b",
|
||||
|
||||
"aquila-7b": "BAAI/Aquila-7B",
|
||||
"aquilachat-7b": "BAAI/AquilaChat-7B",
|
||||
|
||||
"falcon-7b": "tiiuae/falcon-7b",
|
||||
"falcon-40b": "tiiuae/falcon-40b",
|
||||
"falcon-rw-7b": "tiiuae/falcon-rw-7b",
|
||||
"gpt2": "gpt2",
|
||||
"gpt2-xl": "gpt2-xl",
|
||||
"gpt-j-6b": "EleutherAI/gpt-j-6b",
|
||||
"gpt4all-j": "nomic-ai/gpt4all-j",
|
||||
"gpt-neox-20b": "EleutherAI/gpt-neox-20b",
|
||||
"pythia-12b": "EleutherAI/pythia-12b",
|
||||
"oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
|
||||
"dolly-v2-12b": "databricks/dolly-v2-12b",
|
||||
"stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b",
|
||||
"open_llama_13b": "openlm-research/open_llama_13b",
|
||||
"vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3",
|
||||
"koala": "young-geng/koala",
|
||||
"mpt-7b": "mosaicml/mpt-7b",
|
||||
"mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter",
|
||||
"mpt-30b": "mosaicml/mpt-30b",
|
||||
"opt-66b": "facebook/opt-66b",
|
||||
"opt-iml-max-30b": "facebook/opt-iml-max-30b",
|
||||
|
||||
}
|
||||
|
||||
SUPPORT_AGENT_MODEL = [
|
||||
"openai-api", # GPT4 模型
|
||||
"qwen-api", # Qwen Max模型
|
||||
"zhipu-api", # 智谱AI GLM4模型
|
||||
"Qwen", # 所有Qwen系列本地模型
|
||||
"chatglm3-6b",
|
||||
"internlm2-chat-20b",
|
||||
"Orion-14B-Chat-Plugin",
|
||||
]
|
||||
127
configs/prompt_config.py.example
Normal file
@@ -0,0 +1,127 @@
# Prompt templates use Jinja2 syntax; in short, double braces replace the single braces of f-strings.
# This config file supports hot reloading: after changing a prompt template there is no need to restart the service.

# Variables supported in LLM chat:
# - input: the user's input

# Variables supported in knowledge base and search engine chat:
# - context: knowledge text concatenated from the retrieved results
# - question: the question asked by the user

# Variables supported in Agent chat:

# - tools: the list of available tools
# - tool_names: the list of available tool names
# - history: the chat history between the user and the Agent
# - input: the user's input
# - agent_scratchpad: the Agent's reasoning scratchpad

PROMPT_TEMPLATES = {
|
||||
"llm_chat": {
|
||||
"default":
|
||||
'{{ input }}',
|
||||
|
||||
"with_history":
|
||||
'The following is a friendly conversation between a human and an AI. '
|
||||
'The AI is talkative and provides lots of specific details from its context. '
|
||||
'If the AI does not know the answer to a question, it truthfully says it does not know.\n\n'
|
||||
'Current conversation:\n'
|
||||
'{history}\n'
|
||||
'Human: {input}\n'
|
||||
'AI:',
|
||||
|
||||
"py":
|
||||
'你是一个聪明的代码助手,请你给我写出简单的py代码。 \n'
|
||||
'{{ input }}',
|
||||
},
|
||||
|
||||
|
||||
"knowledge_base_chat": {
|
||||
"default":
|
||||
'<指令>根据已知信息,简洁和专业的来回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,'
|
||||
'不允许在答案中添加编造成分,答案请使用中文。 </指令>\n'
|
||||
'<已知信息>{{ context }}</已知信息>\n'
|
||||
'<问题>{{ question }}</问题>\n',
|
||||
|
||||
"text":
|
||||
'<指令>根据已知信息,简洁和专业的来回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,答案请使用中文。 </指令>\n'
|
||||
'<已知信息>{{ context }}</已知信息>\n'
|
||||
'<问题>{{ question }}</问题>\n',
|
||||
|
||||
"empty": # 搜不到知识库的时候使用
|
||||
'请你回答我的问题:\n'
|
||||
'{{ question }}\n\n',
|
||||
},
|
||||
|
||||
|
||||
"search_engine_chat": {
|
||||
"default":
|
||||
'<指令>这是我搜索到的互联网信息,请你根据这些信息进行提取并有调理,简洁的回答问题。'
|
||||
'如果无法从中得到答案,请说 “无法搜索到能回答问题的内容”。 </指令>\n'
|
||||
'<已知信息>{{ context }}</已知信息>\n'
|
||||
'<问题>{{ question }}</问题>\n',
|
||||
|
||||
"search":
|
||||
'<指令>根据已知信息,简洁和专业的来回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,答案请使用中文。 </指令>\n'
|
||||
'<已知信息>{{ context }}</已知信息>\n'
|
||||
'<问题>{{ question }}</问题>\n',
|
||||
},
|
||||
|
||||
|
||||
"agent_chat": {
|
||||
"default":
|
||||
'Answer the following questions as best you can. If it is in order, you can use some tools appropriately. '
|
||||
'You have access to the following tools:\n\n'
|
||||
'{tools}\n\n'
|
||||
'Use the following format:\n'
|
||||
'Question: the input question you must answer1\n'
|
||||
'Thought: you should always think about what to do and what tools to use.\n'
|
||||
'Action: the action to take, should be one of [{tool_names}]\n'
|
||||
'Action Input: the input to the action\n'
|
||||
'Observation: the result of the action\n'
|
||||
'... (this Thought/Action/Action Input/Observation can be repeated zero or more times)\n'
|
||||
'Thought: I now know the final answer\n'
|
||||
'Final Answer: the final answer to the original input question\n'
|
||||
'Begin!\n\n'
|
||||
'history: {history}\n\n'
|
||||
'Question: {input}\n\n'
|
||||
'Thought: {agent_scratchpad}\n',
|
||||
|
||||
"ChatGLM3":
|
||||
'You can answer using the tools, or answer directly using your knowledge without using the tools. '
|
||||
'Respond to the human as helpfully and accurately as possible.\n'
|
||||
'You have access to the following tools:\n'
|
||||
'{tools}\n'
|
||||
'Use a json blob to specify a tool by providing an action key (tool name) '
|
||||
'and an action_input key (tool input).\n'
|
||||
'Valid "action" values: "Final Answer" or [{tool_names}]'
|
||||
'Provide only ONE action per $JSON_BLOB, as shown:\n\n'
|
||||
'```\n'
|
||||
'{{{{\n'
|
||||
' "action": $TOOL_NAME,\n'
|
||||
' "action_input": $INPUT\n'
|
||||
'}}}}\n'
|
||||
'```\n\n'
|
||||
'Follow this format:\n\n'
|
||||
'Question: input question to answer\n'
|
||||
'Thought: consider previous and subsequent steps\n'
|
||||
'Action:\n'
|
||||
'```\n'
|
||||
'$JSON_BLOB\n'
|
||||
'```\n'
|
||||
'Observation: action result\n'
|
||||
'... (repeat Thought/Action/Observation N times)\n'
|
||||
'Thought: I know what to respond\n'
|
||||
'Action:\n'
|
||||
'```\n'
|
||||
'{{{{\n'
|
||||
' "action": "Final Answer",\n'
|
||||
' "action_input": "Final response to human"\n'
|
||||
'}}}}\n'
|
||||
'Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. '
|
||||
'Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n'
|
||||
'history: {history}\n\n'
|
||||
'Question: {input}\n\n'
|
||||
'Thought: {agent_scratchpad}',
|
||||
}
|
||||
}
|
||||
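As the note at the top of prompt_config explains, the knowledge-base templates use Jinja2's double braces. A minimal sketch of how such a template renders, using standalone jinja2 here; whether the project routes this through langchain's PromptTemplate is an assumption, and the filled-in values are placeholders:

```python
from jinja2 import Template

template = (
    '<指令>根据已知信息,简洁和专业的来回答问题。</指令>\n'
    '<已知信息>{{ context }}</已知信息>\n'
    '<问题>{{ question }}</问题>\n'
)
# Double braces are substituted; literal single braces in the text are left untouched.
prompt = Template(template).render(context="检索到的文本...", question="用户的问题?")
print(prompt)
```
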
139
configs/server_config.py.example
Normal file
@@ -0,0 +1,139 @@
import sys
from configs.model_config import LLM_DEVICE

# Default httpx request timeout in seconds. If loading models or chatting is slow and you see timeout errors, increase this value.
HTTPX_DEFAULT_TIMEOUT = 300.0

# Whether the API allows cross-origin requests. Default is False; set to True to enable it.
# is open cross domain
OPEN_CROSS_DOMAIN = False

# Default bind host for each server. If you change it to "0.0.0.0", also update the host of every XX_SERVER entry below.
DEFAULT_BIND_HOST = "0.0.0.0" if sys.platform != "win32" else "127.0.0.1"

# webui.py server
|
||||
WEBUI_SERVER = {
|
||||
"host": DEFAULT_BIND_HOST,
|
||||
"port": 8501,
|
||||
}
|
||||
|
||||
# api.py server
|
||||
API_SERVER = {
|
||||
"host": DEFAULT_BIND_HOST,
|
||||
"port": 7861,
|
||||
}
|
||||
|
||||
# fastchat openai_api server
|
||||
FSCHAT_OPENAI_API = {
|
||||
"host": DEFAULT_BIND_HOST,
|
||||
"port": 20000,
|
||||
}
|
||||
|
||||
# fastchat model_worker server
|
||||
# 这些模型必须是在model_config.MODEL_PATH或ONLINE_MODEL中正确配置的。
|
||||
# 在启动startup.py时,可用通过`--model-name xxxx yyyy`指定模型,不指定则为LLM_MODELS
|
||||
FSCHAT_MODEL_WORKERS = {
|
||||
# 所有模型共用的默认配置,可在模型专项配置中进行覆盖。
|
||||
"default": {
|
||||
"host": DEFAULT_BIND_HOST,
|
||||
"port": 20002,
|
||||
"device": LLM_DEVICE,
|
||||
# False,'vllm',使用的推理加速框架,使用vllm如果出现HuggingFace通信问题,参见doc/FAQ
|
||||
# vllm对一些模型支持还不成熟,暂时默认关闭
|
||||
"infer_turbo": False,
|
||||
|
||||
# model_worker多卡加载需要配置的参数
|
||||
# "gpus": None, # 使用的GPU,以str的格式指定,如"0,1",如失效请使用CUDA_VISIBLE_DEVICES="0,1"等形式指定
|
||||
# "num_gpus": 1, # 使用GPU的数量
|
||||
# "max_gpu_memory": "20GiB", # 每个GPU占用的最大显存
|
||||
|
||||
# 以下为model_worker非常用参数,可根据需要配置
|
||||
# "load_8bit": False, # 开启8bit量化
|
||||
# "cpu_offloading": None,
|
||||
# "gptq_ckpt": None,
|
||||
# "gptq_wbits": 16,
|
||||
# "gptq_groupsize": -1,
|
||||
# "gptq_act_order": False,
|
||||
# "awq_ckpt": None,
|
||||
# "awq_wbits": 16,
|
||||
# "awq_groupsize": -1,
|
||||
# "model_names": LLM_MODELS,
|
||||
# "conv_template": None,
|
||||
# "limit_worker_concurrency": 5,
|
||||
# "stream_interval": 2,
|
||||
# "no_register": False,
|
||||
# "embed_in_truncate": False,
|
||||
|
||||
# 以下为vllm_worker配置参数,注意使用vllm必须有gpu,仅在Linux测试通过
|
||||
|
||||
# tokenizer = model_path # 如果tokenizer与model_path不一致在此处添加
|
||||
# 'tokenizer_mode':'auto',
|
||||
# 'trust_remote_code':True,
|
||||
# 'download_dir':None,
|
||||
# 'load_format':'auto',
|
||||
# 'dtype':'auto',
|
||||
# 'seed':0,
|
||||
# 'worker_use_ray':False,
|
||||
# 'pipeline_parallel_size':1,
|
||||
# 'tensor_parallel_size':1,
|
||||
# 'block_size':16,
|
||||
# 'swap_space':4 , # GiB
|
||||
# 'gpu_memory_utilization':0.90,
|
||||
# 'max_num_batched_tokens':2560,
|
||||
# 'max_num_seqs':256,
|
||||
# 'disable_log_stats':False,
|
||||
# 'conv_template':None,
|
||||
# 'limit_worker_concurrency':5,
|
||||
# 'no_register':False,
|
||||
# 'num_gpus': 1
|
||||
# 'engine_use_ray': False,
|
||||
# 'disable_log_requests': False
|
||||
|
||||
},
|
||||
"chatglm3-6b": {
|
||||
"device": "cuda",
|
||||
},
|
||||
"Qwen1.5-0.5B-Chat": {
|
||||
"device": "cuda",
|
||||
},
|
||||
# 以下配置可以不用修改,在model_config中设置启动的模型
|
||||
"zhipu-api": {
|
||||
"port": 21001,
|
||||
},
|
||||
"minimax-api": {
|
||||
"port": 21002,
|
||||
},
|
||||
"xinghuo-api": {
|
||||
"port": 21003,
|
||||
},
|
||||
"qianfan-api": {
|
||||
"port": 21004,
|
||||
},
|
||||
"fangzhou-api": {
|
||||
"port": 21005,
|
||||
},
|
||||
"qwen-api": {
|
||||
"port": 21006,
|
||||
},
|
||||
"baichuan-api": {
|
||||
"port": 21007,
|
||||
},
|
||||
"azure-api": {
|
||||
"port": 21008,
|
||||
},
|
||||
"tiangong-api": {
|
||||
"port": 21009,
|
||||
},
|
||||
"gemini-api": {
|
||||
"port": 21010,
|
||||
},
|
||||
"claude-api": {
|
||||
"port": 21011,
|
||||
},
|
||||
}
|
||||
|
||||
FSCHAT_CONTROLLER = {
|
||||
"host": DEFAULT_BIND_HOST,
|
||||
"port": 20001,
|
||||
"dispatch_method": "shortest_queue",
|
||||
}
|
||||
@ -1,152 +0,0 @@
|
||||
# 基于本地知识的 ChatGLM 应用实现
|
||||
|
||||
## 介绍
|
||||
|
||||
🌍 [_READ THIS IN ENGLISH_](README_en.md)
|
||||
|
||||
🤖️ 一种利用 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) + [langchain](https://github.com/hwchase17/langchain) 实现的基于本地知识的 ChatGLM 应用。
|
||||
|
||||
💡 受 [GanymedeNil](https://github.com/GanymedeNil) 的项目 [document.ai](https://github.com/GanymedeNil/document.ai) 和 [AlexZhangji](https://github.com/AlexZhangji) 创建的 [ChatGLM-6B Pull Request](https://github.com/THUDM/ChatGLM-6B/pull/216) 启发,建立了全部基于开源模型实现的本地知识问答应用。
|
||||
|
||||
✅ 本项目中 Embedding 选用的是 [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main),LLM 选用的是 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B)。依托上述模型,本项目可实现全部使用**开源**模型**离线私有部署**。
|
||||
|
||||
⛓️ 本项目实现原理如下图所示,过程包括加载文件 -> 读取文本 -> 文本分割 -> 文本向量化 -> 问句向量化 -> 在文本向量中匹配出与问句向量最相似的`top k`个 -> 匹配出的文本作为上下文和问题一起添加到`prompt`中 -> 提交给`LLM`生成回答。
|
||||
|
||||

|
||||
|
||||
🚩 本项目未涉及微调、训练过程,但可利用微调或训练对本项目效果进行优化。
|
||||
|
||||
## 更新信息
|
||||
|
||||
**[2023/04/07]**
|
||||
1. 解决加载 ChatGLM 模型时发生显存占用为双倍的问题 (感谢 [@suc16](https://github.com/suc16) 和 [@myml](https://github.com/myml)) ;
|
||||
2. 新增清理显存机制;
|
||||
3. 新增`nghuyong/ernie-3.0-nano-zh`和`nghuyong/ernie-3.0-base-zh`作为 Embedding 模型备选项,相比`GanymedeNil/text2vec-large-chinese`占用显存资源更少 (感谢 [@lastrei](https://github.com/lastrei))。
|
||||
|
||||
**[2023/04/09]**
|
||||
1. 使用`langchain`中的`RetrievalQA`替代之前选用的`ChatVectorDBChain`,替换后可以有效减少提问 2-3 次后因显存不足而停止运行的问题;
|
||||
2. 在`knowledge_based_chatglm.py`中增加`EMBEDDING_MODEL`、`VECTOR_SEARCH_TOP_K`、`LLM_MODEL`、`LLM_HISTORY_LEN`、`REPLY_WITH_SOURCE`参数值设置;
|
||||
3. 增加 GPU 显存需求更小的`chatglm-6b-int4`、`chatglm-6b-int4-qe`作为 LLM 模型备选项;
|
||||
4. 更正`README.md`中的代码错误(感谢 [@calcitem](https://github.com/calcitem))。
|
||||
|
||||
**[2023/04/11]**
|
||||
1. 加入 Web UI V0.1 版本(感谢 [@liangtongt](https://github.com/liangtongt));
|
||||
2. `README.md`中增加常见问题(感谢 [@calcitem](https://github.com/calcitem));
|
||||
3. 增加 LLM 和 Embedding 模型运行设备是否可用`cuda`、`mps`、`cpu`的自动判断。
|
||||
4. 在`knowledge_based_chatglm.py`中增加对`filepath`的判断,在之前支持单个文件导入的基础上,现支持单个文件夹路径作为输入,输入后将会遍历文件夹中各个文件,并在命令行中显示每个文件是否成功加载。
|
||||
|
||||
## 使用方式
|
||||
|
||||
### 硬件需求
|
||||
- ChatGLM-6B 模型硬件需求
|
||||
|
||||
| **量化等级** | **最低 GPU 显存**(推理) | **最低 GPU 显存**(高效参数微调) |
|
||||
| -------------- | ------------------------- | --------------------------------- |
|
||||
| FP16(无量化) | 13 GB | 14 GB |
|
||||
| INT8 | 8 GB | 9 GB |
|
||||
| INT4 | 6 GB | 7 GB |
|
||||
|
||||
- Embedding 模型硬件需求
|
||||
|
||||
本项目中默认选用的 Embedding 模型 [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main) 约占用显存 3GB,也可修改为在 CPU 中运行。
|
||||
### 软件需求
|
||||
本项目已在 python 3.8 环境下完成测试。
|
||||
### 1. 安装 python 依赖包
|
||||
```commandline
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
注:使用 langchain.document_loaders.UnstructuredFileLoader 进行非结构化文件接入时,可能需要依据文档进行其他依赖包的安装,请参考 [langchain 文档](https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/unstructured_file.html)
|
||||
|
||||
### 2. 执行脚本体验 Web UI 或命令行交互
|
||||
执行 [webui.py](webui.py) 脚本体验 **Web 交互** <img src="https://img.shields.io/badge/Version-0.1-brightgreen">
|
||||
```commandline
|
||||
python webui.py
|
||||
```
|
||||
执行后效果如下图所示:
|
||||

|
||||
Web UI 中提供的 API 接口如下图所示:
|
||||

|
||||
Web UI 可以实现如下功能:
|
||||
1. 自动读取`knowledge_based_chatglm.py`中`LLM`及`embedding`模型枚举,选择后点击`setting`进行模型加载,可随时切换模型进行测试
|
||||
2. 可手动调节保留对话历史长度,可根据显存大小自行调节
|
||||
3. 添加上传文件功能,通过下拉框选择已上传的文件,点击`loading`加载文件,过程中可随时更换加载的文件
|
||||
4. 底部添加`use via API`可对接到自己系统
|
||||
|
||||
或执行 [knowledge_based_chatglm.py](knowledge_based_chatglm.py) 脚本体验**命令行交互**
|
||||
```commandline
|
||||
python knowledge_based_chatglm.py
|
||||
```
|
||||
|
||||
|
||||
### 常见问题
|
||||
Q: 本项目支持哪些文件格式?
|
||||
|
||||
A: 目前已测试支持 txt、docx、md 格式文件,更多文件格式请参考 [langchain 文档](https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/unstructured_file.html)。目前已知文档中若含有特殊字符,可能存在文件无法加载的问题。
|
||||
|
||||
Q: 读取特定格式文件时遇到缺少`detectron2`时如何解决?
|
||||
|
||||
A: 因该包安装过程中遇到问题较多,且仅部分格式文件需要,所以未加入`requirements.txt`。可以通过一下命令安装
|
||||
|
||||
```commandline
|
||||
pip install "detectron2@git+https://github.com/facebookresearch/detectron2.git@v0.6#egg=detectron2"
|
||||
```
|
||||
|
||||
Q: `Resource punkt not found.` 如何解决?
|
||||
|
||||
A: https://github.com/nltk/nltk_data/raw/gh-pages/packages/tokenizers/punkt.zip 中的 `packages/tokenizers` 解压,放到 `Searched in:` 对应目录下。
|
||||
|
||||
Q: `Resource averaged_perceptron_tagger not found.` 如何解决?
|
||||
|
||||
A: 将 https://github.com/nltk/nltk_data/blob/gh-pages/packages/taggers/averaged_perceptron_tagger.zip 下载,解压放到 `Searched in:` 对应目录下。
|
||||
|
||||
Q: 本项目可否在 colab 中运行?
|
||||
|
||||
A: 可以尝试使用 chatglm-6b-int4 模型在 colab 中运行,需要注意的是,如需在 colab 中运行 Web UI,需将`webui.py`中`demo.queue(concurrency_count=3).launch(
|
||||
server_name='0.0.0.0', share=False, inbrowser=False)`中参数`share`设置为`True`。
|
||||
## DEMO
|
||||
|
||||
以问题`chatglm-6b 的局限性具体体现在哪里,如何实现改进`为例
|
||||
|
||||
未使用 langchain 接入本地文档时:
|
||||
|
||||
>ChatGLM-6B 是一个基于 GPT-3 的聊天机器人,它的局限性主要体现在以下几个方面:
|
||||
>1. 语言模型的精度:GPT-3 是一种大型的语言模型,它的精度比较高,但是仍然有一些局限性。例如,它可能无法准确地理解某些方言、口音或者复杂的句子结构。
|
||||
>2. 知识图谱的完整性:GPT-3 可以生成一些知识图谱,但是其完整性和准确性可能不够。例如,它可能无法准确地理解某些领域的专业知识和术语。
|
||||
>3. 对话生成的连贯性:GPT-3 可以生成一些连贯的对话,但是其生成的对话可能不够自然。例如,它可能会生成一些虚假的、不连贯的对话或者误解用户的意图。
|
||||
>
|
||||
>为了改进 ChatGLM-6B,可以考虑以下几个方面:
|
||||
>1. 提高语言模型的精度:可以通过增加训练数据、使用更好的语言模型架构、优化模型训练过程等方式来提高语言模型的精度。
|
||||
>2. 增强知识图谱的完整性:可以通过增加知识图谱的维度、使用更好的知识图谱构建方法、优化知识图谱的匹配算法等方式来增强知识图谱的完整性。
|
||||
>3. 提高对话生成的连贯性:可以通过增加对话生成模型的上下文信息、使用更好的上下文信息提取方法、优化对话生成模型的算法等方式来提高对话生成的连贯性。
|
||||
>4. 引入更多的人类反馈:人类反馈可以帮助 ChatGLM-6B 更好地理解用户的意图和需求,从而提高对话生成的质量和准确性。可以通过引入更多的人类客服、使用人类反馈收集器等方式来引入更多的人类反馈。
|
||||
|
||||
使用 langchain 接入 ChatGLM-6B 项目的 README.md 文件后 ChatGLM 的回答:
|
||||
>ChatGLM-6B 是一个基于深度学习的自然语言处理模型,它在回答问题方面表现出色。但是,它也有一些局限性,具体如下:
|
||||
>1. 语言理解有限:ChatGLM-6B 只学习了中文自然语言,对于其他语言可能会存在理解上的局限性。
|
||||
>2. 知识储备不足:ChatGLM-6B 的训练数据集只包含了中文语料,因此它可能无法回答一些非中文的问题或者某些特定领域的问题。
|
||||
>3. 数据量有限:ChatGLM-6B 的训练数据集只有几百万条记录,因此它可能无法回答一些非常具体或者复杂的问题。
|
||||
>
|
||||
>为了改进 ChatGLM-6B,可以考虑以下几个方面:
|
||||
>1. 学习更多的语言知识:可以学习其他语言的自然语言处理技术,扩大语言理解的范围。
|
||||
>2. 扩大知识储备:可以收集更多的中文语料,或者使用其他语言的数据集来扩充知识储备。
|
||||
>3. 增加数据量:可以使用更大的数据集来训练 ChatGLM-6B,提高模型的表现。
|
||||
>4. 引入更多的评估指标:可以引入更多的评估指标来评估模型的表现,从而发现 ChatGLM-6B 存在的不足和局限性。
|
||||
>5. 改进模型架构:可以改进 ChatGLM-6B 的模型架构,提高模型的性能和表现。例如,可以使用更大的神经网络或者改进的卷积神经网络结构。
|
||||
|
||||
## 路线图
|
||||
- [x] 实现 langchain + ChatGLM-6B 本地知识应用
|
||||
- [x] 基于 langchain 实现非结构化文件接入
|
||||
- [ ] 基于 langchain 实现更多类型本地知识文件接入
|
||||
- [ ] 增加 Web UI DEMO
|
||||
- [x] 利用 gradio 实现 Web UI DEMO
|
||||
- [ ] 添加模型加载进度条
|
||||
- [ ] 添加输出内容及错误提示
|
||||
- [ ] 国际化语言切换
|
||||
- [ ] 引用标注
|
||||
- [ ] 添加插件系统(可基础lora训练等)
|
||||
- [ ] 利用 fastapi 实现 API 部署方式,并实现调用 API 的 web ui DEMO
|
||||
|
||||
## 项目交流群
|
||||

|
||||
|
||||
🎉 langchain-ChatGLM 项目交流群,如果你也对本项目感兴趣,欢迎加入群聊参与讨论交流。
|
||||
12
copy_config_example.py
Normal file
@@ -0,0 +1,12 @@
# Copies every .example file under configs/ and renames the copy to a .py file
import os
import shutil

if __name__ == "__main__":
    files = os.listdir("configs")

    src_files = [os.path.join("configs", file) for file in files if ".example" in file]

    for src_file in src_files:
        tar_file = src_file.replace(".example", "")
        shutil.copy(src_file, tar_file)
@ -1,32 +0,0 @@
|
||||
## 变更日志
|
||||
|
||||
**[2023/04/15]**
|
||||
|
||||
1. 重构项目结构,在根目录下保留命令行 Demo [cli_demo.py](../cli_demo.py) 和 Web UI Demo [webui.py](../webui.py);
|
||||
2. 对 Web UI 进行改进,修改为运行 Web UI 后首先按照 [configs/model_config.py](../configs/model_config.py) 默认选项加载模型,并增加报错提示信息等;
|
||||
3. 对常见问题进行补充说明。
|
||||
|
||||
**[2023/04/12]**
|
||||
|
||||
1. 替换 Web UI 中的样例文件,避免出现 Ubuntu 中出现因文件编码无法读取的问题;
|
||||
2. 替换`knowledge_based_chatglm.py`中的 prompt 模版,避免出现因 prompt 模版包含中英双语导致 chatglm 返回内容错乱的问题。
|
||||
|
||||
**[2023/04/11]**
|
||||
|
||||
1. 加入 Web UI V0.1 版本(感谢 [@liangtongt](https://github.com/liangtongt));
|
||||
2. `README.md`中增加常见问题(感谢 [@calcitem](https://github.com/calcitem) 和 [@bolongliu](https://github.com/bolongliu));
|
||||
3. 增加 LLM 和 Embedding 模型运行设备是否可用`cuda`、`mps`、`cpu`的自动判断。
|
||||
4. 在`knowledge_based_chatglm.py`中增加对`filepath`的判断,在之前支持单个文件导入的基础上,现支持单个文件夹路径作为输入,输入后将会遍历文件夹中各个文件,并在命令行中显示每个文件是否成功加载。
|
||||
|
||||
**[2023/04/09]**
|
||||
|
||||
1. 使用`langchain`中的`RetrievalQA`替代之前选用的`ChatVectorDBChain`,替换后可以有效减少提问 2-3 次后因显存不足而停止运行的问题;
|
||||
2. 在`knowledge_based_chatglm.py`中增加`EMBEDDING_MODEL`、`VECTOR_SEARCH_TOP_K`、`LLM_MODEL`、`LLM_HISTORY_LEN`、`REPLY_WITH_SOURCE`参数值设置;
|
||||
3. 增加 GPU 显存需求更小的`chatglm-6b-int4`、`chatglm-6b-int4-qe`作为 LLM 模型备选项;
|
||||
4. 更正`README.md`中的代码错误(感谢 [@calcitem](https://github.com/calcitem))。
|
||||
|
||||
**[2023/04/07]**
|
||||
|
||||
1. 解决加载 ChatGLM 模型时发生显存占用为双倍的问题 (感谢 [@suc16](https://github.com/suc16) 和 [@myml](https://github.com/myml)) ;
|
||||
2. 新增清理显存机制;
|
||||
3. 新增`nghuyong/ernie-3.0-nano-zh`和`nghuyong/ernie-3.0-base-zh`作为 Embedding 模型备选项,相比`GanymedeNil/text2vec-large-chinese`占用显存资源更少 (感谢 [@lastrei](https://github.com/lastrei))。
|
||||
29
docs/ES部署指南.md
Normal file
@@ -0,0 +1,29 @@

# Insert, retrieve, delete, and update data with ES
```shell
author: 唐国梁Tommy
e-mail: flytang186@qq.com

If you run into any problems, feel free to contact me; the deployed service works fine on my side.
```

## Step 1: Deploy ES with docker
```shell
docker network create elastic
docker run -id --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e "xpack.security.http.ssl.enabled=false" -t docker.elastic.co/elasticsearch/elasticsearch:8.8.2
```

## Step 2: Deploy Kibana with docker
**Note: the Kibana version must match the ES version**
```shell
docker pull docker.elastic.co/kibana/kibana:{version}
docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version}
```

## Step 3: Core code
```shell
1. Core code path
server/knowledge_base/kb_service/es_kb_service.py

2. Configure the ES parameters (IP, PORT, etc.) in configs/model_config.py;
```
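Before wiring the ES parameters into the configs, it can help to confirm that the container from Step 1 is reachable. A minimal sketch with the official Python client; the host and port mirror the docker command above and are assumptions if you changed them:

```python
# Minimal connectivity check for the ES container started above
# (assumes the elasticsearch Python client is installed: pip install elasticsearch).
from elasticsearch import Elasticsearch

es = Elasticsearch("http://127.0.0.1:9200")  # same host/port as the docker run command
print(es.ping())   # True when the node answers
print(es.info())   # cluster metadata; raises if ES is unreachable
```
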
121
docs/FAQ.md
@ -1,121 +0,0 @@
|
||||
### 常见问题
|
||||
|
||||
Q1: 本项目支持哪些文件格式?
|
||||
|
||||
A1: 目前已测试支持 txt、docx、md、pdf 格式文件,更多文件格式请参考 [langchain 文档](https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/unstructured_file.html)。目前已知文档中若含有特殊字符,可能存在文件无法加载的问题。
|
||||
|
||||
---
|
||||
|
||||
Q2: 执行 `pip install -r requirements.txt` 过程中,安装 `detectron2` 时发生报错怎么办?
|
||||
|
||||
A2: 如果不需要对 `pdf` 格式文件读取,可不安装 `detectron2`;如需对 `pdf` 文件进行高精度文本提取,建议按照如下方法安装:
|
||||
|
||||
```commandline
|
||||
$ git clone https://github.com/facebookresearch/detectron2.git
|
||||
$ cd detectron2
|
||||
$ pip install -e .
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Q3: 使用过程中 Python 包`nltk`发生了`Resource punkt not found.`报错,该如何解决?
|
||||
|
||||
A3: 方法一:https://github.com/nltk/nltk_data/raw/gh-pages/packages/tokenizers/punkt.zip 中的 `packages/tokenizers` 解压,放到 `nltk_data/tokenizers` 存储路径下。
|
||||
|
||||
`nltk_data` 存储路径可以通过 `nltk.data.path` 查询。
|
||||
|
||||
方法二:执行python代码
|
||||
```
|
||||
import nltk
|
||||
nltk.download()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Q4: 使用过程中 Python 包`nltk`发生了`Resource averaged_perceptron_tagger not found.`报错,该如何解决?
|
||||
|
||||
A4: 方法一:将 https://github.com/nltk/nltk_data/blob/gh-pages/packages/taggers/averaged_perceptron_tagger.zip 下载,解压放到 `nltk_data/taggers` 存储路径下。
|
||||
|
||||
`nltk_data` 存储路径可以通过 `nltk.data.path` 查询。
|
||||
|
||||
方法二:执行python代码
|
||||
```
|
||||
import nltk
|
||||
nltk.download()
|
||||
```
|
||||
---
|
||||
|
||||
Q5: 本项目可否在 colab 中运行?
|
||||
|
||||
A5: 可以尝试使用 chatglm-6b-int4 模型在 colab 中运行,需要注意的是,如需在 colab 中运行 Web UI,需将`webui.py`中`demo.queue(concurrency_count=3).launch(
|
||||
server_name='0.0.0.0', share=False, inbrowser=False)`中参数`share`设置为`True`。
|
||||
|
||||
---
|
||||
|
||||
Q6: 在 Anaconda 中使用 pip 安装包无效如何解决?
|
||||
|
||||
A6: 此问题是系统环境问题,详细见 [在Anaconda中使用pip安装包无效问题](在Anaconda中使用pip安装包无效问题.md)
|
||||
|
||||
---
|
||||
|
||||
Q7: 本项目中所需模型如何下载至本地?
|
||||
|
||||
A7: 本项目中使用的模型均为`huggingface.com`中可下载的开源模型,以默认选择的`chatglm-6b`和`text2vec-large-chinese`模型为例,下载模型可执行如下代码:
|
||||
|
||||
```shell
|
||||
# 安装 git lfs
|
||||
$ git lfs install
|
||||
|
||||
# 下载 LLM 模型
|
||||
$ git clone https://huggingface.co/THUDM/chatglm-6b /your_path/chatglm-6b
|
||||
|
||||
# 下载 Embedding 模型
|
||||
$ git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese /your_path/text2vec
|
||||
|
||||
# 模型需要更新时,可打开模型所在文件夹后拉取最新模型文件/代码
|
||||
$ git pull
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Q8: `huggingface.com`中模型下载速度较慢怎么办?
|
||||
|
||||
A8: 可使用本项目用到的模型权重文件百度网盘地址:
|
||||
|
||||
- ernie-3.0-base-zh.zip 链接: https://pan.baidu.com/s/1CIvKnD3qzE-orFouA8qvNQ?pwd=4wih
|
||||
- ernie-3.0-nano-zh.zip 链接: https://pan.baidu.com/s/1Fh8fgzVdavf5P1omAJJ-Zw?pwd=q6s5
|
||||
- text2vec-large-chinese.zip 链接: https://pan.baidu.com/s/1sMyPzBIXdEzHygftEoyBuA?pwd=4xs7
|
||||
- chatglm-6b-int4-qe.zip 链接: https://pan.baidu.com/s/1DDKMOMHtNZccOOBGWIOYww?pwd=22ji
|
||||
- chatglm-6b-int4.zip 链接: https://pan.baidu.com/s/1pvZ6pMzovjhkA6uPcRLuJA?pwd=3gjd
|
||||
- chatglm-6b.zip 链接: https://pan.baidu.com/s/1B-MpsVVs1GHhteVBetaquw?pwd=djay
|
||||
|
||||
---
|
||||
|
||||
Q9: 下载完模型后,如何修改代码以执行本地模型?
|
||||
|
||||
A9: 模型下载完成后,请在 [configs/model_config.py](../configs/model_config.py) 文件中,对`embedding_model_dict`和`llm_model_dict`参数进行修改,如把`llm_model_dict`从
|
||||
|
||||
```python
|
||||
embedding_model_dict = {
|
||||
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
|
||||
"ernie-base": "nghuyong/ernie-3.0-base-zh",
|
||||
"text2vec": "GanymedeNil/text2vec-large-chinese"
|
||||
}
|
||||
```
|
||||
|
||||
修改为
|
||||
|
||||
```python
|
||||
embedding_model_dict = {
|
||||
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
|
||||
"ernie-base": "nghuyong/ernie-3.0-base-zh",
|
||||
"text2vec": "/Users/liuqian/Downloads/ChatGLM-6B/text2vec-large-chinese"
|
||||
}
|
||||
```
|
||||
---
|
||||
|
||||
Q10: 执行`python cli_demo.py`过程中,显卡内存爆了,提示"OutOfMemoryError: CUDA out of memory"
|
||||
|
||||
A10: 将 `VECTOR_SEARCH_TOP_K` 和 `LLM_HISTORY_LEN` 的值调低,比如 `VECTOR_SEARCH_TOP_K = 5` 和 `LLM_HISTORY_LEN = 2`,这样由 `query` 和 `context` 拼接得到的 `prompt` 会变短,会减少内存的占用。
|
||||
|
||||
---
|
||||
@ -1,32 +0,0 @@
|
||||
# 安装
|
||||
|
||||
## 环境检查
|
||||
|
||||
```shell
|
||||
# 首先,确信你的机器安装了 Python 3.8 及以上版本
|
||||
$ python --version
|
||||
Python 3.8.13
|
||||
|
||||
# 如果低于这个版本,可使用conda安装环境
|
||||
$ conda create -p /your_path/env_name python=3.8
|
||||
|
||||
# 激活环境
|
||||
$ source activate /your_path/env_name
|
||||
|
||||
# 关闭环境
|
||||
$ source deactivate /your_path/env_name
|
||||
|
||||
# 删除环境
|
||||
$ conda env remove -p /your_path/env_name
|
||||
```
|
||||
|
||||
## 项目依赖
|
||||
|
||||
```shell
|
||||
# 拉取仓库
|
||||
$ git clone https://github.com/imClumsyPanda/langchain-ChatGLM.git
|
||||
|
||||
# 安装依赖
|
||||
$ pip install -r requirements.txt
|
||||
```
|
||||
注:使用 `langchain.document_loaders.UnstructuredFileLoader` 进行非结构化文件接入时,可能需要依据文档进行其他依赖包的安装,请参考 [langchain 文档](https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/unstructured_file.html)。
|
||||
@ -1,114 +0,0 @@
|
||||
## Issue with Installing Packages Using pip in Anaconda
|
||||
|
||||
## Problem
|
||||
|
||||
Recently, when running open-source code, I encountered an issue: after creating a virtual environment with conda and switching to the new environment, using pip to install packages would be "ineffective." Here, "ineffective" means that the packages installed with pip are not in this new environment.
|
||||
|
||||
------
|
||||
|
||||
## Analysis
|
||||
|
||||
1. First, create a test environment called test: `conda create -n test`
|
||||
2. Activate the test environment: `conda activate test`
|
||||
3. Use pip to install numpy: `pip install numpy`. You'll find that numpy already exists in the default environment.
|
||||
|
||||
```powershell
|
||||
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
Requirement already satisfied: numpy in c:\programdata\anaconda3\lib\site-packages (1.20.3)
|
||||
```
|
||||
|
||||
4. Check the information of pip: `pip show pip`
|
||||
|
||||
```powershell
|
||||
Name: pip
|
||||
Version: 21.2.4
|
||||
Summary: The PyPA recommended tool for installing Python packages.
|
||||
Home-page: https://pip.pypa.io/
|
||||
Author: The pip developers
|
||||
Author-email: distutils-sig@python.org
|
||||
License: MIT
|
||||
Location: c:\programdata\anaconda3\lib\site-packages
|
||||
Requires:
|
||||
Required-by:
|
||||
```
|
||||
|
||||
5. We can see that the current pip is in the default conda environment. This explains why the package is not in the new virtual environment when we directly use pip to install packages - because the pip being used belongs to the default environment, the installed package either already exists or is installed directly into the default environment.
|
||||
|
||||
------
|
||||
|
||||
## Solution
|
||||
|
||||
1. We can directly use the conda command to install new packages, but sometimes conda may not have certain packages/libraries, so we still need to use pip to install.
|
||||
2. We can first use the conda command to install the pip package for the current virtual environment, and then use pip to install new packages.
|
||||
|
||||
```powershell
|
||||
# Use conda to install the pip package
|
||||
(test) PS C:\Users\Administrator> conda install pip
|
||||
Collecting package metadata (current_repodata.json): done
|
||||
Solving environment: done
|
||||
....
|
||||
done
|
||||
|
||||
# Display the information of the current pip, and find that pip is in the test environment
|
||||
(test) PS C:\Users\Administrator> pip show pip
|
||||
Name: pip
|
||||
Version: 21.2.4
|
||||
Summary: The PyPA recommended tool for installing Python packages.
|
||||
Home-page: https://pip.pypa.io/
|
||||
Author: The pip developers
|
||||
Author-email: distutils-sig@python.org
|
||||
License: MIT
|
||||
Location: c:\programdata\anaconda3\envs\test\lib\site-packages
|
||||
Requires:
|
||||
Required-by:
|
||||
|
||||
# Now use pip to install the numpy package, and it is installed successfully
|
||||
(test) PS C:\Users\Administrator> pip install numpy
|
||||
Looking in indexes:
|
||||
https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
Collecting numpy
|
||||
Using cached https://pypi.tuna.tsinghua.edu.cn/packages/4b/23/140ec5a509d992fe39db17200e96c00fd29603c1531ce633ef93dbad5e9e/numpy-1.22.2-cp39-cp39-win_amd64.whl (14.7 MB)
|
||||
Installing collected packages: numpy
|
||||
Successfully installed numpy-1.22.2
|
||||
|
||||
# Use pip list to view the currently installed packages, no problem
|
||||
(test) PS C:\Users\Administrator> pip list
|
||||
Package Version
|
||||
------------ ---------
|
||||
certifi 2021.10.8
|
||||
numpy 1.22.2
|
||||
pip 21.2.4
|
||||
setuptools 58.0.4
|
||||
wheel 0.37.1
|
||||
wincertstore 0.2
|
||||
```
|
||||
|
||||
## Supplement
|
||||
|
||||
1. The reason I didn't notice this problem before might be because the packages installed in the virtual environment were of a specific version, which overwrote the packages in the default environment. The main issue was actually a lack of careful observation:), otherwise, I could have noticed `Successfully uninstalled numpy-xxx` **default version** and `Successfully installed numpy-1.20.3` **specified version**.
|
||||
2. During testing, I found that if the Python version is specified when creating a new package, there shouldn't be this issue. I guess this is because pip will be installed in the virtual environment, while in our case, including pip, no packages were installed, so the default environment's pip was used.
|
||||
3. There's a question: I should have specified the Python version when creating a new virtual environment before, but I still used the default environment's pip package. However, I just couldn't reproduce the issue successfully on two different machines, which led to the second point mentioned above.
|
||||
4. After encountering the problem mentioned in point 3, I solved it by using `python -m pip install package-name`, adding `python -m` before pip. As for why, you can refer to the answer on [StackOverflow](https://stackoverflow.com/questions/41060382/using-pip-to-install-packages-to-anaconda-environment):
|
||||
|
||||
>1. If you have a non-conda pip as your default pip but conda python as your default python (as below):
|
||||
>
|
||||
>```shell
|
||||
>>which -a pip
|
||||
>/home/<user>/.local/bin/pip
|
||||
>/home/<user>/.conda/envs/newenv/bin/pip
|
||||
>/usr/bin/pip
|
||||
>
|
||||
>>which -a python
|
||||
>/home/<user>/.conda/envs/newenv/bin/python
|
||||
>/usr/bin/python
|
||||
>```
|
||||
>
|
||||
>2. Then, instead of calling `pip install <package>` directly, you can use the module flag -m in python so that it installs with the anaconda python
|
||||
>
|
||||
>```shell
|
||||
>python -m pip install <package>
|
||||
>```
|
||||
>
|
||||
>3. This will install the package to the anaconda library directory rather than the library directory associated with the (non-anaconda) pip
|
||||
>4. The reason for doing this is as follows: the pip command references a specific pip file/shortcut (which -a pip will tell you which one). Similarly, the python command references a specific python file (which -a python will tell you which one). For one reason or another, these two commands can become out of sync, so your "default" pip is in a different folder than your default python and therefore is associated with different versions of python.
|
||||
>5. In contrast, the python -m pip construct does not use the shortcut that the pip command points to. Instead, it asks python to find its pip version and use that version to install a package.
|
||||
@ -1,125 +0,0 @@
|
||||
## 在 Anaconda 中使用 pip 安装包无效问题
|
||||
|
||||
## 问题
|
||||
|
||||
最近在跑开源代码的时候遇到的问题:使用 conda 创建虚拟环境并切换到新的虚拟环境后,再使用 pip 来安装包会“无效”。这里的“无效”指的是使用 pip 安装的包不在这个新的环境中。
|
||||
|
||||
------
|
||||
|
||||
## 分析
|
||||
|
||||
1、首先创建一个测试环境 test,`conda create -n test`
|
||||
|
||||
2、激活该测试环境,`conda activate test`
|
||||
|
||||
3、使用 pip 安装 numpy,`pip install numpy`,会发现 numpy 已经存在默认的环境中
|
||||
|
||||
```powershell
|
||||
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
Requirement already satisfied: numpy in c:\programdata\anaconda3\lib\site-packages (1.20.3)
|
||||
```
|
||||
|
||||
4、这时候看一下 pip 的信息,`pip show pip`
|
||||
|
||||
```powershell
|
||||
Name: pip
|
||||
Version: 21.2.4
|
||||
Summary: The PyPA recommended tool for installing Python packages.
|
||||
Home-page: https://pip.pypa.io/
|
||||
Author: The pip developers
|
||||
Author-email: distutils-sig@python.org
|
||||
License: MIT
|
||||
Location: c:\programdata\anaconda3\lib\site-packages
|
||||
Requires:
|
||||
Required-by:
|
||||
```
|
||||
|
||||
5、可以发现当前 pip 是在默认的 conda 环境中。这也就解释了当我们直接使用 pip 安装包时为什么包不在这个新的虚拟环境中,因为使用的 pip 属于默认环境,安装的包要么已经存在,要么直接装到默认环境中去了。
|
||||
|
||||
------
|
||||
|
||||
## 解决
|
||||
|
||||
1、我们可以直接使用 conda 命令安装新的包,但有些时候 conda 可能没有某些包/库,所以还是得用 pip 安装
|
||||
|
||||
2、我们可以先使用 conda 命令为当前虚拟环境安装 pip 包,再使用 pip 安装新的包
|
||||
|
||||
```powershell
|
||||
# 使用 conda 安装 pip 包
|
||||
(test) PS C:\Users\Administrator> conda install pip
|
||||
Collecting package metadata (current_repodata.json): done
|
||||
Solving environment: done
|
||||
....
|
||||
done
|
||||
|
||||
# 显示当前 pip 的信息,发现 pip 在测试环境 test 中
|
||||
(test) PS C:\Users\Administrator> pip show pip
|
||||
Name: pip
|
||||
Version: 21.2.4
|
||||
Summary: The PyPA recommended tool for installing Python packages.
|
||||
Home-page: https://pip.pypa.io/
|
||||
Author: The pip developers
|
||||
Author-email: distutils-sig@python.org
|
||||
License: MIT
|
||||
Location: c:\programdata\anaconda3\envs\test\lib\site-packages
|
||||
Requires:
|
||||
Required-by:
|
||||
|
||||
# 再使用 pip 安装 numpy 包,成功安装
|
||||
(test) PS C:\Users\Administrator> pip install numpy
|
||||
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
Collecting numpy
|
||||
Using cached https://pypi.tuna.tsinghua.edu.cn/packages/4b/23/140ec5a509d992fe39db17200e96c00fd29603c1531ce633ef93dbad5e9e/numpy-1.22.2-cp39-cp39-win_amd64.whl (14.7 MB)
|
||||
Installing collected packages: numpy
|
||||
Successfully installed numpy-1.22.2
|
||||
|
||||
# 使用 pip list 查看当前安装的包,没有问题
|
||||
(test) PS C:\Users\Administrator> pip list
|
||||
Package Version
|
||||
------------ ---------
|
||||
certifi 2021.10.8
|
||||
numpy 1.22.2
|
||||
pip 21.2.4
|
||||
setuptools 58.0.4
|
||||
wheel 0.37.1
|
||||
wincertstore 0.2
|
||||
```
|
||||
|
||||
------
|
||||
|
||||
## 补充
|
||||
|
||||
1、之前没有发现这个问题可能时因为在虚拟环境中安装的包是指定版本的,覆盖了默认环境中的包。其实主要还是观察不仔细:),不然可以发现 `Successfully uninstalled numpy-xxx`【默认版本】 以及 `Successfully installed numpy-1.20.3`【指定版本】
|
||||
|
||||
2、测试时发现如果在新建包的时候指定了 python 版本的话应该是没有这个问题的,猜测时因为会在虚拟环境中安装好 pip ,而我们这里包括 pip 在内啥包也没有装,所以使用的是默认环境的 pip
|
||||
|
||||
3、有个问题,之前我在创建新的虚拟环境时应该指定了 python 版本,但还是使用的默认环境的 pip 包,但是刚在在两台机器上都没有复现成功,于是有了上面的第 2 点
|
||||
|
||||
4、出现了第 3 点的问题后,我当时是使用 `python -m pip install package-name` 解决的,在 pip 前面加上了 python -m。至于为什么,可以参考 [StackOverflow](https://stackoverflow.com/questions/41060382/using-pip-to-install-packages-to-anaconda-environment) 上的回答:
|
||||
|
||||
> 1、如果你有一个非 conda 的 pip 作为你的默认 pip,但是 conda 的 python 是你的默认 python(如下):
|
||||
>
|
||||
> ```shell
|
||||
> >which -a pip
|
||||
> /home/<user>/.local/bin/pip
|
||||
> /home/<user>/.conda/envs/newenv/bin/pip
|
||||
> /usr/bin/pip
|
||||
>
|
||||
> >which -a python
|
||||
> /home/<user>/.conda/envs/newenv/bin/python
|
||||
> /usr/bin/python
|
||||
> ```
|
||||
>
|
||||
> 2、然后,而不是直接调用 `pip install <package>`,你可以在 python 中使用模块标志 -m,以便它使用 anaconda python 进行安装
|
||||
>
|
||||
> ```shell
|
||||
>python -m pip install <package>
|
||||
> ```
|
||||
>
|
||||
> 3、这将把包安装到 anaconda 库目录,而不是与(非anaconda) pip 关联的库目录
|
||||
>
|
||||
> 4、这样做的原因如下:命令 pip 引用了一个特定的 pip 文件 / 快捷方式(which -a pip 会告诉你是哪一个)。类似地,命令 python 引用一个特定的 python 文件(which -a python 会告诉你是哪个)。由于这样或那样的原因,这两个命令可能变得不同步,因此你的“默认” pip 与你的默认 python 位于不同的文件夹中,因此与不同版本的 python 相关联。
|
||||
>
|
||||
> 5、与此相反,python -m pip 构造不使用 pip 命令指向的快捷方式。相反,它要求 python 找到它的pip 版本,并使用该版本安装一个包。
|
||||
|
||||
84
document_loaders/FilteredCSVloader.py
Normal file
@@ -0,0 +1,84 @@
## A CSV loader that only reads the specified columns

from langchain.document_loaders import CSVLoader
import csv
from io import TextIOWrapper
from typing import Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.helpers import detect_file_encodings


class FilteredCSVLoader(CSVLoader):
    def __init__(
            self,
            file_path: str,
            columns_to_read: List[str],
            source_column: Optional[str] = None,
            metadata_columns: List[str] = [],
            csv_args: Optional[Dict] = None,
            encoding: Optional[str] = None,
            autodetect_encoding: bool = False,
    ):
        super().__init__(
            file_path=file_path,
            source_column=source_column,
            metadata_columns=metadata_columns,
            csv_args=csv_args,
            encoding=encoding,
            autodetect_encoding=autodetect_encoding,
        )
        self.columns_to_read = columns_to_read

    def load(self) -> List[Document]:
        """Load data into document objects."""

        docs = []
        try:
            with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
                docs = self.__read_file(csvfile)
        except UnicodeDecodeError as e:
            if self.autodetect_encoding:
                detected_encodings = detect_file_encodings(self.file_path)
                for encoding in detected_encodings:
                    try:
                        with open(
                                self.file_path, newline="", encoding=encoding.encoding
                        ) as csvfile:
                            docs = self.__read_file(csvfile)
                            break
                    except UnicodeDecodeError:
                        continue
            else:
                raise RuntimeError(f"Error loading {self.file_path}") from e
        except Exception as e:
            raise RuntimeError(f"Error loading {self.file_path}") from e

        return docs

    def __read_file(self, csvfile: TextIOWrapper) -> List[Document]:
        docs = []
        csv_reader = csv.DictReader(csvfile, **self.csv_args)  # type: ignore
        for i, row in enumerate(csv_reader):
            content = []
            for col in self.columns_to_read:
                if col in row:
                    content.append(f'{col}:{str(row[col])}')
                else:
                    # Report the column that is actually missing rather than always the first one
                    raise ValueError(f"Column '{col}' not found in CSV file.")
            content = '\n'.join(content)
            # Extract the source if available
            source = (
                row.get(self.source_column, None)
                if self.source_column is not None
                else self.file_path
            )
            metadata = {"source": source, "row": i}

            for col in self.metadata_columns:
                if col in row:
                    metadata[col] = row[col]

            doc = Document(page_content=content, metadata=metadata)
            docs.append(doc)

        return docs
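A usage sketch for the loader above; the file path and column names are purely illustrative:

```python
# Hypothetical example: keep only the "title" and "content" columns of a CSV,
# and copy "url" into each document's metadata.
from document_loaders.FilteredCSVloader import FilteredCSVLoader

loader = FilteredCSVLoader(
    file_path="../docs/sample.csv",          # illustrative path
    columns_to_read=["title", "content"],    # illustrative column names
    metadata_columns=["url"],
    encoding="utf-8",
)
docs = loader.load()
print(docs[0].page_content)
```
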
4
document_loaders/__init__.py
Normal file
@@ -0,0 +1,4 @@
from .mypdfloader import RapidOCRPDFLoader
from .myimgloader import RapidOCRLoader
from .mydocloader import RapidOCRDocLoader
from .mypptloader import RapidOCRPPTLoader
71
document_loaders/mydocloader.py
Normal file
@@ -0,0 +1,71 @@
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from typing import List
import tqdm


class RapidOCRDocLoader(UnstructuredFileLoader):
    def _get_elements(self) -> List:
        def doc2text(filepath):
            from docx.table import _Cell, Table
            from docx.oxml.table import CT_Tbl
            from docx.oxml.text.paragraph import CT_P
            from docx.text.paragraph import Paragraph
            from docx import Document, ImagePart
            from PIL import Image
            from io import BytesIO
            import numpy as np
            from rapidocr_onnxruntime import RapidOCR
            ocr = RapidOCR()
            doc = Document(filepath)
            resp = ""

            def iter_block_items(parent):
                from docx.document import Document
                if isinstance(parent, Document):
                    parent_elm = parent.element.body
                elif isinstance(parent, _Cell):
                    parent_elm = parent._tc
                else:
                    raise ValueError("RapidOCRDocLoader parse fail")

                for child in parent_elm.iterchildren():
                    if isinstance(child, CT_P):
                        yield Paragraph(child, parent)
                    elif isinstance(child, CT_Tbl):
                        yield Table(child, parent)

            b_unit = tqdm.tqdm(total=len(doc.paragraphs)+len(doc.tables),
                               desc="RapidOCRDocLoader block index: 0")
            for i, block in enumerate(iter_block_items(doc)):
                b_unit.set_description(
                    "RapidOCRDocLoader block index: {}".format(i))
                b_unit.refresh()
                if isinstance(block, Paragraph):
                    resp += block.text.strip() + "\n"
                    images = block._element.xpath('.//pic:pic')  # collect all inline pictures in the paragraph
                    for image in images:
                        for img_id in image.xpath('.//a:blip/@r:embed'):  # picture relationship id
                            part = doc.part.related_parts[img_id]  # look up the image part by its id
                            if isinstance(part, ImagePart):
                                image = Image.open(BytesIO(part._blob))
                                result, _ = ocr(np.array(image))
                                if result:
                                    ocr_result = [line[1] for line in result]
                                    resp += "\n".join(ocr_result)
                elif isinstance(block, Table):
                    for row in block.rows:
                        for cell in row.cells:
                            for paragraph in cell.paragraphs:
                                resp += paragraph.text.strip() + "\n"
                b_unit.update(1)
            return resp

        text = doc2text(self.file_path)
        from unstructured.partition.text import partition_text
        return partition_text(text=text, **self.unstructured_kwargs)


if __name__ == '__main__':
    loader = RapidOCRDocLoader(file_path="../tests/samples/ocr_test.docx")
    docs = loader.load()
    print(docs)
25
document_loaders/myimgloader.py
Normal file
@@ -0,0 +1,25 @@
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from document_loaders.ocr import get_ocr


class RapidOCRLoader(UnstructuredFileLoader):
    def _get_elements(self) -> List:
        def img2text(filepath):
            resp = ""
            ocr = get_ocr()
            result, _ = ocr(filepath)
            if result:
                ocr_result = [line[1] for line in result]
                resp += "\n".join(ocr_result)
            return resp

        text = img2text(self.file_path)
        from unstructured.partition.text import partition_text
        return partition_text(text=text, **self.unstructured_kwargs)


if __name__ == "__main__":
    loader = RapidOCRLoader(file_path="../tests/samples/ocr_test.jpg")
    docs = loader.load()
    print(docs)
87
document_loaders/mypdfloader.py
Normal file
@@ -0,0 +1,87 @@
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
import cv2
from PIL import Image
import numpy as np
from configs import PDF_OCR_THRESHOLD
from document_loaders.ocr import get_ocr
import tqdm


class RapidOCRPDFLoader(UnstructuredFileLoader):
    def _get_elements(self) -> List:
        def rotate_img(img, angle):
            '''
            img    -- image to rotate
            angle  -- rotation angle in degrees
            return -- rotated image
            '''

            h, w = img.shape[:2]
            rotate_center = (w/2, h/2)
            # Build the rotation matrix:
            #   arg 1: rotation center
            #   arg 2: rotation angle (positive = counter-clockwise, negative = clockwise)
            #   arg 3: isotropic scale factor (1.0 keeps the original size)
            M = cv2.getRotationMatrix2D(rotate_center, angle, 1.0)
            # Compute the bounding box of the rotated image
            new_w = int(h * np.abs(M[0, 1]) + w * np.abs(M[0, 0]))
            new_h = int(h * np.abs(M[0, 0]) + w * np.abs(M[0, 1]))
            # Adjust the rotation matrix to account for the translation
            M[0, 2] += (new_w - w) / 2
            M[1, 2] += (new_h - h) / 2

            rotated_img = cv2.warpAffine(img, M, (new_w, new_h))
            return rotated_img

        def pdf2text(filepath):
            import fitz  # the fitz module bundled with PyMuPDF; do not confuse it with `pip install fitz`
            import numpy as np
            ocr = get_ocr()
            doc = fitz.open(filepath)
            resp = ""

            b_unit = tqdm.tqdm(total=doc.page_count, desc="RapidOCRPDFLoader context page index: 0")
            for i, page in enumerate(doc):
                b_unit.set_description("RapidOCRPDFLoader context page index: {}".format(i))
                b_unit.refresh()
                text = page.get_text("")
                resp += text + "\n"

                img_list = page.get_image_info(xrefs=True)
                for img in img_list:
                    if xref := img.get("xref"):
                        bbox = img["bbox"]
                        # Skip images smaller than the configured size threshold
                        if ((bbox[2] - bbox[0]) / (page.rect.width) < PDF_OCR_THRESHOLD[0]
                                or (bbox[3] - bbox[1]) / (page.rect.height) < PDF_OCR_THRESHOLD[1]):
                            continue
                        pix = fitz.Pixmap(doc, xref)
                        samples = pix.samples
                        if int(page.rotation) != 0:  # if the page is rotated, rotate the extracted image back
                            img_array = np.frombuffer(pix.samples, dtype=np.uint8).reshape(pix.height, pix.width, -1)
                            tmp_img = Image.fromarray(img_array)
                            ori_img = cv2.cvtColor(np.array(tmp_img), cv2.COLOR_RGB2BGR)
                            rot_img = rotate_img(img=ori_img, angle=360-page.rotation)
                            img_array = cv2.cvtColor(rot_img, cv2.COLOR_RGB2BGR)
                        else:
                            img_array = np.frombuffer(pix.samples, dtype=np.uint8).reshape(pix.height, pix.width, -1)

                        result, _ = ocr(img_array)
                        if result:
                            ocr_result = [line[1] for line in result]
                            resp += "\n".join(ocr_result)

                # update the progress bar
                b_unit.update(1)
            return resp

        text = pdf2text(self.file_path)
        from unstructured.partition.text import partition_text
        return partition_text(text=text, **self.unstructured_kwargs)


if __name__ == "__main__":
    loader = RapidOCRPDFLoader(file_path="/Users/tonysong/Desktop/test.pdf")
    docs = loader.load()
    print(docs)
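For orientation, PDF_OCR_THRESHOLD is read from configs as a (width_ratio, height_ratio) pair; a hedged illustration of the gate implemented above (the 0.6 values are an assumed example, not taken from the project's configs):

# With PDF_OCR_THRESHOLD = (0.6, 0.6), only images covering at least 60% of the page
# width AND 60% of the page height get OCRed; smaller images are skipped.
PDF_OCR_THRESHOLD = (0.6, 0.6)  # assumed example value

def should_ocr(bbox, page_width, page_height):
    w_ratio = (bbox[2] - bbox[0]) / page_width
    h_ratio = (bbox[3] - bbox[1]) / page_height
    return w_ratio >= PDF_OCR_THRESHOLD[0] and h_ratio >= PDF_OCR_THRESHOLD[1]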
59
document_loaders/mypptloader.py
Normal file
@@ -0,0 +1,59 @@
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from typing import List
import tqdm


class RapidOCRPPTLoader(UnstructuredFileLoader):
    def _get_elements(self) -> List:
        def ppt2text(filepath):
            from pptx import Presentation
            from PIL import Image
            import numpy as np
            from io import BytesIO
            from rapidocr_onnxruntime import RapidOCR
            ocr = RapidOCR()
            prs = Presentation(filepath)
            resp = ""

            def extract_text(shape):
                nonlocal resp
                if shape.has_text_frame:
                    resp += shape.text.strip() + "\n"
                if shape.has_table:
                    for row in shape.table.rows:
                        for cell in row.cells:
                            for paragraph in cell.text_frame.paragraphs:
                                resp += paragraph.text.strip() + "\n"
                if shape.shape_type == 13:  # 13 = picture
                    image = Image.open(BytesIO(shape.image.blob))
                    result, _ = ocr(np.array(image))
                    if result:
                        ocr_result = [line[1] for line in result]
                        resp += "\n".join(ocr_result)
                elif shape.shape_type == 6:  # 6 = group shape
                    for child_shape in shape.shapes:
                        extract_text(child_shape)

            b_unit = tqdm.tqdm(total=len(prs.slides),
                               desc="RapidOCRPPTLoader slide index: 1")
            # iterate over all slides
            for slide_number, slide in enumerate(prs.slides, start=1):
                b_unit.set_description(
                    "RapidOCRPPTLoader slide index: {}".format(slide_number))
                b_unit.refresh()
                sorted_shapes = sorted(slide.shapes,
                                       key=lambda x: (x.top, x.left))  # visit shapes top-to-bottom, left-to-right
                for shape in sorted_shapes:
                    extract_text(shape)
                b_unit.update(1)
            return resp

        text = ppt2text(self.file_path)
        from unstructured.partition.text import partition_text
        return partition_text(text=text, **self.unstructured_kwargs)


if __name__ == '__main__':
    loader = RapidOCRPPTLoader(file_path="../tests/samples/ocr_test.pptx")
    docs = loader.load()
    print(docs)
18
document_loaders/ocr.py
Normal file
@@ -0,0 +1,18 @@
from typing import TYPE_CHECKING


if TYPE_CHECKING:
    try:
        from rapidocr_paddle import RapidOCR
    except ImportError:
        from rapidocr_onnxruntime import RapidOCR


def get_ocr(use_cuda: bool = True) -> "RapidOCR":
    try:
        from rapidocr_paddle import RapidOCR
        ocr = RapidOCR(det_use_cuda=use_cuda, cls_use_cuda=use_cuda, rec_use_cuda=use_cuda)
    except ImportError:
        from rapidocr_onnxruntime import RapidOCR
        ocr = RapidOCR()
    return ocr
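A small usage sketch for get_ocr; the image path is a placeholder, and use_cuda=False simply keeps everything on CPU:

from document_loaders.ocr import get_ocr

ocr = get_ocr(use_cuda=False)        # falls back to rapidocr_onnxruntime if rapidocr_paddle is absent
result, _ = ocr("scanned_page.png")  # placeholder image path
if result:
    print("\n".join(line[1] for line in result))  # each result line is (box, text, score)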
0
embeddings/__init__.py
Normal file
79
embeddings/add_embedding_keywords.py
Normal file
@@ -0,0 +1,79 @@
'''
This utility merges a list of keywords into the embedding model so that each keyword
gets its own token embedding.
It works by adding the keywords to the embedding model's tokenizer and resizing the
token-embedding matrix accordingly.
It only applies to the model referenced by the EMBEDDING_MODEL setting, and the merged
model is saved next to the original one.
Thanks to @CharlesJu1 and @charlesyju for proposing the idea and the initial PR.

The merged model is saved in the directory of the original embedding model, under the
name <original model name>_Merge_Keywords_<timestamp>.
'''
import sys

sys.path.append("..")
import os
import torch

from datetime import datetime
from configs import (
    MODEL_PATH,
    EMBEDDING_MODEL,
    EMBEDDING_KEYWORD_FILE,
)

from safetensors.torch import save_model
from sentence_transformers import SentenceTransformer
from langchain_core._api import deprecated


@deprecated(
    since="0.3.0",
    message="Custom keyword support is rewritten in Langchain-Chatchat 0.3.x; the 0.2.x implementation is deprecated",
    removal="0.3.0"
)
def get_keyword_embedding(bert_model, tokenizer, key_words):
    tokenizer_output = tokenizer(key_words, return_tensors="pt", padding=True, truncation=True)
    input_ids = tokenizer_output['input_ids']
    # drop the [CLS] and [SEP] tokens added by the tokenizer
    input_ids = input_ids[:, 1:-1]

    keyword_embedding = bert_model.embeddings.word_embeddings(input_ids)
    # average the sub-token embeddings to get one vector per keyword
    keyword_embedding = torch.mean(keyword_embedding, 1)
    return keyword_embedding


def add_keyword_to_model(model_name=EMBEDDING_MODEL, keyword_file: str = "", output_model_path: str = None):
    key_words = []
    with open(keyword_file, "r") as f:
        for line in f:
            key_words.append(line.strip())

    st_model = SentenceTransformer(model_name)
    key_words_len = len(key_words)
    word_embedding_model = st_model._first_module()
    bert_model = word_embedding_model.auto_model
    tokenizer = word_embedding_model.tokenizer
    key_words_embedding = get_keyword_embedding(bert_model, tokenizer, key_words)

    embedding_weight = bert_model.embeddings.word_embeddings.weight
    embedding_weight_len = len(embedding_weight)
    tokenizer.add_tokens(key_words)
    bert_model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=32)
    embedding_weight = bert_model.embeddings.word_embeddings.weight
    with torch.no_grad():
        # initialize the new keyword rows with the averaged sub-token embeddings
        embedding_weight[embedding_weight_len:embedding_weight_len + key_words_len, :] = key_words_embedding

    if output_model_path:
        os.makedirs(output_model_path, exist_ok=True)
        word_embedding_model.save(output_model_path)
        safetensors_file = os.path.join(output_model_path, "model.safetensors")
        metadata = {'format': 'pt'}
        save_model(bert_model, safetensors_file, metadata)
        print("save model to {}".format(output_model_path))


def add_keyword_to_embedding_model(path: str = EMBEDDING_KEYWORD_FILE):
    keyword_file = os.path.join(path)
    model_name = MODEL_PATH["embed_model"][EMBEDDING_MODEL]
    model_parent_directory = os.path.dirname(model_name)
    current_time = datetime.now().strftime('%Y%m%d_%H%M%S')
    output_model_name = "{}_Merge_Keywords_{}".format(EMBEDDING_MODEL, current_time)
    output_model_path = os.path.join(model_parent_directory, output_model_name)
    add_keyword_to_model(model_name, keyword_file, output_model_path)
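A hedged usage sketch: merging the keywords from the default keyword file into the configured embedding model. All names below come from the script above; run it from the repository root so the package imports resolve:

from embeddings.add_embedding_keywords import add_keyword_to_embedding_model

# Reads EMBEDDING_KEYWORD_FILE and writes <original model name>_Merge_Keywords_<timestamp>
# into the directory of the original embedding model.
add_keyword_to_embedding_model()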
3
embeddings/embedding_keywords.txt
Normal file
@@ -0,0 +1,3 @@
Langchain-Chatchat
数据科学与大数据技术
人工智能与先进计算
BIN  img/LLM_success.png  (new file, 148 KiB)
BIN  img/agent_continue.png  (new file, 101 KiB)
BIN  img/agent_success.png  (new file, 84 KiB)
BIN  img/chatchat-qrcode.jpg  (new file, 27 KiB)
BIN  img/chatchat_icon_blue_square_v2.png  (new file, 7.1 KiB)
BIN  img/docker_logs.png  (new file, 69 KiB)
BIN  img/fastapi_docs_026.png  (new file, 75 KiB)
BIN  img/init_knowledge_base.jpg  (new file, 75 KiB)
BIN  img/knowledge_base_success.jpg  (new file, 114 KiB)
BIN  img/langchain+chatglm2.png  (new file, 124 KiB)
BIN  img/logo-long-chatchat-trans-v2.png  (new file, 48 KiB)
BIN  img/official_account_qr.png  (new file, 27 KiB)
BIN  img/official_wechat_mp_account.png  (new file, 4.1 MiB)
img/partners/autodl.svg  (new file, 9 lines, 123 KiB)
img/partners/aws.svg  (new file, 9 lines, 42 KiB)
img/partners/chatglm.svg  (new file, 55 lines, 28 KiB)
img/partners/zhenfund.svg  (new file, 9 lines, 4.9 KiB)
@@ -0,0 +1,9 @@
<svg width="654" height="213" viewBox="0 0 654 213" fill="none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<rect x="654" width="213" height="654" transform="rotate(90 654 0)" fill="url(#pattern0)"/>
<defs>
<pattern id="pattern0" patternContentUnits="objectBoundingBox" width="1" height="1">
<use xlink:href="#image0_237_57" transform="matrix(0.0204695 0 0 0.00666667 -0.00150228 0)"/>
</pattern>
<image id="image0_237_57" width="49" height="150" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADEAAACWCAYAAAB3hWBKAAAMyElEQVR4Ae1de5AcRRk/AR88C2MgIdzdfB0jIioFIoggGkoeCbnt3lAVtUALygdCUSWI0X8M3LcHiUHkEQoIuem7aKR4pCJSFFqQ6cvyCMnMXngVBORhBaG0ohaiIgqCrPX1bO/OzM7uzcztZAO1VzU1szPf1/39+uvn933d19fX+9vNSgAFuwo53J/rVYDzcoWNAtaiYNVcLw5X5guCA+YKgAoodxAFODdXEByeKhVA5KyJ/nlI2mh1CfZaAOSzLeka/GMB+uqIGDgpVwBTJX7VabP2DQqEAlZNxUPfUcCWOh9nz685pu/9SfhyoRnh1tF1YQSrljhckCSjUsFaGORDbn0zCV8uNCVuLQoLw3iSjLCvbw/k7I0GL9yehC8XmpKArzYEYdWSsBYnzQgFbG/wwmNJ+TpOh8I6qyGI7ipXJMkEl/R9ADm8XuflbHMSvlxoRoasz9cFaQyIG1CwpSXOTsUizG+6CoNDyOGuEB+H1bkImCRRnN+3Fwp4NSRQA0ziUb40NPjlJPnlRlPi7IfTAsHZxtyES5qw7mkEjGcCwmErnt4/I2leudONFAePGebwIxRsHQoot5/1snuwAMVqX9/7chesl0GvBHolkK0E/JEbHsvUQ+mJI7s0W84d4kLBrsgqvOEr8S6CuJLP3D88G822Fu8qCBSDp5jSnM69uyA4403CF+C8pklf3EQw8O6KM5jVodqdPpkR0f/pCAgvfSpd5qBZ7LBgf68D4ewPXRYpW/YoGK0fGtPuxXMPy5ZSF7lKnH0nBEKw5V0UJ1vWP1k0+GEU7M06EM52rl/St2e21LrERdNp5DBRB6HNkmyy/VQ8YqAuwLldEt/PFgV7KQQg2D6SPnPA7oLg7K0eCF39uqwJmjIkMCK3NkiTobkI87tanXqZ90qggyWAwjoh7ay1iZ73z+ugSOmTQs52TruLFdb16XPuIEcPRH1Ef7drgrNfYhGO6mDlyD8p5OzqYBvK3f2bB6QrxJyBIAgU8Os88sk9TW01r7cH9iaZfnLPtNMZlDhcFNRGiVtndzqP3NPDAhwfBIEcbss9005ngBzOD4EQsL3TeaRKjxpmqqVoyH9dt5K8nCrTThOjYH8Ol2pdsIYZp9GI499x9lCn5UqVXidAkNknVaadJp4WCA7vIGc3dN3EQ9EDWZanJW79AAtweKcLtZfebl8COhClEf6GyNkXd3uhowKWBHw/1I1ytixKQ79rJs1ADC27IY6uK+90Iwz09a3cU9TLhMAK2NIVgeMy7YGIK5VuvOtpohulHpdnTxNxpdKNd+8NTTQNdjDWZE8twvwSt04OjRMcnoqja3q3aHBu7sqJaiIkaGAQzPw+7/0TVEI9EEk01dNEwtaECw/tb2qMgVCfaX/bFQ07IdYeWa8EeiVQKwHPkWd5jv2Yp2Q1y+UqO1VcrA5jHbI+E1XANUv6976cs49H30/521P28iyCB3nSgKDdAcjhGdrysGLxoR8pcRg2/g3kek/g2JRCBwk2bx7b33PsN4ICZXlOCuL6hfM+iII9iQXrKvKfLz/TOgQ53IHcGiH3gJ67cUgHYlKNn5JF6ChPUhBITkoBt+s9S4IVDIgVC2cfhILdqCPg0oJwHVtEBfIc+1bPsTHNVdk0/qWghuOekVujyOFvtXC9paUACEM/wgdPxNQglH1kGIT9rEmwk3cSGDn7J1UhSrdm/73N98TC9rotWMAqFDCeKu9yubyXq+zX6kAcO5e42BJnS5DD76khN4GgNYuxTgrr+tQgKEHPkffUQShZdTeu/ViqkkhAjEsO2g85PIoCtmFhzj6hNiEaWz5x8dzDUlcnDULJc4IgqMtNIFdqkuWczdJBkxzWxIEg+zAKeDgTCNe95QBX2W8ZIK6yd1aruEdqKRMw4BA7DnXgpLW83sVSb0VB9gJeoB1jmUBQ3p6S6w0IuruOPZ6md5p07MQxgCVh/RiFv+EW+cAc3eUW4Sg9htBgl7ZhEwDXketcZf87CCL1s2MnjovVRmsOjw4PDX4hqjzaeJgpViRYlVILb+ZaKUBEBe/I7/cKCMdz7Punc1UmxrobK94RdfYS6ZVAcwnQIDfpyKOp349envqFnvc0c+0mb6rr1+/pKftbniNfatXNViZkwYi7rbzmcG9ibNhVtuMq2f0dYdu2rdnHVbLcSvjG+9EhA0Kvyc0YoWS1smn0k+ZbqztNAmk2G/yOHH5LE0K9VY4miNMY7G5oCNraUBDUxBP3rTs4yOM6csoTVPAMmE1L0RAIAb/Rs1oOm0pF9o1Ma+xtD645xHXk2xGB3nYd6dbGjf+Zb0EQJIir5IvmmzchrwkKF33WEz86/4azv4QCxAS8ghyuq81uz/dBMBnlb/vbVfapdUH0xE8+75btfsMUnFNNbhoLHRHjOnKz4XWVbBsDSNUEuXUmctiEgt2NBbgYOdw6zFkFOXvePxsHVmbShKdk0QhC94qyv24A0N1z5L8a3xttwv9m32u+UQMP8sU96+qkjQRwszZgC1iJAu7V6wwdGNkpEBtHQ+HSSUFQ1YsTPPiuFQgU7EF/1ad3mXmp1xNRTXhKnhPM2HPs101pR9uE58hJ88117LuCfHHPbUAsR84kcrYZs6yxPWWfZAShu+vYz2wv37ifEaKVJlw1dnKIT9lTHm7VGoR1gh9nW6tOaRdFNQtgvQfygcintWmTbE/K/m9dWLJHKblS91pKvlN/T+DV2MkGeNy91uv8DMmyQcYCDj/3Sx5eoLN0/P2vGdsEZegp+86gQBmeH48TPPguBMKYZ/SdPYcF9hXk8B9/fU222JRdLGXkKnuWp+xnMwhfpTaTZLSmfHR14mx9GJw/YlNb8Pf7ZVxjayDa4iE3pAHiKrmjokaPDQrV7lkfbCUGjwjSoICV2kCweNbByNktmU37wUQrE6MnuI5c6jpylPr+6GqP5lgVx76WeiqyHgZ5e8+9EnivlQD1MtHVXOLfajz/KJt2BV4pr53tKntjmp6pmdb+abs8cv/mOfbWZqFaL47iabsIgqpQvFC7BoQ+wm+6Gw1p/bCrQGARDtQW8YUzDjDVCzl8DTmbJDcwXSjgMhrZzfdE94oaOzsKgpyRiZhTEmERgLyjwwKepg2HtZDuJ5Gzh5DDtTWX1x30nCrpaHUiB0uqBFIQ+yDgduSwAgXs8H3X7GrtFi4CUFKXFwc+Smc+p0i2r69mb/qr0QZZyLdsWb93qkQSEl9esBg5VIgcOawmrdQOyaINuNlBUIKekmMGRO1eTChXYrIRAZ/THiEOt2FhzkwdFsFhgg7ZpcMTzflQmTRBUjQ55B05mVi6hIR+xABc45/WAjv02qEIByJnzyGHX+lzaykSjo5+TeuMJxl2lNd+yHXkK0Ft0Gw2oXypyHDhjAP0sZXcuoQYa435d/6GXCjTnY62bErUc6QKCpj
Pc7LBjsyVly6YfSyFP5SEdSEumjWXutkmoaMvPEfelI/gwQGxPQhtPKPqQtE0ZLqkSBs6Pj8mmDI23slV8qJug9ANm8P9NEaEo6Dh1ZBZk2jiqlNFyQXdBmFqhx6hBazS1g2KuqGD2Ws9lqGJvZMdKfGUOsa5kog34VTcN+EPfNYISoez0wZ16mp7Z8+aUundd0UJuI59fDDgpOLYF0wnkmZbec3MYHrk58gdh+vYF0d7KPIGPVKWmU4D2urYnwin136c6AjAOBAkhO8JGrskrVZ2KxCmNEkrW8tr9XQ4SantliA0GDIOK/t71Wp1yrPCOw0CFw3OHSlA+7DUmOr0qNFC891+cCqtaGd8wI/tqanbBA1qtDCK07Q+fJezZTRvarlXPDp/Ik8oRSe7jvxjMwhZJQ+Rq+wLW2kliyboQAZcNPApWi8gHdAQ2aukTfv6ZHl2ZBxQ8jmHJoHGnas9Rc0ru3rkPrl5Kw+MD0QTnSaIZ7RVQx99DDf7R73qY5BfjjrrQ/lGq1PUotFOK7Xg3+8GE8wGAm5Czo6subkuQw4LdOg1nVdLz4K9aIJ/g3nVn6cCQYSkFfJLxFYv3R3LstFKJhCC3ehPAMlLCpdhgZ1mQJQEnN5qt34qEIaYZq0to20c+Q9vQn67uWHLtv/2KvQPbDg80hEQUZ+0AWDuNa/q6lZa8ZSM9m5tQVC6tLXAaEI3Yq0JHa28gbRCFkCTf+w9SXWKY2yrlZRdLPKp2gT8qa0JMysIAvbEfev2dZVN4UQh33VYS1OPE4Eu9nHqZmv7Ye9Gzh7wn+FhFHBnXGHqd9MBYRKd3CRPJI9pWHhjLEgAol6dAv8aKDhWcLZMN/QhdpzJM3TvBAhKkEycrrJXNWslAQgKD2qxvdPvbtkyvculZtIMAcjjh6vGjwitvROusVvJQiYdY8psRfOuff9/T15+hUcbNtcAAAAASUVORK5CYII="/>
</defs>
</svg>
BIN  img/qr_code_100.jpg  (new file, 281 KiB)
BIN  img/qr_code_101.jpg  (new file, 244 KiB)
BIN  img/qr_code_102.jpg  (new file, 188 KiB)
BIN  img/qr_code_103.jpg  (new file, 196 KiB)
BIN  img/qr_code_104.jpg  (new file, 185 KiB)
BIN  img/qr_code_105.jpg  (new file, 252 KiB)
BIN  img/qr_code_106.jpg  (new file, 158 KiB)
BIN  img/qr_code_106_2.jpg  (new file, 208 KiB)
BIN  img/qr_code_107.jpg  (new file, 181 KiB)
BIN  img/qr_code_108.jpg  (new file, 153 KiB)
(changed image, name not captured: before 274 KiB)
BIN  img/qr_code_90.jpg  (new file, 273 KiB)
BIN  img/qr_code_90.png  (new file, 439 KiB)
BIN  img/qr_code_91.jpg  (new file, 227 KiB)
BIN  img/qr_code_92.jpg  (new file, 213 KiB)
BIN  img/qr_code_93.jpg  (new file, 226 KiB)
BIN  img/qr_code_94.jpg  (new file, 244 KiB)
BIN  img/qr_code_95.jpg  (new file, 252 KiB)
BIN  img/qr_code_96.jpg  (new file, 222 KiB)
BIN  img/qr_code_97.jpg  (new file, 200 KiB)
BIN  img/qr_code_98.jpg  (new file, 188 KiB)
BIN  img/qr_code_99.jpg  (new file, 180 KiB)
BIN  img/qrcode_90_2.jpg  (new file, 232 KiB)
(changed image, name not captured: before 900 KiB)
120
init_database.py
Normal file
@@ -0,0 +1,120 @@
import sys
sys.path.append(".")
from server.knowledge_base.migrate import (create_tables, reset_tables, import_from_db,
                                            folder2db, prune_db_docs, prune_folder_files)
from configs.model_config import NLTK_DATA_PATH, EMBEDDING_MODEL
import nltk
nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
from datetime import datetime


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="please specify only one operation at a time.")

    parser.add_argument(
        "-r",
        "--recreate-vs",
        action="store_true",
        help=('''
            recreate vector stores.
            use this option if you have copied document files to the content folder, but the vector store has not been populated or DEFAULT_VS_TYPE / EMBEDDING_MODEL changed.
            '''
        )
    )
    parser.add_argument(
        "--create-tables",
        action="store_true",
        help=("create empty tables if they do not exist")
    )
    parser.add_argument(
        "--clear-tables",
        action="store_true",
        help=("create empty tables, or drop the database tables before recreating vector stores")
    )
    parser.add_argument(
        "--import-db",
        help="import tables from the specified sqlite database"
    )
    parser.add_argument(
        "-u",
        "--update-in-db",
        action="store_true",
        help=('''
            update vector stores for files that exist in the database.
            use this option if you want to recreate vectors for files that exist in the db and skip files that only exist in the local folder.
            '''
        )
    )
    parser.add_argument(
        "-i",
        "--increment",
        action="store_true",
        help=('''
            update vector stores for files that exist in the local folder but not in the database.
            use this option if you want to create vectors incrementally.
            '''
        )
    )
    parser.add_argument(
        "--prune-db",
        action="store_true",
        help=('''
            delete docs in the database that no longer exist in the local folder.
            it is used to delete database docs after the user deleted some doc files in the file browser.
            '''
        )
    )
    parser.add_argument(
        "--prune-folder",
        action="store_true",
        help=('''
            delete doc files in the local folder that do not exist in the database.
            it is used to free local disk space by deleting unused doc files.
            '''
        )
    )
    parser.add_argument(
        "-n",
        "--kb-name",
        type=str,
        nargs="+",
        default=[],
        help=("specify knowledge base names to operate on. default is all folders that exist in KB_ROOT_PATH.")
    )
    parser.add_argument(
        "-e",
        "--embed-model",
        type=str,
        default=EMBEDDING_MODEL,
        help=("specify embeddings model.")
    )

    args = parser.parse_args()
    start_time = datetime.now()

    if args.create_tables:
        create_tables()  # confirm tables exist

    if args.clear_tables:
        reset_tables()
        print("database tables reset")

    if args.recreate_vs:
        create_tables()
        print("recreating all vector stores")
        folder2db(kb_names=args.kb_name, mode="recreate_vs", embed_model=args.embed_model)
    elif args.import_db:
        import_from_db(args.import_db)
    elif args.update_in_db:
        folder2db(kb_names=args.kb_name, mode="update_in_db", embed_model=args.embed_model)
    elif args.increment:
        folder2db(kb_names=args.kb_name, mode="increment", embed_model=args.embed_model)
    elif args.prune_db:
        prune_db_docs(args.kb_name)
    elif args.prune_folder:
        prune_folder_files(args.kb_name)

    end_time = datetime.now()
    print(f"Total time: {end_time-start_time}")
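For orientation, a hedged sketch of the two most common invocations of this script (first-time build versus incremental update), driven from Python so the example stays in the document's language; the flags are the ones defined by the parser above:

import subprocess
import sys

# First run, or after changing DEFAULT_VS_TYPE / EMBEDDING_MODEL: rebuild every vector store.
subprocess.check_call([sys.executable, "init_database.py", "--recreate-vs"])

# Later runs: only vectorize files added to the content folder since the last run.
subprocess.check_call([sys.executable, "init_database.py", "--increment"])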