Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 00:28:31 -05:00)

Compare commits: v0.2.1...summary_me (599 commits)

.coveragerc (new file, 2 lines)
@@ -0,0 +1,2 @@
[run]
relative_files = true

.devcontainer/Dockerfile
@@ -1,23 +1,13 @@
# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
ARG VARIANT=3-bullseye
FROM python:3.8
# Use an official Python base image from the Docker Hub
FROM python:3.10

RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
&& apt-get purge -y imagemagick imagemagick-6-common
# Install browsers
RUN apt-get update && apt-get install -y \
chromium-driver firefox-esr \
ca-certificates

# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897
# They are installed by the base image (python) which does not have the patch.
RUN python3 -m pip install --upgrade setuptools
# Install utilities
RUN apt-get install -y curl jq wget git

# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
# COPY requirements.txt /tmp/pip-tmp/
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
# && rm -rf /tmp/pip-tmp

# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# && apt-get -y install --no-install-recommends <your-package-list-here>

# [Optional] Uncomment this line to install global node packages.
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1
# Declare working directory
WORKDIR /workspace/Auto-GPT

.devcontainer/devcontainer.json
@@ -1,16 +1,17 @@
{
"build": {
"dockerfile": "./Dockerfile",
"context": "."
},
"dockerComposeFile": "./docker-compose.yml",
"service": "auto-gpt",
"workspaceFolder": "/workspace/Auto-GPT",
"shutdownAction": "stopCompose",
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": "true",
"username": "vscode",
"userUid": "1000",
"userGid": "1000",
"userUid": "6942",
"userGid": "6942",
"upgradePackages": "true"
},
"ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {

.devcontainer/docker-compose.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
# To boot the app run the following:
# docker-compose run auto-gpt
version: '3.9'

services:
auto-gpt:
depends_on:
- redis
build:
dockerfile: .devcontainer/Dockerfile
context: ../
tty: true
environment:
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
REDIS_HOST: ${REDIS_HOST:-redis}
volumes:
- ../:/workspace/Auto-GPT
redis:
image: 'redis/redis-stack-server:latest'

.dockerignore (new file, 8 lines)
@@ -0,0 +1,8 @@
.*
*.template
*.yaml
*.yml

*.md
*.png
!BULLETIN.md

.env.template (242 changes)
@@ -1,174 +1,214 @@
################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
EXECUTE_LOCAL_COMMANDS=False
# BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
BROWSE_CHUNK_MAX_LENGTH=8192
# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website
BROWSE_SUMMARY_MAX_TOKEN=300
# USER_AGENT - Define the user-agent used by the requests library to browse website (string)

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE=True

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml
# USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome).
# Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# USE_WEB_BROWSER=chrome

## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y
## EXIT_KEY - Key to exit AUTO-GPT
# EXIT_KEY=n

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
# TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# USE_AZURE - Use Azure OpenAI or not (Default: False)
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
## USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
TEMPERATURE=0
USE_AZURE=False
# TEMPERATURE=0
# USE_AZURE=False

### AZURE
# cleanup azure env as already moved to `azure.yaml.template`
# moved to `azure.yaml.template`

################################################################################
### LLM MODELS
################################################################################

# SMART_LLM_MODEL - Smart language model (Default: gpt-4)
# FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
SMART_LLM_MODEL=gpt-4
FAST_LLM_MODEL=gpt-3.5-turbo
## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-4
# FAST_LLM_MODEL=gpt-3.5-turbo

### LLM MODEL SETTINGS
# FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
# SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
# When using --gpt3only this needs to be set to 4000.
FAST_TOKEN_LIMIT=4000
SMART_TOKEN_LIMIT=8000
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000.
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

### MEMORY_BACKEND - Memory backend type
# local - Default
# pinecone - Pinecone (if configured)
# redis - Redis (if configured)
MEMORY_BACKEND=local
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured - also works with Zilliz)
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
# MEMORY_BACKEND=local
# MEMORY_INDEX=auto-gpt

### PINECONE
# PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
# PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=your-pinecone-region
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region

### REDIS
# REDIS_HOST - Redis host (Default: localhost)
# REDIS_PORT - Redis port (Default: 6379)
# REDIS_PASSWORD - Redis password (Default: "")
# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
# MEMORY_INDEX - Name of index created in Redis database (Default: auto-gpt)
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=
WIPE_REDIS_ON_START=False
MEMORY_INDEX=auto-gpt
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
## REDIS_PORT - Redis port (Default: 6379)
## REDIS_PASSWORD - Redis password (Default: "")
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_PASSWORD=
# WIPE_REDIS_ON_START=True

### WEAVIATE
# MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
# WEAVIATE_HOST - Weaviate host IP
# WEAVIATE_PORT - Weaviate host port
# WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
# USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
# WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
# WEAVIATE_USERNAME - Weaviate username
# WEAVIATE_PASSWORD - Weaviate password
# WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# MEMORY_INDEX - Name of index to create in Weaviate
WEAVIATE_HOST="127.0.0.1"
WEAVIATE_PORT=8080
WEAVIATE_PROTOCOL="http"
USE_WEAVIATE_EMBEDDED=False
WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
WEAVIATE_USERNAME=
WEAVIATE_PASSWORD=
WEAVIATE_API_KEY=
MEMORY_INDEX=AutoGpt
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# WEAVIATE_HOST="127.0.0.1"
# WEAVIATE_PORT=8080
# WEAVIATE_PROTOCOL="http"
# USE_WEAVIATE_EMBEDDED=False
# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
# WEAVIATE_USERNAME=
# WEAVIATE_PASSWORD=
# WEAVIATE_API_KEY=

### MILVUS
# MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
# MILVUS_COLLECTION - Milvus collection,
# change it if you want to start a new memory and retain the old memory.
MILVUS_ADDR=your-milvus-cluster-host-port
MILVUS_COLLECTION=autogpt
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443)
## MILVUS_USERNAME - username for your Milvus database
## MILVUS_PASSWORD - password for your Milvus database
## MILVUS_SECURE - True to enable TLS. (Default: False)
## Setting MILVUS_ADDR to a `https://` URL will override this setting.
## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
# MILVUS_ADDR=localhost:19530
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# MILVUS_SECURE=
# MILVUS_COLLECTION=autogpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### OPEN AI
# IMAGE_PROVIDER - Image provider (Example: dalle)
IMAGE_PROVIDER=dalle
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
## DALLE: 256, 512, 1024
# IMAGE_PROVIDER=dalle
# IMAGE_SIZE=256

### HUGGINGFACE
# STABLE DIFFUSION
# (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4)
# Set in image_gen.py)
# HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
HUGGINGFACE_API_TOKEN=your-huggingface-api-token
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token

### STABLE DIFFUSION WEBUI
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
# SD_WEBUI_AUTH=
# SD_WEBUI_URL=http://127.0.0.1:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

### HUGGINGFACE
HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h

################################################################################
### GIT Provider for repository actions
################################################################################

### GITHUB
# GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
# GITHUB_USERNAME - Github username
GITHUB_API_KEY=github_pat_123
GITHUB_USERNAME=your-github-username
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - Github username
# GITHUB_API_KEY=github_pat_123
# GITHUB_USERNAME=your-github-username

################################################################################
### SEARCH PROVIDER
### WEB BROWSING
################################################################################

### BROWSER
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# HEADLESS_BROWSER=True
# USE_WEB_BROWSER=chrome
## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 75 % of FAST_TOKEN_LIMIT is usually wise )
# BROWSE_CHUNK_MAX_LENGTH=3000
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip, and set the model name here. Example Chinese: python -m spacy download zh_core_web_sm
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

### GOOGLE
# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
GOOGLE_API_KEY=your-google-api-key
CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
# GOOGLE_API_KEY=your-google-api-key
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TTS PROVIDER
################################################################################

### MAC OS
# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
USE_MAC_OS_TTS=False
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
# USE_MAC_OS_TTS=False

### STREAMELEMENTS
# USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
USE_BRIAN_TTS=False
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
# USE_BRIAN_TTS=False

### ELEVENLABS
# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
# ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
ELEVENLABS_API_KEY=your-elevenlabs-api-key
ELEVENLABS_VOICE_1_ID=your-voice-id-1
ELEVENLABS_VOICE_2_ID=your-voice-id-2
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
# ELEVENLABS_VOICE_2_ID=your-voice-id-2

################################################################################
### TWITTER API
### TWITTER API
################################################################################

TW_CONSUMER_KEY=
TW_CONSUMER_SECRET=
TW_ACCESS_TOKEN=
TW_ACCESS_TOKEN_SECRET=
# TW_CONSUMER_KEY=
# TW_CONSUMER_SECRET=
# TW_ACCESS_TOKEN=
# TW_ACCESS_TOKEN_SECRET=

################################################################################
### ALLOWLISTED PLUGINS
################################################################################

#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=

################################################################################
### CHAT PLUGIN SETTINGS
################################################################################
# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False

.envrc (new file, 4 lines)
@@ -0,0 +1,4 @@
# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT.

[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt

.flake8 (10 changes)
@@ -1,12 +1,12 @@
[flake8]
max-line-length = 88
extend-ignore = E203
select = "E303, W293, W291, W292, E305, E231, E302"
exclude =
.tox,
__pycache__,
*.pyc,
.env
venv/*
.venv/*
reports/*
dist/*
venv*/*,
.venv/*,
reports/*,
dist/*,

.gitattributes (new file, 5 lines)
@@ -0,0 +1,5 @@
# Exclude VCR cassettes from stats
tests/**/cassettes/**.y*ml linguist-generated

# Mark documentation as such
docs/**.md linguist-documentation

.github/ISSUE_TEMPLATE/1.bug.yml (83 changes)
@@ -2,6 +2,20 @@ name: Bug report 🐛
description: Create a bug report for Auto-GPT.
labels: ['status: needs triage']
body:
- type: markdown
attributes:
value: |
### ⚠️ Before you continue
* Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
* If you need help, you can ask in the [discussions] section or in [#tech-support]
* **Throughly search the [existing issues] before creating a new one**

[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
[discord]: https://discord.gg/autogpt
[discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
[existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
- type: checkboxes
attributes:
label: ⚠️ Search for existing issues first ⚠️
@@ -28,14 +42,46 @@ body:
- Provide commit-hash (`git rev-parse HEAD` gets it)
- If it's a pip/packages issue, provide pip version, python version
- If it's a crash, provide traceback.
- type: checkboxes
- type: dropdown
attributes:
label: GPT-3 or GPT-4
label: Which Operating System are you using?
description: >
Please select the operating system you were using to run Auto-GPT when this problem occurred.
options:
- Windows
- Linux
- MacOS
- Docker
- Devcontainer / Codespace
- Windows Subsystem for Linux (WSL)
- Other (Please specify in your problem)
validations:
required: true
- type: dropdown
attributes:
label: Which version of Auto-GPT are you using?
description: |
Please select which version of Auto-GPT you were using when this issue occurred.
If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code.
**If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**.
If installed with git you can run `git branch` to see which version of Auto-GPT you are running.
options:
- Latest Release
- Stable (branch)
- Master (branch)
validations:
required: true
- type: dropdown
attributes:
label: GPT-3 or GPT-4?
description: >
If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
the limitations of GPT-3.5
the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- label: I am using Auto-GPT with GPT-3 (GPT-3.5)
- GPT-3.5
- GPT-4
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce 🕹
@@ -52,9 +98,34 @@ body:
- type: textarea
attributes:
label: Your prompt 📝
description: |
If applicable please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
description: >
If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
value: |
```yaml
# Paste your prompt here
```
- type: textarea
attributes:
label: Your Logs 📒
description: |
Please include the log showing your error and the command that caused it, if applicable.
You can copy it from your terminal or from `logs/activity.log`.
This will help us understand your issue better!

<details>
<summary><i>Example</i></summary>
```log
INFO NEXT ACTION: COMMAND = execute_shell ARGUMENTS = {'command_line': 'some_command'}
INFO -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
Traceback (most recent call last):
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
self._interpret_response_line(
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
```
</details>
value: |
```log
<insert your logs here>
```

.github/PULL_REQUEST_TEMPLATE.md (9 changes)
@@ -1,3 +1,10 @@
<!-- ⚠️ At the moment any non-essential commands are not being merged.
If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
We are expecting to ship plugin support within the week (PR #757).
Resources:
* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
-->

<!-- 📢 Announcement
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:

@@ -30,4 +37,4 @@ By following these guidelines, your PRs are more likely to be merged quickly aft

<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->

<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guide lines. -->
<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->

.github/workflows/auto_format.yml (deleted, 23 lines)
@@ -1,23 +0,0 @@
name: auto-format
on: pull_request
jobs:
format:
runs-on: ubuntu-latest
steps:
- name: Checkout PR branch
uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: autopep8
uses: peter-evans/autopep8@v1
with:
args: --exit-code --recursive --in-place --aggressive --aggressive .
- name: Check for modified files
id: git-check
run: echo "modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)" >> $GITHUB_ENV
- name: Push changes
if: steps.git-check.outputs.modified == 'true'
run: |
git config --global user.name 'Torantulino'
git config --global user.email 'toran.richards@gmail.com'
git remote set

.github/workflows/benchmarks.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
name: Run Benchmarks

on:
workflow_dispatch:

jobs:
build:
runs-on: ubuntu-latest

env:
python-version: '3.10'

steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Python ${{ env.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.python-version }}

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: benchmark
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py

.github/workflows/ci.yml (91 changes)
@@ -2,43 +2,76 @@ name: Python CI

on:
push:
branches:
- master
branches: [ master ]
pull_request:
branches:
- master
branches: [ master ]

concurrency:
group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}

jobs:
build:
lint:
runs-on: ubuntu-latest

strategy:
matrix:
python-version: [3.8]
env:
min-python-version: "3.10"

steps:
- name: Check out repository
uses: actions/checkout@v2
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.min-python-version }}

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Lint with flake8
continue-on-error: false
run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
- name: Lint with flake8
run: flake8

- name: Run unittest tests with coverage
run: |
coverage run --source=autogpt -m unittest discover tests
- name: Check black formatting
run: black . --check
if: success() || failure()

- name: Generate coverage report
run: |
coverage report
coverage xml
- name: Check isort formatting
run: isort . --check
if: success() || failure()

test:
permissions:
# Gives the action the necessary permissions for publishing new
# comments in pull requests.
pull-requests: write
# Gives the action the necessary permissions for pushing data to the
# python-coverage-comment-action branch, and for editing existing
# comments (to avoid publishing multiple comments in the same PR)
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11"]

steps:
- name: Check out repository
uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Run unittest tests with coverage
run: |
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term

- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3

.github/workflows/docker-cache-clean.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
name: Purge Docker CI cache

on:
schedule:
- cron: 20 4 * * 1,4

env:
BASE_BRANCH: master
IMAGE_NAME: auto-gpt

jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}

build_type: ${{ matrix.build-type }}

prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:

new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/docker-ci.yml (new file, 115 lines)
@@ -0,0 +1,115 @@
name: Docker CI

on:
push:
branches: [ master ]
pull_request:
branches: [ master ]

concurrency:
group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}

env:
IMAGE_NAME: auto-gpt

jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- if: runner.debug
run: |
ls -al
du -hs *

- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-${{ matrix.build-type }}
cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}

build_type: ${{ matrix.build-type }}

prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

# Docker setup needs fixing before this is going to work: #1843
test:
runs-on: ubuntu-latest
needs: build
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=dev # include pytest
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-dev
cache-to: type=gha,scope=docker-dev,mode=max

- id: test
name: Run tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
test_output=$(
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
)
test_failure=$?

echo "$test_output"

cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$test_output
\`\`\`
$EOF

.github/workflows/docker-image.yml (deleted, 18 lines)
@@ -1,18 +0,0 @@
name: Docker Image CI

on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]

jobs:

build:

runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- name: Build the Docker image
run: docker build . --file Dockerfile --tag autogpt:$(date +%s)

.github/workflows/docker-release.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
name: Docker Release

on:
release:
types: [ published, edited ]

workflow_dispatch:
inputs:
no_cache:
type: boolean
description: 'Build from scratch, without using cached layers'

env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt

jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Log in to Docker hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}

- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}

# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release
cache-to: type=gha,scope=docker-release,mode=max

- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}

prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/dockerhub-imagepush.yml (deleted, 24 lines)
@@ -1,24 +0,0 @@
name: Push Docker Image on Release

on:
push:
branches: [ "stable" ]

jobs:

build:

runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- name: Log in to Docker hub
env:
DOCKER_USER: ${{secrets.DOCKER_USER}}
DOCKER_PASSWORD: ${{secrets.DOCKER_PASSWORD}}
run: |
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
- name: Build the Docker image
run: docker build . --file Dockerfile --tag ${{secrets.DOCKER_USER}}/auto-gpt:$(git describe --tags `git rev-list --tags --max-count=1`)
- name: Docker Push
run: docker push ${{secrets.DOCKER_USER}}/auto-gpt

.github/workflows/documentation-release.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
name: Docs

on:
push:
branches: [ stable ]
paths:
- 'docs/**'
- 'mkdocs.yml'
- '.github/workflows/documentation.yml'

# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:

permissions:
contents: write

jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Python 3
uses: actions/setup-python@v4
with:
python-version: 3.x

- name: Set up workflow cache
uses: actions/cache@v3
with:
key: ${{ github.ref }}
path: .cache

- run: pip install mkdocs-material

- run: mkdocs gh-deploy --force
55
.github/workflows/pr-label.yml
vendored
Normal file
55
.github/workflows/pr-label.yml
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
name: "Pull Request auto-label"
|
||||
|
||||
on:
|
||||
# So that PRs touching the same files as the push are updated
|
||||
push:
|
||||
branches: [ master ]
|
||||
# So that the `dirtyLabel` is removed if conflicts are resolve
|
||||
# We recommend `pull_request_target` so that github secrets are available.
|
||||
# In `pull_request` we wouldn't be able to change labels of fork PRs
|
||||
pull_request_target:
|
||||
types: [ opened, synchronize ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
conflicts:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Update PRs with conflict labels
|
||||
uses: eps1lon/actions-label-merge-conflict@releases/2.x
|
||||
with:
|
||||
dirtyLabel: "conflicts"
|
||||
#removeOnDirtyLabel: "PR: ready to ship"
|
||||
repoToken: "${{ secrets.GITHUB_TOKEN }}"
|
||||
commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
|
||||
commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
|
||||
|
||||
size:
|
||||
if: ${{ github.event_name == 'pull_request_target' }}
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: codelytv/pr-size-labeler@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
xs_label: 'size/xs'
|
||||
xs_max_size: 2
|
||||
s_label: 'size/s'
|
||||
s_max_size: 10
|
||||
m_label: 'size/m'
|
||||
m_max_size: 50
|
||||
l_label: 'size/l'
|
||||
l_max_size: 200
|
||||
xl_label: 'size/xl'
|
||||
message_if_xl: >
|
||||
This PR exceeds the recommended size of 200 lines.
|
||||
Please make sure you are NOT addressing multiple issues with one PR.
|
||||
Note that this PR might be rejected due to its size.
|
||||
98
.github/workflows/scripts/docker-ci-summary.sh
vendored
Executable file
98
.github/workflows/scripts/docker-ci-summary.sh
vendored
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
|
||||
head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template)
|
||||
ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template)
|
||||
|
||||
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
|
||||
|
||||
cat << $EOF
|
||||
# Docker Build summary 🔨
|
||||
|
||||
**Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
|
||||
|
||||
**Build type:** \`$build_type\`
|
||||
|
||||
**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
|
||||
|
||||
## Image details
|
||||
|
||||
**Tags:**
|
||||
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
|
||||
|
||||
<details>
|
||||
<summary><h3>Layers</h3></summary>
|
||||
|
||||
| Age | Size | Created by instruction |
|
||||
| --------- | ------ | ---------------------- |
|
||||
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
|
||||
| grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
|
||||
| cut -f-3 `# yeet Comment column`\
|
||||
| sed 's/ ago//' `# fix Layer age`\
|
||||
| sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
|
||||
| sed 's/\$/\\$/g' `# escape variable and shell expansions`\
|
||||
| sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
|
||||
| column -t -s$'\t' -o' | ' `# align columns and add separator`\
|
||||
| sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><h3>ENV</h3></summary>
|
||||
|
||||
| Variable | Value |
|
||||
| -------- | -------- |
|
||||
$(jq -r \
|
||||
'.Config.Env
|
||||
| map(
|
||||
split("=")
|
||||
| "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
|
||||
)
|
||||
| map("| \(.) |")
|
||||
| .[]' <<< $meta
|
||||
)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Raw metadata</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$meta
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
## Build details
|
||||
**Build trigger:** $push_forced_label $event_name \`$event_ref\`
|
||||
|
||||
<details>
|
||||
<summary><code>github</code> context</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$github_context_json
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
### Source
|
||||
**HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url)
|
||||
|
||||
**Diff with previous HEAD:** $head_compare_url
|
||||
|
||||
#### New commits
|
||||
$(jq -r 'map([
|
||||
"**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**",
|
||||
.message,
|
||||
(if .committer.name != .author.name then "\n> <sub>**Committer:** \(.committer.name) <\(.committer.email)></sub>" else "" end),
|
||||
"<sub>**Timestamp:** \(.timestamp)</sub>"
|
||||
] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json)
|
||||
|
||||
### Job environment
|
||||
|
||||
#### \`vars\` context:
|
||||
\`\`\`JSON
|
||||
$vars_json
|
||||
\`\`\`
|
||||
|
||||
#### \`env\` context:
|
||||
\`\`\`JSON
|
||||
$job_env_json
|
||||
\`\`\`
|
||||
|
||||
$EOF
|
||||
85
.github/workflows/scripts/docker-release-summary.sh
vendored
Executable file
85
.github/workflows/scripts/docker-release-summary.sh
vendored
Executable file
@@ -0,0 +1,85 @@
|
||||
#!/bin/bash
|
||||
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
|
||||
|
||||
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
|
||||
|
||||
cat << $EOF
|
||||
# Docker Release Build summary 🚀🔨
|
||||
|
||||
**Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
|
||||
|
||||
**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
|
||||
|
||||
## Image details
|
||||
|
||||
**Tags:**
|
||||
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
|
||||
|
||||
<details>
|
||||
<summary><h3>Layers</h3></summary>
|
||||
|
||||
| Age | Size | Created by instruction |
|
||||
| --------- | ------ | ---------------------- |
|
||||
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
|
||||
| grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
|
||||
| cut -f-3 `# yeet Comment column`\
|
||||
| sed 's/ ago//' `# fix Layer age`\
|
||||
| sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
|
||||
| sed 's/\$/\\$/g' `# escape variable and shell expansions`\
|
||||
| sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
|
||||
| column -t -s$'\t' -o' | ' `# align columns and add separator`\
|
||||
| sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><h3>ENV</h3></summary>
|
||||
|
||||
| Variable | Value |
|
||||
| -------- | -------- |
|
||||
$(jq -r \
|
||||
'.Config.Env
|
||||
| map(
|
||||
split("=")
|
||||
| "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
|
||||
)
|
||||
| map("| \(.) |")
|
||||
| .[]' <<< $meta
|
||||
)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Raw metadata</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$meta
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
## Build details
|
||||
**Build trigger:** $event_name \`$current_ref\`
|
||||
|
||||
| Parameter | Value |
|
||||
| -------------- | ------------ |
|
||||
| \`no_cache\` | \`$inputs_no_cache\` |
|
||||
|
||||
<details>
|
||||
<summary><code>github</code> context</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$github_context_json
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
### Job environment
|
||||
|
||||
#### \`vars\` context:
|
||||
\`\`\`JSON
|
||||
$vars_json
|
||||
\`\`\`
|
||||
|
||||
#### \`env\` context:
|
||||
\`\`\`JSON
|
||||
$job_env_json
|
||||
\`\`\`
|
||||
|
||||
$EOF
|
||||
28
.github/workflows/sponsors_readme.yml
vendored
Normal file
28
.github/workflows/sponsors_readme.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
name: Generate Sponsors README
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 */12 * * *'
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout 🛎️
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Generate Sponsors 💖
|
||||
uses: JamesIves/github-sponsors-readme-action@v1
|
||||
with:
|
||||
token: ${{ secrets.README_UPDATER_PAT }}
|
||||
file: 'README.md'
|
||||
minimum: 2500
|
||||
maximum: 99999
|
||||
|
||||
- name: Deploy to GitHub Pages 🚀
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
branch: master
|
||||
folder: '.'
|
||||
token: ${{ secrets.README_UPDATER_PAT }}
|
||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -3,14 +3,13 @@ autogpt/keys.py
|
||||
autogpt/*json
|
||||
autogpt/node_modules/
|
||||
autogpt/__pycache__/keys.cpython-310.pyc
|
||||
autogpt/auto_gpt_workspace
|
||||
package-lock.json
|
||||
*.pyc
|
||||
auto_gpt_workspace/*
|
||||
*.mpeg
|
||||
.env
|
||||
azure.yaml
|
||||
*venv/*
|
||||
outputs/*
|
||||
ai_settings.yaml
|
||||
last_run_ai_settings.yaml
|
||||
.vscode
|
||||
@@ -21,6 +20,7 @@ log-ingestion.txt
|
||||
logs
|
||||
*.log
|
||||
*.mp3
|
||||
mem.sqlite3
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
@@ -95,6 +95,7 @@ instance/
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
site/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
@@ -127,13 +128,13 @@ celerybeat.pid
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.direnv/
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
venv*/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
@@ -157,3 +158,8 @@ vicuna-*
|
||||
|
||||
# mac
|
||||
.DS_Store
|
||||
|
||||
openai/
|
||||
|
||||
# news
|
||||
CURRENT_BULLETIN.md
|
||||
10
.isort.cfg
10
.isort.cfg
@@ -1,10 +1,10 @@
|
||||
[settings]
|
||||
profile = black
|
||||
multi_line_output = 3
|
||||
include_trailing_comma = True
|
||||
include_trailing_comma = true
|
||||
force_grid_wrap = 0
|
||||
use_parentheses = True
|
||||
ensure_newline_before_comments = True
|
||||
use_parentheses = true
|
||||
ensure_newline_before_comments = true
|
||||
line_length = 88
|
||||
skip = venv,env,node_modules,.env,.venv,dist
|
||||
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
|
||||
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
|
||||
skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
|
||||
|
||||
@@ -1,33 +1,32 @@
|
||||
repos:
|
||||
- repo: https://github.com/sourcery-ai/sourcery
|
||||
rev: v1.1.0 # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags
|
||||
hooks:
|
||||
- id: sourcery
|
||||
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v0.9.2
|
||||
rev: v4.4.0
|
||||
hooks:
|
||||
- id: check-added-large-files
|
||||
args: [ '--maxkb=500' ]
|
||||
args: ['--maxkb=500']
|
||||
- id: check-byte-order-marker
|
||||
- id: check-case-conflict
|
||||
- id: check-merge-conflict
|
||||
- id: check-symlinks
|
||||
- id: debug-statements
|
||||
|
||||
- repo: local
|
||||
|
||||
- repo: https://github.com/pycqa/isort
|
||||
rev: 5.12.0
|
||||
hooks:
|
||||
- id: isort
|
||||
name: isort-local
|
||||
entry: isort
|
||||
language: python
|
||||
types: [ python ]
|
||||
exclude: .+/(dist|.venv|venv|build)/.+
|
||||
pass_filenames: true
|
||||
language_version: python3.10
|
||||
|
||||
- repo: https://github.com/psf/black
|
||||
rev: 23.3.0
|
||||
hooks:
|
||||
- id: black
|
||||
name: black-local
|
||||
entry: black
|
||||
language: python
|
||||
types: [ python ]
|
||||
exclude: .+/(dist|.venv|venv|build)/.+
|
||||
pass_filenames: true
|
||||
language_version: python3.10
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: pytest-check
|
||||
name: pytest-check
|
||||
entry: pytest --cov=autogpt --without-integration --without-slow-integration
|
||||
language: system
|
||||
pass_filenames: false
|
||||
always_run: true
|
||||
|
||||
@@ -30,7 +30,7 @@ rule_settings:
|
||||
- refactoring
|
||||
- suggestion
|
||||
- comment
|
||||
python_version: '3.9' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
|
||||
python_version: '3.10' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
|
||||
|
||||
# rules: # A list of custom rules Sourcery will include in its analysis.
|
||||
# - id: no-print-statements
|
||||
@@ -68,4 +68,4 @@ rule_settings:
|
||||
# proxy:
|
||||
# url:
|
||||
# ssl_certs_file:
|
||||
# no_ssl_verify: false
|
||||
# no_ssl_verify: false
|
||||
|
||||
9
BULLETIN.md
Normal file
9
BULLETIN.md
Normal file
@@ -0,0 +1,9 @@
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag.

# INCLUDED COMMAND 'send_tweet' IS DEPRECATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE
Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins

## Changes to Docker configuration
The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.

@@ -1,4 +1,4 @@
# Code of Conduct for auto-gpt
# Code of Conduct for Auto-GPT

## 1. Purpose

@@ -37,4 +37,3 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont
## 6. Contact

If you have any questions or concerns, please contact the project maintainers.

@@ -1,29 +1,23 @@
# Contributing to ProjectName
# Contributing to Auto-GPT

First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.

This document provides guidelines and best practices to help you contribute effectively.

## Table of Contents

- [Code of Conduct](#code-of-conduct)
- [Getting Started](#getting-started)
- [How to Contribute](#how-to-contribute)
- [Reporting Bugs](#reporting-bugs)
- [Suggesting Enhancements](#suggesting-enhancements)
- [Submitting Pull Requests](#submitting-pull-requests)
- [Style Guidelines](#style-guidelines)
- [Code Formatting](#code-formatting)
- [Pre-Commit Hooks](#pre-commit-hooks)

## Code of Conduct

By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.
By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project.

[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md

## 📢 A Quick Word
Right now we will not be accepting any contributions that add non-essential commands to Auto-GPT.

However, you absolutely can still add these commands to Auto-GPT in the form of plugins.
Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).

## Getting Started

To start contributing, follow these steps:

1. Fork the repository and clone your fork.
2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
3. Make your changes in the new branch.
@@ -54,7 +48,7 @@ If you have an idea for a new feature or improvement, please create an issue on
When submitting a pull request, please ensure that your changes meet the following criteria:

- Your pull request should be atomic and focus on a single change.
- Your pull request should include tests for your change.
- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status)
- You should have thoroughly tested your changes with multiple different prompts.
- You should have considered potential risks and mitigations for your changes.
- You should have documented your changes clearly and comprehensively.
@@ -64,18 +58,23 @@ When submitting a pull request, please ensure that your changes meet the followi

### Code Formatting

We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`:
We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request.

To format your code, run the following commands in the project's root directory:

```bash
pip install black
python -m black .
python -m isort .
```

To format your code, run the following command in the project's root directory:

Or if you have these tools installed globally:
```bash
black .
isort .
```

### Pre-Commit Hooks

We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:

Install the pre-commit package using pip:
@@ -95,5 +94,55 @@ If you encounter any issues or have questions, feel free to reach out to the mai
Happy coding, and once again, thank you for your contributions!

Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts

https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+
## Testing your changes

If you add or change code, make sure the updated code is covered by tests.
To increase coverage if necessary, [write tests using pytest].

For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/).

[write tests using pytest]: https://realpython.com/pytest-python-testing/

### API-dependent tests

To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known
requests and matching responses in so-called *cassettes*, allowing us to run the tests
in CI without needing actual API access.

When changes cause a test prompt to be generated differently, it will likely miss the
cache and make a request to the API, updating the cassette with the new request+response.
*Be sure to include the updated cassette in your PR!*

When you run Pytest locally:

- If there is no prompt change, you will not consume API tokens because no new OpenAI calls are required.
- If the prompt changes in a way that the cassettes are not reusable:
  - If no API key is present, the test fails; it requires a new cassette, so add an API key to .env.
  - If the API key is present, the tests will make a real call to OpenAI.
    - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR.
    - If the test is unsuccessful:
      - Either your change made Auto-GPT less capable; in that case, you have to change your code.
      - Or the test might be poorly written; in that case, you can make suggestions to change the test.

In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break.

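For orientation, here is a minimal, hypothetical sketch of what a cassette-backed test can look like when driving VCRpy directly. The cassette directory, cassette name, and the use of the pre-v1.0 `openai.ChatCompletion` API are illustrative assumptions, not the project's actual fixture setup:

```python
import os

import openai
import vcr

# Record the HTTP interaction once, then replay it from the cassette on later runs.
my_vcr = vcr.VCR(
    cassette_library_dir="tests/cassettes",  # assumed location
    record_mode="once",
    filter_headers=["authorization"],  # keep the API key out of the cassette
)


@my_vcr.use_cassette("summarize_hello_world.yaml")
def test_summarize_hello_world():
    openai.api_key = os.getenv("OPENAI_API_KEY", "dummy-key-for-replay")
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Summarize: hello world"}],
    )
    assert response["choices"][0]["message"]["content"]
```

If a prompt change makes an existing cassette unusable, delete the stale cassette, re-run the test with a valid API key, and commit the regenerated cassette together with your PR.
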
### Community Challenges
Challenges are goals we need Auto-GPT to achieve.
To pick the challenge you like, go to the tests/integration/challenges folder and select the areas you would like to work on.
- a challenge is new if level_currently_beaten is None
- a challenge is in progress if level_currently_beaten is greater than or equal to 1
- a challenge is beaten if level_currently_beaten = max_level

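Purely to restate the rules above in code, here is a small hypothetical helper (it is not part of the test suite; the function name is made up for illustration):

```python
def classify_challenge(level_currently_beaten: int | None, max_level: int) -> str:
    """Classify a challenge using the three states described above."""
    if level_currently_beaten is None:
        return "new"
    if level_currently_beaten >= max_level:
        return "beaten"
    return "in progress"


assert classify_challenge(None, 3) == "new"
assert classify_challenge(1, 3) == "in progress"
assert classify_challenge(3, 3) == "beaten"
```
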
Here is an example of how to run memory challenge A and attempt to beat level 3:

pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3

To beat a challenge, you're not allowed to change anything in the tests folder; you have to add code in the autogpt folder.

Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline.
Don't hesitate to delete the cassettes associated with the challenge you're working on if you need to. Otherwise it will keep replaying the last run.

Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge.

47
Dockerfile
47
Dockerfile
@@ -1,27 +1,40 @@
|
||||
# Use an official Python base image from the Docker Hub
|
||||
FROM python:3.11-slim
|
||||
# 'dev' or 'release' container build
|
||||
ARG BUILD_TYPE=dev
|
||||
|
||||
# Install git
|
||||
RUN apt-get -y update
|
||||
RUN apt-get -y install git chromium-driver
|
||||
# Use an official Python base image from the Docker Hub
|
||||
FROM python:3.10-slim AS autogpt-base
|
||||
|
||||
# Install browsers
|
||||
RUN apt-get update && apt-get install -y \
|
||||
chromium-driver firefox-esr \
|
||||
ca-certificates
|
||||
|
||||
# Install utilities
|
||||
RUN apt-get install -y curl jq wget git
|
||||
|
||||
# Set environment variables
|
||||
ENV PIP_NO_CACHE_DIR=yes \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Create a non-root user and set permissions
|
||||
RUN useradd --create-home appuser
|
||||
WORKDIR /home/appuser
|
||||
RUN chown appuser:appuser /home/appuser
|
||||
USER appuser
|
||||
|
||||
# Copy the requirements.txt file and install the requirements
|
||||
COPY --chown=appuser:appuser requirements-docker.txt .
|
||||
RUN pip install --no-cache-dir --user -r requirements-docker.txt
|
||||
|
||||
# Copy the application files
|
||||
COPY --chown=appuser:appuser autogpt/ ./autogpt
|
||||
# Install the required python packages globally
|
||||
ENV PATH="$PATH:/root/.local/bin"
|
||||
COPY requirements.txt .
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["python", "-m", "autogpt"]
|
||||
|
||||
# dev build -> include everything
|
||||
FROM autogpt-base as autogpt-dev
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
WORKDIR /app
|
||||
ONBUILD COPY . ./
|
||||
|
||||
# release build -> include bare minimum
|
||||
FROM autogpt-base as autogpt-release
|
||||
RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
|
||||
pip install --no-cache-dir -r requirements.txt
|
||||
WORKDIR /app
|
||||
ONBUILD COPY autogpt/ ./autogpt
|
||||
|
||||
FROM autogpt-${BUILD_TYPE} AS auto-gpt
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
|
||||
print("Setting random seed to 42")
|
||||
random.seed(42)
|
||||
|
||||
# Load the users .env file into environment variables
|
||||
load_dotenv(verbose=True, override=True)
|
||||
|
||||
del load_dotenv
|
||||
|
||||
@@ -1,53 +1,5 @@
|
||||
"""Main script for the autogpt package."""
|
||||
import logging
|
||||
from colorama import Fore
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.args import parse_arguments
|
||||
|
||||
from autogpt.config import Config, check_openai_api_key
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
|
||||
from autogpt.prompt import construct_prompt
|
||||
|
||||
# Load environment variables from .env file
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""Main function for the script"""
|
||||
cfg = Config()
|
||||
# TODO: fill in llm values here
|
||||
check_openai_api_key()
|
||||
parse_arguments()
|
||||
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
|
||||
ai_name = ""
|
||||
prompt = construct_prompt()
|
||||
# print(prompt)
|
||||
# Initialize variables
|
||||
full_message_history = []
|
||||
next_action_count = 0
|
||||
# Make a constant:
|
||||
user_input = (
|
||||
"Determine which next command to use, and respond using the"
|
||||
" format specified above:"
|
||||
)
|
||||
# Initialize memory and make sure it is empty.
|
||||
# this is particularly important for indexing and referencing pinecone memory
|
||||
memory = get_memory(cfg, init=True)
|
||||
logger.typewriter_log(
|
||||
f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
|
||||
)
|
||||
logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
|
||||
agent = Agent(
|
||||
ai_name=ai_name,
|
||||
memory=memory,
|
||||
full_message_history=full_message_history,
|
||||
next_action_count=next_action_count,
|
||||
prompt=prompt,
|
||||
user_input=user_input,
|
||||
)
|
||||
agent.start_interaction_loop()
|
||||
|
||||
"""Auto-GPT: A GPT powered AI Assistant"""
|
||||
import autogpt.cli
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
autogpt.cli.main()
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
from colorama import Fore, Style
|
||||
from autogpt.app import execute_command, get_command
|
||||
|
||||
from autogpt.chat import chat_with_ai, create_chat_message
|
||||
from autogpt.app import execute_command, get_command
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_fixes.bracket_termination import (
|
||||
attempt_to_fix_json_by_finding_outermost_brackets,
|
||||
)
|
||||
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
|
||||
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
|
||||
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
|
||||
from autogpt.logs import logger, print_assistant_thoughts
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.spinner import Spinner
|
||||
from autogpt.utils import clean_input
|
||||
from autogpt.workspace import Workspace
|
||||
|
||||
|
||||
class Agent:
|
||||
@@ -20,9 +20,25 @@ class Agent:
|
||||
memory: The memory object to use.
|
||||
full_message_history: The full message history.
|
||||
next_action_count: The number of actions to execute.
|
||||
prompt: The prompt to use.
|
||||
user_input: The user input.
|
||||
system_prompt: The system prompt is the initial prompt that defines everything
|
||||
the AI needs to know to achieve its task successfully.
|
||||
Currently, the dynamic and customizable information in the system prompt are
|
||||
ai_name, description and goals.
|
||||
|
||||
triggering_prompt: The last sentence the AI will see before answering.
|
||||
For Auto-GPT, this prompt is:
|
||||
Determine which next command to use, and respond using the format specified
|
||||
above:
|
||||
The triggering prompt is not part of the system prompt because between the
|
||||
system prompt and the triggering
|
||||
prompt we have contextual information that can distract the AI and make it
|
||||
forget that its goal is to find the next task to achieve.
|
||||
SYSTEM PROMPT
|
||||
CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
|
||||
TRIGGERING PROMPT
|
||||
|
||||
The triggering prompt reminds the AI about its short term meta task
|
||||
(defining the next task)
|
||||
"""
|
||||
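# Illustrative sketch only (an assumption, not the actual chat_with_ai implementation):
# per the docstring above, the conversation assembled from these prompts is roughly
#
#     messages = [
#         {"role": "system", "content": system_prompt},
#         ...,  # contextual information: relevant memory, prior messages
#         {"role": "user", "content": triggering_prompt},
#     ]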
|
||||
def __init__(
|
||||
@@ -31,15 +47,26 @@ class Agent:
|
||||
memory,
|
||||
full_message_history,
|
||||
next_action_count,
|
||||
prompt,
|
||||
user_input,
|
||||
command_registry,
|
||||
config,
|
||||
system_prompt,
|
||||
triggering_prompt,
|
||||
workspace_directory,
|
||||
):
|
||||
cfg = Config()
|
||||
self.ai_name = ai_name
|
||||
self.memory = memory
|
||||
self.summary_memory = (
|
||||
"I was created." # Initial memory necessary to avoid hallucination
|
||||
)
|
||||
self.last_memory_index = 0
|
||||
self.full_message_history = full_message_history
|
||||
self.next_action_count = next_action_count
|
||||
self.prompt = prompt
|
||||
self.user_input = user_input
|
||||
self.command_registry = command_registry
|
||||
self.config = config
|
||||
self.system_prompt = system_prompt
|
||||
self.triggering_prompt = triggering_prompt
|
||||
self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)
|
||||
|
||||
def start_interaction_loop(self):
|
||||
# Interaction Loop
|
||||
@@ -47,6 +74,8 @@ class Agent:
|
||||
loop_count = 0
|
||||
command_name = None
|
||||
arguments = None
|
||||
user_input = ""
|
||||
|
||||
while True:
|
||||
# Discontinue if continuous limit is reached
|
||||
loop_count += 1
|
||||
@@ -59,32 +88,42 @@ class Agent:
|
||||
"Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
|
||||
)
|
||||
break
|
||||
|
||||
# Send message to AI, get response
|
||||
with Spinner("Thinking... "):
|
||||
assistant_reply = chat_with_ai(
|
||||
self.prompt,
|
||||
self.user_input,
|
||||
self,
|
||||
self.system_prompt,
|
||||
self.triggering_prompt,
|
||||
self.full_message_history,
|
||||
self.memory,
|
||||
cfg.fast_token_limit,
|
||||
) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
|
||||
|
||||
# Print Assistant thoughts
|
||||
print_assistant_thoughts(self.ai_name, assistant_reply)
|
||||
assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_post_planning():
|
||||
continue
|
||||
assistant_reply_json = plugin.post_planning(self, assistant_reply_json)
|
||||
|
||||
# Get command name and arguments
|
||||
try:
|
||||
command_name, arguments = get_command(
|
||||
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
|
||||
)
|
||||
if cfg.speak_mode:
|
||||
say_text(f"I want to execute {command_name}")
|
||||
except Exception as e:
|
||||
logger.error("Error: \n", str(e))
|
||||
# Print Assistant thoughts
|
||||
if assistant_reply_json != {}:
|
||||
validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
|
||||
# Get command name and arguments
|
||||
try:
|
||||
print_assistant_thoughts(
|
||||
self.ai_name, assistant_reply_json, cfg.speak_mode
|
||||
)
|
||||
command_name, arguments = get_command(assistant_reply_json)
|
||||
if cfg.speak_mode:
|
||||
say_text(f"I want to execute {command_name}")
|
||||
|
||||
arguments = self._resolve_pathlike_command_args(arguments)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error: \n", str(e))
|
||||
|
||||
if not cfg.continuous_mode and self.next_action_count == 0:
|
||||
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
|
||||
# ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
|
||||
# Get key press: Prompt the user to press enter to continue or escape
|
||||
# to exit
|
||||
self.user_input = ""
|
||||
@@ -94,48 +133,74 @@ class Agent:
|
||||
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
|
||||
f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
|
||||
)
|
||||
print(
|
||||
"Enter 'y' to authorise command, 'y -N' to run N continuous "
|
||||
"commands, 'n' to exit program, or enter feedback for "
|
||||
f"{self.ai_name}...",
|
||||
flush=True,
|
||||
|
||||
logger.info(
|
||||
"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
|
||||
"'n' to exit program, or enter feedback for "
|
||||
f"{self.ai_name}..."
|
||||
)
|
||||
while True:
|
||||
console_input = clean_input(
|
||||
Fore.MAGENTA + "Input:" + Style.RESET_ALL
|
||||
)
|
||||
if console_input.lower().rstrip() == "y":
|
||||
self.user_input = "GENERATE NEXT COMMAND JSON"
|
||||
if cfg.chat_messages_enabled:
|
||||
console_input = clean_input("Waiting for your response...")
|
||||
else:
|
||||
console_input = clean_input(
|
||||
Fore.MAGENTA + "Input:" + Style.RESET_ALL
|
||||
)
|
||||
if console_input.lower().strip() == cfg.authorise_key:
|
||||
user_input = "GENERATE NEXT COMMAND JSON"
|
||||
break
|
||||
elif console_input.lower().startswith("y -"):
|
||||
elif console_input.lower().strip() == "s":
|
||||
logger.typewriter_log(
|
||||
"-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
|
||||
Fore.GREEN,
|
||||
"",
|
||||
)
|
||||
thoughts = assistant_reply_json.get("thoughts", {})
|
||||
self_feedback_resp = self.get_self_feedback(
|
||||
thoughts, cfg.fast_llm_model
|
||||
)
|
||||
logger.typewriter_log(
|
||||
f"SELF FEEDBACK: {self_feedback_resp}",
|
||||
Fore.YELLOW,
|
||||
"",
|
||||
)
|
||||
if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
|
||||
user_input = "GENERATE NEXT COMMAND JSON"
|
||||
else:
|
||||
user_input = self_feedback_resp
|
||||
break
|
||||
elif console_input.lower().strip() == "":
|
||||
logger.warn("Invalid input format.")
|
||||
continue
|
||||
elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
|
||||
try:
|
||||
self.next_action_count = abs(
|
||||
int(console_input.split(" ")[1])
|
||||
)
|
||||
self.user_input = "GENERATE NEXT COMMAND JSON"
|
||||
user_input = "GENERATE NEXT COMMAND JSON"
|
||||
except ValueError:
|
||||
print(
|
||||
logger.warn(
|
||||
"Invalid input format. Please enter 'y -n' where n is"
|
||||
" the number of continuous tasks."
|
||||
)
|
||||
continue
|
||||
break
|
||||
elif console_input.lower() == "n":
|
||||
self.user_input = "EXIT"
|
||||
elif console_input.lower() == cfg.exit_key:
|
||||
user_input = "EXIT"
|
||||
break
|
||||
else:
|
||||
self.user_input = console_input
|
||||
user_input = console_input
|
||||
command_name = "human_feedback"
|
||||
break
|
||||
|
||||
if self.user_input == "GENERATE NEXT COMMAND JSON":
|
||||
if user_input == "GENERATE NEXT COMMAND JSON":
|
||||
logger.typewriter_log(
|
||||
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
|
||||
Fore.MAGENTA,
|
||||
"",
|
||||
)
|
||||
elif self.user_input == "EXIT":
|
||||
print("Exiting...", flush=True)
|
||||
elif user_input == "EXIT":
|
||||
logger.info("Exiting...")
|
||||
break
|
||||
else:
|
||||
# Print command
|
||||
@@ -152,23 +217,29 @@ class Agent:
|
||||
f"Command {command_name} threw the following error: {arguments}"
|
||||
)
|
||||
elif command_name == "human_feedback":
|
||||
result = f"Human feedback: {self.user_input}"
|
||||
result = f"Human feedback: {user_input}"
|
||||
else:
|
||||
result = (
|
||||
f"Command {command_name} returned: "
|
||||
f"{execute_command(command_name, arguments)}"
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_pre_command():
|
||||
continue
|
||||
command_name, arguments = plugin.pre_command(
|
||||
command_name, arguments
|
||||
)
|
||||
command_result = execute_command(
|
||||
self.command_registry,
|
||||
command_name,
|
||||
arguments,
|
||||
self.config.prompt_generator,
|
||||
)
|
||||
result = f"Command {command_name} returned: " f"{command_result}"
|
||||
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_post_command():
|
||||
continue
|
||||
result = plugin.post_command(command_name, result)
|
||||
if self.next_action_count > 0:
|
||||
self.next_action_count -= 1
|
||||
|
||||
memory_to_add = (
|
||||
f"Assistant Reply: {assistant_reply} "
|
||||
f"\nResult: {result} "
|
||||
f"\nHuman Feedback: {self.user_input} "
|
||||
)
|
||||
|
||||
self.memory.add(memory_to_add)
|
||||
|
||||
# Check if there's a result from the command append it to the message
|
||||
# history
|
||||
if result is not None:
|
||||
@@ -181,3 +252,39 @@ class Agent:
|
||||
logger.typewriter_log(
|
||||
"SYSTEM: ", Fore.YELLOW, "Unable to execute command"
|
||||
)
|
||||
|
||||
def _resolve_pathlike_command_args(self, command_args):
|
||||
if "directory" in command_args and command_args["directory"] in {"", "/"}:
|
||||
command_args["directory"] = str(self.workspace.root)
|
||||
else:
|
||||
for pathlike in ["filename", "directory", "clone_path"]:
|
||||
if pathlike in command_args:
|
||||
command_args[pathlike] = str(
|
||||
self.workspace.get_path(command_args[pathlike])
|
||||
)
|
||||
return command_args
|
||||
|
||||
def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
|
||||
"""Generates a feedback response based on the provided thoughts dictionary.
|
||||
This method takes in a dictionary of thoughts containing keys such as 'reasoning',
|
||||
'plan', 'thoughts', and 'criticism'. It combines these elements into a single
|
||||
feedback message and uses the create_chat_completion() function to generate a
|
||||
response based on the input message.
|
||||
Args:
|
||||
thoughts (dict): A dictionary containing thought elements like reasoning,
|
||||
plan, thoughts, and criticism.
|
||||
Returns:
|
||||
str: A feedback response generated using the provided thoughts dictionary.
|
||||
"""
|
||||
ai_role = self.config.ai_role
|
||||
|
||||
feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution."
|
||||
reasoning = thoughts.get("reasoning", "")
|
||||
plan = thoughts.get("plan", "")
|
||||
thought = thoughts.get("thoughts", "")
|
||||
criticism = thoughts.get("criticism", "")
|
||||
feedback_thoughts = thought + reasoning + plan + criticism
|
||||
return create_chat_completion(
|
||||
[{"role": "user", "content": feedback_prompt + feedback_thoughts}],
|
||||
llm_model,
|
||||
)
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
"""Agent manager for managing GPT agents"""
|
||||
from __future__ import annotations
|
||||
|
||||
from autogpt.llm_utils import create_chat_completion
|
||||
from autogpt.config.config import Singleton
|
||||
from typing import List
|
||||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.llm import Message, create_chat_completion
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
||||
class AgentManager(metaclass=Singleton):
|
||||
@@ -11,6 +14,7 @@ class AgentManager(metaclass=Singleton):
|
||||
def __init__(self):
|
||||
self.next_key = 0
|
||||
self.agents = {} # key, (task, full_message_history, model)
|
||||
self.cfg = Config()
|
||||
|
||||
# Create new GPT agent
|
||||
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
|
||||
@@ -26,19 +30,32 @@ class AgentManager(metaclass=Singleton):
|
||||
Returns:
|
||||
The key of the new agent
|
||||
"""
|
||||
messages = [
|
||||
messages: List[Message] = [
|
||||
{"role": "user", "content": prompt},
|
||||
]
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
messages.extend(iter(plugin_messages))
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
|
||||
# Update full message history
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
|
||||
plugins_reply = ""
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
key = self.next_key
|
||||
# This is done instead of len(agents) to make keys unique even if agents
|
||||
# are deleted
|
||||
@@ -46,6 +63,11 @@ class AgentManager(metaclass=Singleton):
|
||||
|
||||
self.agents[key] = (task, messages, model)
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
continue
|
||||
agent_reply = plugin.post_instruction(agent_reply)
|
||||
|
||||
return key, agent_reply
|
||||
|
||||
def message_agent(self, key: str | int, message: str) -> str:
|
||||
@@ -63,15 +85,37 @@ class AgentManager(metaclass=Singleton):
|
||||
# Add user message to message history before sending to agent
|
||||
messages.append({"role": "user", "content": message})
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
for plugin_message in plugin_messages:
|
||||
messages.append(plugin_message)
|
||||
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
|
||||
# Update full message history
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
|
||||
plugins_reply = agent_reply
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
# Update full message history
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
continue
|
||||
agent_reply = plugin.post_instruction(agent_reply)
|
||||
|
||||
return agent_reply
|
||||
|
||||
def list_agents(self) -> list[tuple[str | int, str]]:
|
||||
@@ -84,7 +128,7 @@ class AgentManager(metaclass=Singleton):
|
||||
# Return a list of agent keys and their tasks
|
||||
return [(key, task) for key, (task, _, _) in self.agents.items()]
|
||||
|
||||
def delete_agent(self, key: Union[str, int]) -> bool:
|
||||
def delete_agent(self, key: str | int) -> bool:
|
||||
"""Delete an agent from the agent manager
|
||||
|
||||
Args:
|
||||
|
||||
160
autogpt/app.py
160
autogpt/app.py
@@ -1,31 +1,17 @@
|
||||
""" Command and Control """
|
||||
import json
|
||||
from typing import List, NoReturn, Union
|
||||
from typing import Dict, List, NoReturn, Union
|
||||
|
||||
from autogpt.agent.agent_manager import AgentManager
|
||||
from autogpt.commands.evaluate_code import evaluate_code
|
||||
from autogpt.commands.google_search import google_official_search, google_search
|
||||
from autogpt.commands.improve_code import improve_code
|
||||
from autogpt.commands.write_tests import write_tests
|
||||
from autogpt.config import Config
|
||||
from autogpt.commands.image_gen import generate_image
|
||||
from autogpt.commands.audio_text import read_audio_from_file
|
||||
from autogpt.commands.command import CommandRegistry, command
|
||||
from autogpt.commands.web_requests import scrape_links, scrape_text
|
||||
from autogpt.commands.execute_code import execute_python_file, execute_shell
|
||||
from autogpt.commands.file_operations import (
|
||||
append_to_file,
|
||||
delete_file,
|
||||
read_file,
|
||||
search_files,
|
||||
write_to_file,
|
||||
)
|
||||
from autogpt.json_fixes.parsing import fix_and_parse_json
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.processing.text import summarize_text
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.commands.web_selenium import browse_website
|
||||
from autogpt.commands.git_operations import clone_repository
|
||||
from autogpt.commands.twitter import send_tweet
|
||||
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
AGENT_MANAGER = AgentManager()
|
||||
@@ -47,11 +33,11 @@ def is_valid_int(value: str) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def get_command(response: str):
|
||||
def get_command(response_json: Dict):
|
||||
"""Parse the response and return the command name and arguments
|
||||
|
||||
Args:
|
||||
response (str): The response from the user
|
||||
response_json (json): The response from the AI
|
||||
|
||||
Returns:
|
||||
tuple: The command name and arguments
|
||||
@@ -62,8 +48,6 @@ def get_command(response: str):
|
||||
Exception: If any other error occurs
|
||||
"""
|
||||
try:
|
||||
response_json = fix_and_parse_json(response)
|
||||
|
||||
if "command" not in response_json:
|
||||
return "Error:", "Missing 'command' object in JSON"
|
||||
|
||||
@@ -105,7 +89,12 @@ def map_command_synonyms(command_name: str):
|
||||
return command_name
|
||||
|
||||
|
||||
def execute_command(command_name: str, arguments):
|
||||
def execute_command(
|
||||
command_registry: CommandRegistry,
|
||||
command_name: str,
|
||||
arguments,
|
||||
prompt: PromptGenerator,
|
||||
):
|
||||
"""Execute the command and return the result
|
||||
|
||||
Args:
|
||||
@@ -113,92 +102,33 @@ def execute_command(command_name: str, arguments):
|
||||
arguments (dict): The arguments for the command
|
||||
|
||||
Returns:
|
||||
str: The result of the command"""
|
||||
memory = get_memory(CFG)
|
||||
|
||||
str: The result of the command
|
||||
"""
|
||||
try:
|
||||
command_name = map_command_synonyms(command_name)
|
||||
if command_name == "google":
|
||||
# Check if the Google API key is set and use the official search method
|
||||
# If the API key is not set or has only whitespaces, use the unofficial
|
||||
# search method
|
||||
key = CFG.google_api_key
|
||||
if key and key.strip() and key != "your-google-api-key":
|
||||
google_result = google_official_search(arguments["input"])
|
||||
return google_result
|
||||
else:
|
||||
google_result = google_search(arguments["input"])
|
||||
cmd = command_registry.commands.get(command_name)
|
||||
|
||||
# google_result can be a list or a string depending on the search results
|
||||
if isinstance(google_result, list):
|
||||
safe_message = [google_result_single.encode('utf-8', 'ignore') for google_result_single in google_result]
|
||||
else:
|
||||
safe_message = google_result.encode('utf-8', 'ignore')
|
||||
# If the command is found, call it with the provided arguments
|
||||
if cmd:
|
||||
return cmd(**arguments)
|
||||
|
||||
# TODO: Remove commands below after they are moved to the command registry.
|
||||
command_name = map_command_synonyms(command_name.lower())
|
||||
|
||||
if command_name == "memory_add":
|
||||
return get_memory(CFG).add(arguments["string"])
|
||||
|
||||
return str(safe_message)
|
||||
elif command_name == "memory_add":
|
||||
return memory.add(arguments["string"])
|
||||
elif command_name == "start_agent":
|
||||
return start_agent(
|
||||
arguments["name"], arguments["task"], arguments["prompt"]
|
||||
)
|
||||
elif command_name == "message_agent":
|
||||
return message_agent(arguments["key"], arguments["message"])
|
||||
elif command_name == "list_agents":
|
||||
return list_agents()
|
||||
elif command_name == "delete_agent":
|
||||
return delete_agent(arguments["key"])
|
||||
elif command_name == "get_text_summary":
|
||||
return get_text_summary(arguments["url"], arguments["question"])
|
||||
elif command_name == "get_hyperlinks":
|
||||
return get_hyperlinks(arguments["url"])
|
||||
elif command_name == "clone_repository":
|
||||
return clone_repository(
|
||||
arguments["repository_url"], arguments["clone_path"]
|
||||
)
|
||||
elif command_name == "read_file":
|
||||
return read_file(arguments["file"])
|
||||
elif command_name == "write_to_file":
|
||||
return write_to_file(arguments["file"], arguments["text"])
|
||||
elif command_name == "append_to_file":
|
||||
return append_to_file(arguments["file"], arguments["text"])
|
||||
elif command_name == "delete_file":
|
||||
return delete_file(arguments["file"])
|
||||
elif command_name == "search_files":
|
||||
return search_files(arguments["directory"])
|
||||
elif command_name == "browse_website":
|
||||
return browse_website(arguments["url"], arguments["question"])
|
||||
# TODO: Change these to take in a file rather than pasted code, if
|
||||
# non-file is given, return instructions "Input should be a python
|
||||
# filepath, write your code to file and try again"
|
||||
elif command_name == "evaluate_code":
|
||||
return evaluate_code(arguments["code"])
|
||||
elif command_name == "improve_code":
|
||||
return improve_code(arguments["suggestions"], arguments["code"])
|
||||
elif command_name == "write_tests":
|
||||
return write_tests(arguments["code"], arguments.get("focus"))
|
||||
elif command_name == "execute_python_file": # Add this command
|
||||
return execute_python_file(arguments["file"])
|
||||
elif command_name == "execute_shell":
|
||||
if CFG.execute_local_commands:
|
||||
return execute_shell(arguments["command_line"])
|
||||
else:
|
||||
return (
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction."
|
||||
)
|
||||
elif command_name == "read_audio_from_file":
|
||||
return read_audio_from_file(arguments["file"])
|
||||
elif command_name == "generate_image":
|
||||
return generate_image(arguments["prompt"])
|
||||
elif command_name == "send_tweet":
|
||||
return send_tweet(arguments["text"])
|
||||
elif command_name == "do_nothing":
|
||||
return "No action performed."
|
||||
# filepath, write your code to file and try again
|
||||
elif command_name == "task_complete":
|
||||
shutdown()
|
||||
else:
|
||||
for command in prompt.commands:
|
||||
if (
|
||||
command_name == command["label"].lower()
|
||||
or command_name == command["name"].lower()
|
||||
):
|
||||
return command["function"](**arguments)
|
||||
return (
|
||||
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
|
||||
" list for available commands and only respond in the specified JSON"
|
||||
@@ -208,8 +138,12 @@ def execute_command(command_name: str, arguments):
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command(
|
||||
"get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
|
||||
)
|
||||
@validate_url
|
||||
def get_text_summary(url: str, question: str) -> str:
|
||||
"""Return the results of a google search
|
||||
"""Get a text summary of a webpage, guided by a question
|
||||
|
||||
Args:
|
||||
url (str): The url to scrape
|
||||
@@ -223,8 +157,10 @@ def get_text_summary(url: str, question: str) -> str:
|
||||
return f""" "Result" : {summary}"""
|
||||
|
||||
|
||||
@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
|
||||
@validate_url
|
||||
def get_hyperlinks(url: str) -> Union[str, List[str]]:
|
||||
"""Return the results of a google search
|
||||
"""Get the hyperlinks from a webpage
|
||||
|
||||
Args:
|
||||
url (str): The url to scrape
|
||||
@@ -237,10 +173,15 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
|
||||
|
||||
def shutdown() -> NoReturn:
|
||||
"""Shut down the program"""
|
||||
print("Shutting down...")
|
||||
logger.info("Shutting down...")
|
||||
quit()
|
||||
|
||||
|
||||
@command(
|
||||
"start_agent",
|
||||
"Start GPT Agent",
|
||||
'"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
|
||||
)
|
||||
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
|
||||
"""Start an agent with a given name, task, and prompt
|
||||
|
||||
@@ -273,6 +214,7 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
|
||||
return f"Agent {name} created with key {key}. First response: {agent_response}"
|
||||
|
||||
|
||||
@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
|
||||
def message_agent(key: str, message: str) -> str:
|
||||
"""Message an agent with a given key and message"""
|
||||
# Check if the key is a valid integer
|
||||
@@ -287,7 +229,8 @@ def message_agent(key: str, message: str) -> str:
|
||||
return agent_response
|
||||
|
||||
|
||||
def list_agents():
|
||||
@command("list_agents", "List GPT Agents", "")
|
||||
def list_agents() -> str:
|
||||
"""List all agents
|
||||
|
||||
Returns:
|
||||
@@ -298,6 +241,7 @@ def list_agents():
|
||||
)
|
||||
|
||||
|
||||
@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
|
||||
def delete_agent(key: str) -> str:
|
||||
"""Delete an agent with a given key
|
||||
|
||||
|
||||
137
autogpt/args.py
137
autogpt/args.py
@@ -1,137 +0,0 @@
|
||||
"""This module contains the argument parsing logic for the script."""
|
||||
import argparse
|
||||
|
||||
from colorama import Fore
|
||||
from autogpt import utils
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_supported_memory_backends
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def parse_arguments() -> None:
|
||||
"""Parses the arguments passed to the script
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
CFG.set_debug_mode(False)
|
||||
CFG.set_continuous_mode(False)
|
||||
CFG.set_speak_mode(False)
|
||||
|
||||
parser = argparse.ArgumentParser(description="Process arguments.")
|
||||
parser.add_argument(
|
||||
"--continuous", "-c", action="store_true", help="Enable Continuous Mode"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--continuous-limit",
|
||||
"-l",
|
||||
type=int,
|
||||
dest="continuous_limit",
|
||||
help="Defines the number of times to run in continuous mode",
|
||||
)
|
||||
parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
|
||||
parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
|
||||
parser.add_argument(
|
||||
"--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
|
||||
)
|
||||
parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
|
||||
parser.add_argument(
|
||||
"--use-memory",
|
||||
"-m",
|
||||
dest="memory_type",
|
||||
help="Defines which Memory backend to use",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--skip-reprompt",
|
||||
"-y",
|
||||
dest="skip_reprompt",
|
||||
action="store_true",
|
||||
help="Skips the re-prompting messages at the beginning of the script",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--use-browser",
|
||||
"-b",
|
||||
dest="browser_name",
|
||||
help="Specifies which web-browser to use when using selenium to scrape the web.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ai-settings",
|
||||
"-C",
|
||||
dest="ai_settings_file",
|
||||
help="Specifies which ai_settings.yaml file to use, will also automatically"
|
||||
" skip the re-prompt.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.debug:
|
||||
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_debug_mode(True)
|
||||
|
||||
if args.continuous:
|
||||
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
"Continuous mode is not recommended. It is potentially dangerous and may"
|
||||
" cause your AI to run forever or carry out actions you would not usually"
|
||||
" authorise. Use at your own risk.",
|
||||
)
|
||||
CFG.set_continuous_mode(True)
|
||||
|
||||
if args.continuous_limit:
|
||||
logger.typewriter_log(
|
||||
"Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
|
||||
)
|
||||
CFG.set_continuous_limit(args.continuous_limit)
|
||||
|
||||
# Check if continuous limit is used without continuous mode
|
||||
if args.continuous_limit and not args.continuous:
|
||||
parser.error("--continuous-limit can only be used with --continuous")
|
||||
|
||||
if args.speak:
|
||||
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_speak_mode(True)
|
||||
|
||||
if args.gpt3only:
|
||||
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_smart_llm_model(CFG.fast_llm_model)
|
||||
|
||||
if args.gpt4only:
|
||||
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_fast_llm_model(CFG.smart_llm_model)
|
||||
|
||||
if args.memory_type:
|
||||
supported_memory = get_supported_memory_backends()
|
||||
chosen = args.memory_type
|
||||
if chosen not in supported_memory:
|
||||
logger.typewriter_log(
|
||||
"ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
|
||||
Fore.RED,
|
||||
f"{supported_memory}",
|
||||
)
|
||||
logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
|
||||
else:
|
||||
CFG.memory_backend = chosen
|
||||
|
||||
if args.skip_reprompt:
|
||||
logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
|
||||
CFG.skip_reprompt = True
|
||||
|
||||
if args.ai_settings_file:
|
||||
file = args.ai_settings_file
|
||||
|
||||
# Validate file
|
||||
(validated, message) = utils.validate_yaml_file(file)
|
||||
if not validated:
|
||||
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
|
||||
logger.double_check()
|
||||
exit(1)
|
||||
|
||||
logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
|
||||
CFG.ai_settings_file = file
|
||||
CFG.skip_reprompt = True
|
||||
|
||||
if args.browser_name:
|
||||
CFG.selenium_web_browser = args.browser_name
|
||||
109
autogpt/cli.py
Normal file
109
autogpt/cli.py
Normal file
@@ -0,0 +1,109 @@
"""Main script for the autogpt package."""
import click


@click.group(invoke_without_command=True)
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-settings",
    "-C",
    help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
)
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
@click.option(
    "--use-memory",
    "-m",
    "memory_type",
    type=str,
    help="Defines which Memory backend to use",
)
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows Auto-GPT to download files natively.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="Specifies whether to suppress the output of latest news on startup.",
)
@click.option(
    # TODO: this is a hidden option for now, necessary for integration testing.
    # We should make this public once we're ready to roll out agent specific workspaces.
    "--workspace-directory",
    "-w",
    type=click.Path(),
    hidden=True,
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    workspace_directory: str,
    install_plugin_deps: bool,
) -> None:
    """
    Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.

    Start an Auto-GPT assistant.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.main import run_auto_gpt

    if ctx.invoked_subcommand is None:
        run_auto_gpt(
            continuous,
            continuous_limit,
            ai_settings,
            skip_reprompt,
            speak,
            debug,
            gpt3only,
            gpt4only,
            memory_type,
            browser_name,
            allow_downloads,
            skip_news,
            workspace_directory,
            install_plugin_deps,
        )


if __name__ == "__main__":
    main()
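The new autogpt/cli.py above is a thin Click wrapper: every option is collected by main() and passed straight through to run_auto_gpt. Since the hidden --workspace-directory option is described as existing for integration testing, a minimal sketch of how such a test could drive the entry point in-process is shown below; the patch target, fixture, and assertions are assumptions for illustration, not code from this changeset.

# Hedged sketch: exercising the Click entry point with click.testing.CliRunner.
from unittest import mock

from click.testing import CliRunner

from autogpt.cli import main


def test_cli_forwards_flags(tmp_path):
    runner = CliRunner()
    # run_auto_gpt is patched so the smoke test never starts a real agent.
    with mock.patch("autogpt.main.run_auto_gpt") as run_auto_gpt:
        result = runner.invoke(
            main, ["--skip-news", "--workspace-directory", str(tmp_path)]
        )
    assert result.exit_code == 0
    # workspace_directory is the 13th positional argument forwarded to run_auto_gpt.
    assert run_auto_gpt.call_args[0][12] == str(tmp_path)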
@@ -1,10 +1,16 @@
"""Code evaluation module."""
from __future__ import annotations

from autogpt.llm_utils import call_ai_function
from autogpt.commands.command import command
from autogpt.llm import call_ai_function


def evaluate_code(code: str) -> list[str]:
@command(
    "analyze_code",
    "Analyze Code",
    '"code": "<full_code_string>"',
)
def analyze_code(code: str) -> list[str]:
    """
    A function that takes in a string and returns a response from create chat
    completion api call.

@@ -16,10 +22,10 @@ def evaluate_code(code: str) -> list[str]:
    improve the code.
    """

    function_string = "def analyze_code(code: str) -> List[str]:"
    function_string = "def analyze_code(code: str) -> list[str]:"
    args = [code]
    description_string = (
        "Analyzes the given code and returns a list of suggestions" " for improvements."
        "Analyzes the given code and returns a list of suggestions for improvements."
    )

    return call_ai_function(function_string, args, description_string)
@@ -1,23 +1,49 @@
import requests
"""Commands for converting audio to text."""
import json

import requests

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace

cfg = Config()
CFG = Config()


def read_audio_from_file(audio_path):
    audio_path = path_in_workspace(audio_path)
    with open(audio_path, "rb") as audio_file:
@command(
    "read_audio_from_file",
    "Convert Audio to text",
    '"filename": "<filename>"',
    CFG.huggingface_audio_to_text_model,
    "Configure huggingface_audio_to_text_model.",
)
def read_audio_from_file(filename: str) -> str:
    """
    Convert audio to text.

    Args:
        filename (str): The path to the audio file

    Returns:
        str: The text from the audio
    """
    with open(filename, "rb") as audio_file:
        audio = audio_file.read()
    return read_audio(audio)


def read_audio(audio):
    model = cfg.huggingface_audio_to_text_model
def read_audio(audio: bytes) -> str:
    """
    Convert audio to text.

    Args:
        audio (bytes): The audio to convert

    Returns:
        str: The text from the audio
    """
    model = CFG.huggingface_audio_to_text_model
    api_url = f"https://api-inference.huggingface.co/models/{model}"
    api_token = cfg.huggingface_api_token
    api_token = CFG.huggingface_api_token
    headers = {"Authorization": f"Bearer {api_token}"}

    if api_token is None:
@@ -32,4 +58,4 @@ def read_audio(audio):
    )

    text = json.loads(response.content.decode("utf-8"))["text"]
    return "The audio says: " + text
    return f"The audio says: {text}"
autogpt/commands/command.py (new file, 156 lines)
@@ -0,0 +1,156 @@
import functools
import importlib
import inspect
from typing import Any, Callable, Optional

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class Command:
    """A class representing a command.

    Attributes:
        name (str): The name of the command.
        description (str): A brief description of what the command does.
        signature (str): The signature of the function that the command executes. Defaults to None.
    """

    def __init__(
        self,
        name: str,
        description: str,
        method: Callable[..., Any],
        signature: str = "",
        enabled: bool = True,
        disabled_reason: Optional[str] = None,
    ):
        self.name = name
        self.description = description
        self.method = method
        self.signature = signature if signature else str(inspect.signature(self.method))
        self.enabled = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args, **kwargs) -> Any:
        if not self.enabled:
            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
        return self.method(*args, **kwargs)

    def __str__(self) -> str:
        return f"{self.name}: {self.description}, args: {self.signature}"


class CommandRegistry:
    """
    The CommandRegistry class is a manager for a collection of Command objects.
    It allows the registration, modification, and retrieval of Command objects,
    as well as the scanning and loading of command plugins from a specified
    directory.
    """

    def __init__(self):
        self.commands = {}

    def _import_module(self, module_name: str) -> Any:
        return importlib.import_module(module_name)

    def _reload_module(self, module: Any) -> Any:
        return importlib.reload(module)

    def register(self, cmd: Command) -> None:
        self.commands[cmd.name] = cmd

    def unregister(self, command_name: str):
        if command_name in self.commands:
            del self.commands[command_name]
        else:
            raise KeyError(f"Command '{command_name}' not found in registry.")

    def reload_commands(self) -> None:
        """Reloads all loaded command plugins."""
        for cmd_name in self.commands:
            cmd = self.commands[cmd_name]
            module = self._import_module(cmd.__module__)
            reloaded_module = self._reload_module(module)
            if hasattr(reloaded_module, "register"):
                reloaded_module.register(self)

    def get_command(self, name: str) -> Callable[..., Any]:
        return self.commands[name]

    def call(self, command_name: str, **kwargs) -> Any:
        if command_name not in self.commands:
            raise KeyError(f"Command '{command_name}' not found in registry.")
        command = self.commands[command_name]
        return command(**kwargs)

    def command_prompt(self) -> str:
        """
        Returns a string representation of all registered `Command` objects for use in a prompt
        """
        commands_list = [
            f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
        ]
        return "\n".join(commands_list)

    def import_commands(self, module_name: str) -> None:
        """
        Imports the specified Python module containing command plugins.

        This method imports the associated module and registers any functions or
        classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
        as `Command` objects. The registered `Command` objects are then added to the
        `commands` dictionary of the `CommandRegistry` object.

        Args:
            module_name (str): The name of the module to import for command plugins.
        """

        module = importlib.import_module(module_name)

        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            # Register decorated functions
            if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
                attr, AUTO_GPT_COMMAND_IDENTIFIER
            ):
                self.register(attr.command)
            # Register command classes
            elif (
                inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
            ):
                cmd_instance = attr()
                self.register(cmd_instance)


def command(
    name: str,
    description: str,
    signature: str = "",
    enabled: bool = True,
    disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    def decorator(func: Callable[..., Any]) -> Command:
        cmd = Command(
            name=name,
            description=description,
            method=func,
            signature=signature,
            enabled=enabled,
            disabled_reason=disabled_reason,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        wrapper.command = cmd

        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

        return wrapper

    return decorator
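Taken together, the command decorator and CommandRegistry above give every module a uniform way to expose functions to the agent: the decorator attaches a Command object to the wrapped function, and the registry stores, lists, and dispatches those objects by name. A minimal sketch of that flow follows; the echo_text command is a made-up example, not part of this changeset.

from autogpt.commands.command import CommandRegistry, command


@command("echo_text", "Echo Text", '"text": "<text>"')
def echo_text(text: str) -> str:
    """Toy command used purely for illustration."""
    return f"echo: {text}"


registry = CommandRegistry()
# register() stores the Command object that the decorator attached to the function.
registry.register(echo_text.command)

print(registry.command_prompt())
# 1. echo_text: Echo Text, args: "text": "<text>"
print(registry.call("echo_text", text="hello"))
# echo: hello

In normal use the registry would presumably be filled by calling import_commands() on each command module rather than by registering functions one at a time.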
@@ -1,36 +1,39 @@
|
||||
"""Execute code in a Docker container"""
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
import docker
|
||||
from docker.errors import ImageNotFound
|
||||
|
||||
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def execute_python_file(file: str):
|
||||
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
|
||||
def execute_python_file(filename: str) -> str:
|
||||
"""Execute a Python file in a Docker container and return the output
|
||||
|
||||
Args:
|
||||
file (str): The name of the file to execute
|
||||
filename (str): The name of the file to execute
|
||||
|
||||
Returns:
|
||||
str: The output of the file
|
||||
"""
|
||||
logger.info(f"Executing file '{filename}'")
|
||||
|
||||
print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'")
|
||||
|
||||
if not file.endswith(".py"):
|
||||
if not filename.endswith(".py"):
|
||||
return "Error: Invalid file type. Only .py files are allowed."
|
||||
|
||||
file_path = path_in_workspace(file)
|
||||
|
||||
if not os.path.isfile(file_path):
|
||||
return f"Error: File '{file}' does not exist."
|
||||
if not os.path.isfile(filename):
|
||||
return f"Error: File '{filename}' does not exist."
|
||||
|
||||
if we_are_running_in_a_docker_container():
|
||||
result = subprocess.run(
|
||||
f"python {file_path}", capture_output=True, encoding="utf8", shell=True
|
||||
f"python {filename}", capture_output=True, encoding="utf8", shell=True
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout
|
||||
@@ -39,13 +42,17 @@ def execute_python_file(file: str):
|
||||
|
||||
try:
|
||||
client = docker.from_env()
|
||||
|
||||
image_name = "python:3.10"
|
||||
# You can replace this with the desired Python image/version
|
||||
# You can find available Python images on Docker Hub:
|
||||
# https://hub.docker.com/_/python
|
||||
image_name = "python:3-alpine"
|
||||
try:
|
||||
client.images.get(image_name)
|
||||
print(f"Image '{image_name}' found locally")
|
||||
logger.warn(f"Image '{image_name}' found locally")
|
||||
except ImageNotFound:
|
||||
print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
|
||||
logger.info(
|
||||
f"Image '{image_name}' not found locally, pulling from Docker Hub"
|
||||
)
|
||||
# Use the low-level API to stream the pull response
|
||||
low_level_client = docker.APIClient()
|
||||
for line in low_level_client.pull(image_name, stream=True, decode=True):
|
||||
@@ -53,18 +60,14 @@ def execute_python_file(file: str):
|
||||
status = line.get("status")
|
||||
progress = line.get("progress")
|
||||
if status and progress:
|
||||
print(f"{status}: {progress}")
|
||||
logger.info(f"{status}: {progress}")
|
||||
elif status:
|
||||
print(status)
|
||||
|
||||
# You can replace 'python:3.8' with the desired Python image/version
|
||||
# You can find available Python images on Docker Hub:
|
||||
# https://hub.docker.com/_/python
|
||||
logger.info(status)
|
||||
container = client.containers.run(
|
||||
image_name,
|
||||
f"python {file}",
|
||||
f"python {Path(filename).relative_to(CFG.workspace_path)}",
|
||||
volumes={
|
||||
os.path.abspath(WORKSPACE_PATH): {
|
||||
CFG.workspace_path: {
|
||||
"bind": "/workspace",
|
||||
"mode": "ro",
|
||||
}
|
||||
@@ -84,10 +87,25 @@ def execute_python_file(file: str):
|
||||
|
||||
return logs
|
||||
|
||||
except docker.errors.DockerException as e:
|
||||
logger.warn(
|
||||
"Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
|
||||
)
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command(
|
||||
"execute_shell",
|
||||
"Execute Shell Command, non-interactive commands only",
|
||||
'"command_line": "<command_line>"',
|
||||
CFG.execute_local_commands,
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell(command_line: str) -> str:
|
||||
"""Execute a shell command and return the output
|
||||
|
||||
@@ -97,12 +115,15 @@ def execute_shell(command_line: str) -> str:
|
||||
Returns:
|
||||
str: The output of the command
|
||||
"""
|
||||
current_dir = os.getcwd()
|
||||
# Change dir into workspace if necessary
|
||||
if str(WORKSPACE_PATH) not in current_dir:
|
||||
os.chdir(WORKSPACE_PATH)
|
||||
|
||||
print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
|
||||
current_dir = Path.cwd()
|
||||
# Change dir into workspace if necessary
|
||||
if not current_dir.is_relative_to(CFG.workspace_path):
|
||||
os.chdir(CFG.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
)
|
||||
|
||||
result = subprocess.run(command_line, capture_output=True, shell=True)
|
||||
output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
|
||||
@@ -110,10 +131,50 @@ def execute_shell(command_line: str) -> str:
|
||||
# Change back to whatever the prior working dir was
|
||||
|
||||
os.chdir(current_dir)
|
||||
|
||||
return output
|
||||
|
||||
|
||||
@command(
|
||||
"execute_shell_popen",
|
||||
"Execute Shell Command, non-interactive commands only",
|
||||
'"command_line": "<command_line>"',
|
||||
CFG.execute_local_commands,
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell_popen(command_line) -> str:
|
||||
"""Execute a shell command with Popen and returns an english description
|
||||
of the event and the process id
|
||||
|
||||
Args:
|
||||
command_line (str): The command line to execute
|
||||
|
||||
Returns:
|
||||
str: Description of the fact that the process started and its id
|
||||
"""
|
||||
|
||||
current_dir = os.getcwd()
|
||||
# Change dir into workspace if necessary
|
||||
if CFG.workspace_path not in current_dir:
|
||||
os.chdir(CFG.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
)
|
||||
|
||||
do_not_show_output = subprocess.DEVNULL
|
||||
process = subprocess.Popen(
|
||||
command_line, shell=True, stdout=do_not_show_output, stderr=do_not_show_output
|
||||
)
|
||||
|
||||
# Change back to whatever the prior working dir was
|
||||
|
||||
os.chdir(current_dir)
|
||||
|
||||
return f"Subprocess started with PID:'{str(process.pid)}'"
|
||||
|
||||
|
||||
def we_are_running_in_a_docker_container() -> bool:
|
||||
"""Check if we are running in a Docker container
|
||||
|
||||
|
||||
@@ -3,12 +3,19 @@ from __future__ import annotations
|
||||
|
||||
import os
|
||||
import os.path
|
||||
from pathlib import Path
|
||||
from typing import Generator
|
||||
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
|
||||
|
||||
LOG_FILE = "file_logger.txt"
|
||||
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
|
||||
import requests
|
||||
from colorama import Back, Fore
|
||||
from requests.adapters import HTTPAdapter, Retry
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.spinner import Spinner
|
||||
from autogpt.utils import readable_file_size
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def check_duplicate_operation(operation: str, filename: str) -> bool:
|
||||
@@ -21,7 +28,7 @@ def check_duplicate_operation(operation: str, filename: str) -> bool:
|
||||
Returns:
|
||||
bool: True if the operation has already been performed on the file
|
||||
"""
|
||||
log_content = read_file(LOG_FILE)
|
||||
log_content = read_file(CFG.file_logger_path)
|
||||
log_entry = f"{operation}: {filename}\n"
|
||||
return log_entry in log_content
|
||||
|
||||
@@ -34,13 +41,7 @@ def log_operation(operation: str, filename: str) -> None:
|
||||
filename (str): The name of the file the operation was performed on
|
||||
"""
|
||||
log_entry = f"{operation}: {filename}\n"
|
||||
|
||||
# Create the log file if it doesn't exist
|
||||
if not os.path.exists(LOG_FILE_PATH):
|
||||
with open(LOG_FILE_PATH, "w", encoding="utf-8") as f:
|
||||
f.write("File Operation Logger ")
|
||||
|
||||
append_to_file(LOG_FILE, log_entry, shouldLog = False)
|
||||
append_to_file(CFG.file_logger_path, log_entry, should_log=False)
|
||||
|
||||
|
||||
def split_file(
|
||||
@@ -63,13 +64,19 @@ def split_file(
|
||||
while start < content_length:
|
||||
end = start + max_length
|
||||
if end + overlap < content_length:
|
||||
chunk = content[start : end + overlap]
|
||||
chunk = content[start : end + overlap - 1]
|
||||
else:
|
||||
chunk = content[start:content_length]
|
||||
|
||||
# Account for the case where the last chunk is shorter than the overlap, so it has already been consumed
|
||||
if len(chunk) <= overlap:
|
||||
break
|
||||
|
||||
yield chunk
|
||||
start += max_length - overlap
|
||||
|
||||
|
||||
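The split_file change above shortens the chunk slice to end + overlap - 1, so each chunk now overlaps the next by one character less than before, and a final chunk consisting solely of already-seen overlap is skipped. Below is a standalone restatement of that generator run on a toy string; the sample text and sizes are invented, and the exact placement of the early-exit guard is inferred from the hunk.

from typing import Generator


def split_file(
    content: str, max_length: int = 4000, overlap: int = 0
) -> Generator[str, None, None]:
    # Restated from the updated hunk above, for illustration only.
    start = 0
    content_length = len(content)
    while start < content_length:
        end = start + max_length
        if end + overlap < content_length:
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:content_length]
        # A trailing chunk that is nothing but overlap has already been emitted.
        if len(chunk) <= overlap:
            break
        yield chunk
        start += max_length - overlap


print(list(split_file("abcdefghijklmnopqrstuvwxyz", max_length=10, overlap=3)))
# ['abcdefghijkl', 'hijklmnopqrs', 'opqrstuvwxyz', 'vwxyz']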
@command("read_file", "Read file", '"filename": "<filename>"')
|
||||
def read_file(filename: str) -> str:
|
||||
"""Read a file and return the contents
|
||||
|
||||
@@ -80,8 +87,7 @@ def read_file(filename: str) -> str:
|
||||
str: The contents of the file
|
||||
"""
|
||||
try:
|
||||
filepath = path_in_workspace(filename)
|
||||
with open(filepath, "r", encoding="utf-8") as f:
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
return content
|
||||
except Exception as e:
|
||||
@@ -101,27 +107,28 @@ def ingest_file(
|
||||
:param overlap: The number of overlapping characters between chunks, default is 200
|
||||
"""
|
||||
try:
|
||||
print(f"Working with file {filename}")
|
||||
logger.info(f"Working with file {filename}")
|
||||
content = read_file(filename)
|
||||
content_length = len(content)
|
||||
print(f"File length: {content_length} characters")
|
||||
logger.info(f"File length: {content_length} characters")
|
||||
|
||||
chunks = list(split_file(content, max_length=max_length, overlap=overlap))
|
||||
|
||||
num_chunks = len(chunks)
|
||||
for i, chunk in enumerate(chunks):
|
||||
print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
|
||||
logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
|
||||
memory_to_add = (
|
||||
f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
|
||||
)
|
||||
|
||||
memory.add(memory_to_add)
|
||||
|
||||
print(f"Done ingesting {num_chunks} chunks from {filename}.")
|
||||
logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
|
||||
except Exception as e:
|
||||
print(f"Error while ingesting file '{filename}': {str(e)}")
|
||||
logger.info(f"Error while ingesting file '{filename}': {str(e)}")
|
||||
|
||||
|
||||
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
|
||||
def write_to_file(filename: str, text: str) -> str:
|
||||
"""Write text to a file
|
||||
|
||||
@@ -135,11 +142,9 @@ def write_to_file(filename: str, text: str) -> str:
|
||||
if check_duplicate_operation("write", filename):
|
||||
return "Error: File has already been updated."
|
||||
try:
|
||||
filepath = path_in_workspace(filename)
|
||||
directory = os.path.dirname(filepath)
|
||||
if not os.path.exists(directory):
|
||||
os.makedirs(directory)
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(text)
|
||||
log_operation("write", filename)
|
||||
return "File written to successfully."
|
||||
@@ -147,22 +152,27 @@ def write_to_file(filename: str, text: str) -> str:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
|
||||
@command(
|
||||
"append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
|
||||
)
|
||||
def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
|
||||
"""Append text to a file
|
||||
|
||||
Args:
|
||||
filename (str): The name of the file to append to
|
||||
text (str): The text to append to the file
|
||||
should_log (bool): Should log output
|
||||
|
||||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
try:
|
||||
filepath = path_in_workspace(filename)
|
||||
with open(filepath, "a") as f:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "a") as f:
|
||||
f.write(text)
|
||||
|
||||
if shouldLog:
|
||||
if should_log:
|
||||
log_operation("append", filename)
|
||||
|
||||
return "Text appended successfully."
|
||||
@@ -170,6 +180,7 @@ def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command("delete_file", "Delete file", '"filename": "<filename>"')
|
||||
def delete_file(filename: str) -> str:
|
||||
"""Delete a file
|
||||
|
||||
@@ -182,14 +193,14 @@ def delete_file(filename: str) -> str:
|
||||
if check_duplicate_operation("delete", filename):
|
||||
return "Error: File has already been deleted."
|
||||
try:
|
||||
filepath = path_in_workspace(filename)
|
||||
os.remove(filepath)
|
||||
os.remove(filename)
|
||||
log_operation("delete", filename)
|
||||
return "File deleted successfully."
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command("search_files", "Search Files", '"directory": "<directory>"')
|
||||
def search_files(directory: str) -> list[str]:
|
||||
"""Search for files in a directory
|
||||
|
||||
@@ -201,16 +212,61 @@ def search_files(directory: str) -> list[str]:
|
||||
"""
|
||||
found_files = []
|
||||
|
||||
if directory in {"", "/"}:
|
||||
search_directory = WORKSPACE_PATH
|
||||
else:
|
||||
search_directory = path_in_workspace(directory)
|
||||
|
||||
for root, _, files in os.walk(search_directory):
|
||||
for root, _, files in os.walk(directory):
|
||||
for file in files:
|
||||
if file.startswith("."):
|
||||
continue
|
||||
relative_path = os.path.relpath(os.path.join(root, file), WORKSPACE_PATH)
|
||||
relative_path = os.path.relpath(
|
||||
os.path.join(root, file), CFG.workspace_path
|
||||
)
|
||||
found_files.append(relative_path)
|
||||
|
||||
return found_files
|
||||
|
||||
|
||||
@command(
|
||||
"download_file",
|
||||
"Download File",
|
||||
'"url": "<url>", "filename": "<filename>"',
|
||||
CFG.allow_downloads,
|
||||
"Error: You do not have user authorization to download files locally.",
|
||||
)
|
||||
def download_file(url, filename):
|
||||
"""Downloads a file
|
||||
Args:
|
||||
url (str): URL of the file to download
|
||||
filename (str): Filename to save the file as
|
||||
"""
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
|
||||
with Spinner(message) as spinner:
|
||||
session = requests.Session()
|
||||
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
|
||||
adapter = HTTPAdapter(max_retries=retry)
|
||||
session.mount("http://", adapter)
|
||||
session.mount("https://", adapter)
|
||||
|
||||
total_size = 0
|
||||
downloaded_size = 0
|
||||
|
||||
with session.get(url, allow_redirects=True, stream=True) as r:
|
||||
r.raise_for_status()
|
||||
total_size = int(r.headers.get("Content-Length", 0))
|
||||
downloaded_size = 0
|
||||
|
||||
with open(filename, "wb") as f:
|
||||
for chunk in r.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
downloaded_size += len(chunk)
|
||||
|
||||
# Update the progress message
|
||||
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
|
||||
spinner.update_message(f"{message} {progress}")
|
||||
|
||||
return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
|
||||
except requests.HTTPError as e:
|
||||
return f"Got an HTTP Error whilst trying to download file: {e}"
|
||||
except Exception as e:
|
||||
return "Error: " + str(e)
|
||||
|
||||
@@ -1,23 +1,35 @@
"""Git operations for autogpt"""
import git
from git.repo import Repo

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.url_utils.validators import validate_url

CFG = Config()


def clone_repository(repo_url: str, clone_path: str) -> str:
    """Clone a github repository locally
@command(
    "clone_repository",
    "Clone Repository",
    '"url": "<repository_url>", "clone_path": "<clone_path>"',
    CFG.github_username and CFG.github_api_key,
    "Configure github_username and github_api_key.",
)
@validate_url
def clone_repository(url: str, clone_path: str) -> str:
    """Clone a GitHub repository locally.

    Args:
        repo_url (str): The URL of the repository to clone
        clone_path (str): The path to clone the repository to
        url (str): The URL of the repository to clone.
        clone_path (str): The path to clone the repository to.

    Returns:
        str: The result of the clone operation"""
    split_url = repo_url.split("//")
        str: The result of the clone operation.
    """
    split_url = url.split("//")
    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
    try:
        git.Repo.clone_from(auth_repo_url, clone_path)
        return f"""Cloned {repo_url} to {clone_path}"""
        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
        return f"""Cloned {url} to {clone_path}"""
    except Exception as e:
        return f"Error: {str(e)}"
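clone_repository builds an authenticated clone URL by splitting the repository URL on the scheme separator and re-joining it with the configured GitHub credentials. Here is that string manipulation in isolation; the username, token, and repository below are invented placeholders.

url = "https://github.com/example/repo.git"  # hypothetical repository
github_username, github_api_key = "someuser", "ghp_exampletoken"  # placeholder credentials

split_url = url.split("//")
auth_repo_url = f"//{github_username}:{github_api_key}@".join(split_url)
print(auth_repo_url)
# https://someuser:ghp_exampletoken@github.com/example/repo.git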
@@ -5,13 +5,15 @@ import json

from duckduckgo_search import ddg

from autogpt.commands.command import command
from autogpt.config import Config

CFG = Config()


@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
    """Return the results of a google search
    """Return the results of a Google search

    Args:
        query (str): The search query.
@@ -31,11 +33,19 @@ def google_search(query: str, num_results: int = 8) -> str:
    for j in results:
        search_results.append(j)

    return json.dumps(search_results, ensure_ascii=False, indent=4)
    results = json.dumps(search_results, ensure_ascii=False, indent=4)
    return safe_google_results(results)


@command(
    "google",
    "Google Search",
    '"query": "<query>"',
    bool(CFG.google_api_key),
    "Configure google_api_key.",
)
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
    """Return the results of a google search using the official Google API
    """Return the results of a Google search using the official Google API

    Args:
        query (str): The search query.
@@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
            return "Error: The provided Google API key is invalid or missing."
        else:
            return f"Error: {e}"
    # google_result can be a list or a string depending on the search results

    # Return the list of search result URLs
    return search_results_links
    return safe_google_results(search_results_links)


def safe_google_results(results: str | list) -> str:
    """
    Return the results of a google search in a safe format.

    Args:
        results (str | list): The search results.

    Returns:
        str: The results of the search.
    """
    if isinstance(results, list):
        safe_message = json.dumps(
            [result.encode("utf-8", "ignore") for result in results]
        )
    else:
        safe_message = results.encode("utf-8", "ignore").decode("utf-8")
    return safe_message
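safe_google_results exists so that whatever comes back from either search path is reduced to plain UTF-8 text before it reaches the agent. A small illustration of the string branch, on an invented value containing an unencodable character:

raw = "caf\udcffé results"  # contains a lone surrogate that UTF-8 cannot encode
safe = raw.encode("utf-8", "ignore").decode("utf-8")
print(safe)
# café results  (the offending character is silently dropped)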
@@ -1,36 +1,42 @@
|
||||
""" Image Generation Module for AutoGPT."""
|
||||
import io
|
||||
import os.path
|
||||
import uuid
|
||||
from base64 import b64decode
|
||||
|
||||
import openai
|
||||
import requests
|
||||
from PIL import Image
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.workspace import path_in_workspace
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def generate_image(prompt: str) -> str:
|
||||
@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
|
||||
def generate_image(prompt: str, size: int = 256) -> str:
|
||||
"""Generate an image from a prompt.
|
||||
|
||||
Args:
|
||||
prompt (str): The prompt to use
|
||||
size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace)
|
||||
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
filename = f"{str(uuid.uuid4())}.jpg"
|
||||
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
|
||||
# DALL-E
|
||||
if CFG.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename)
|
||||
elif CFG.image_provider == "sd":
|
||||
return generate_image_with_dalle(prompt, filename, size)
|
||||
# HuggingFace
|
||||
elif CFG.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename)
|
||||
else:
|
||||
return "No Image Provider Set"
|
||||
# SD WebUI
|
||||
elif CFG.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, size)
|
||||
return "No Image Provider Set"
|
||||
|
||||
|
||||
def generate_image_with_hf(prompt: str, filename: str) -> str:
|
||||
@@ -44,13 +50,16 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
|
||||
str: The filename of the image
|
||||
"""
|
||||
API_URL = (
|
||||
"https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
|
||||
f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
|
||||
)
|
||||
if CFG.huggingface_api_token is None:
|
||||
raise ValueError(
|
||||
"You need to set your Hugging Face API token in the config file."
|
||||
)
|
||||
headers = {"Authorization": f"Bearer {CFG.huggingface_api_token}"}
|
||||
headers = {
|
||||
"Authorization": f"Bearer {CFG.huggingface_api_token}",
|
||||
"X-Use-Cache": "false",
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
API_URL,
|
||||
@@ -61,37 +70,96 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
|
||||
)
|
||||
|
||||
image = Image.open(io.BytesIO(response.content))
|
||||
print(f"Image Generated for prompt:{prompt}")
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
|
||||
image.save(path_in_workspace(filename))
|
||||
image.save(filename)
|
||||
|
||||
return f"Saved to disk:{filename}"
|
||||
|
||||
|
||||
def generate_image_with_dalle(prompt: str, filename: str) -> str:
|
||||
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
|
||||
"""Generate an image with DALL-E.
|
||||
|
||||
Args:
|
||||
prompt (str): The prompt to use
|
||||
filename (str): The filename to save the image to
|
||||
size (int): The size of the image
|
||||
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
openai.api_key = CFG.openai_api_key
|
||||
|
||||
# Check for supported image sizes
|
||||
if size not in [256, 512, 1024]:
|
||||
closest = min([256, 512, 1024], key=lambda x: abs(x - size))
|
||||
logger.info(
|
||||
f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
|
||||
)
|
||||
size = closest
|
||||
|
||||
response = openai.Image.create(
|
||||
prompt=prompt,
|
||||
n=1,
|
||||
size="256x256",
|
||||
size=f"{size}x{size}",
|
||||
response_format="b64_json",
|
||||
api_key=CFG.openai_api_key,
|
||||
)
|
||||
|
||||
print(f"Image Generated for prompt:{prompt}")
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
|
||||
image_data = b64decode(response["data"][0]["b64_json"])
|
||||
|
||||
with open(path_in_workspace(filename), mode="wb") as png:
|
||||
with open(filename, mode="wb") as png:
|
||||
png.write(image_data)
|
||||
|
||||
return f"Saved to disk:{filename}"
|
||||
|
||||
|
||||
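generate_image_with_dalle now checks the requested size against the three resolutions the DALL-E API accepts and snaps anything else to the nearest supported value before the request is made. The clamping rule on its own, with an arbitrary unsupported size as input:

size = 600  # arbitrary unsupported request
if size not in [256, 512, 1024]:
    size = min([256, 512, 1024], key=lambda x: abs(x - size))
print(size)
# 512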
def generate_image_with_sd_webui(
|
||||
prompt: str,
|
||||
filename: str,
|
||||
size: int = 512,
|
||||
negative_prompt: str = "",
|
||||
extra: dict = {},
|
||||
) -> str:
|
||||
"""Generate an image with Stable Diffusion webui.
|
||||
Args:
|
||||
prompt (str): The prompt to use
|
||||
filename (str): The filename to save the image to
|
||||
size (int, optional): The size of the image. Defaults to 256.
|
||||
negative_prompt (str, optional): The negative prompt to use. Defaults to "".
|
||||
extra (dict, optional): Extra parameters to pass to the API. Defaults to {}.
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
# Create a session and set the basic auth if needed
|
||||
s = requests.Session()
|
||||
if CFG.sd_webui_auth:
|
||||
username, password = CFG.sd_webui_auth.split(":")
|
||||
s.auth = (username, password or "")
|
||||
|
||||
# Generate the images
|
||||
response = requests.post(
|
||||
f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
|
||||
json={
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
"sampler_index": "DDIM",
|
||||
"steps": 20,
|
||||
"cfg_scale": 7.0,
|
||||
"width": size,
|
||||
"height": size,
|
||||
"n_iter": 1,
|
||||
**extra,
|
||||
},
|
||||
)
|
||||
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
|
||||
# Save the image to disk
|
||||
response = response.json()
|
||||
b64 = b64decode(response["images"][0].split(",", 1)[0])
|
||||
image = Image.open(io.BytesIO(b64))
|
||||
image.save(filename)
|
||||
|
||||
return f"Saved to disk:{filename}"
|
||||
|
||||
@@ -2,23 +2,29 @@ from __future__ import annotations

import json

from autogpt.llm_utils import call_ai_function
from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
    "improve_code",
    "Get Improved Code",
    '"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
)
def improve_code(suggestions: list[str], code: str) -> str:
    """
    A function that takes in code and suggestions and returns a response from create
    chat completion api call.

    Parameters:
        suggestions (List): A list of suggestions around what needs to be improved.
        suggestions (list): A list of suggestions around what needs to be improved.
        code (str): Code to be improved.
    Returns:
        A result string from create chat completion. Improved code in response.
    """

    function_string = (
        "def generate_improved_code(suggestions: List[str], code: str) -> str:"
        "def generate_improved_code(suggestions: list[str], code: str) -> str:"
    )
    args = [json.dumps(suggestions), code]
    description_string = (
@@ -1,11 +1,27 @@
import tweepy
"""A module that contains a command to send a tweet."""
import os
from dotenv import load_dotenv

load_dotenv()
import tweepy

from autogpt.commands.command import command


def send_tweet(tweet_text):
@command(
    "send_tweet",
    "Send Tweet",
    '"tweet_text": "<tweet_text>"',
)
def send_tweet(tweet_text: str) -> str:
    """
    A function that takes in a string and returns a response from create chat
    completion api call.

    Args:
        tweet_text (str): Text to be tweeted.

    Returns:
        A result from sending the tweet.
    """
    consumer_key = os.environ.get("TW_CONSUMER_KEY")
    consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
    access_token = os.environ.get("TW_ACCESS_TOKEN")
@@ -20,6 +36,6 @@ def send_tweet(tweet_text):
    # Send tweet
    try:
        api.update_status(tweet_text)
        print("Tweet sent successfully!")
        return "Tweet sent successfully!"
    except tweepy.TweepyException as e:
        print("Error sending tweet: {}".format(e.reason))
        return f"Error sending tweet: {e.reason}"
@@ -1,13 +1,16 @@
"""Web scraping commands using Playwright"""
from __future__ import annotations

from autogpt.logs import logger

try:
    from playwright.sync_api import sync_playwright
except ImportError:
    print(
    logger.info(
        "Playwright not installed. Please install it with 'pip install playwright' to use."
    )
from bs4 import BeautifulSoup

from autogpt.processing.html import extract_hyperlinks, format_hyperlinks

@@ -1,70 +1,21 @@
|
||||
"""Browse a webpage and summarize it using the LLM model"""
|
||||
from __future__ import annotations
|
||||
|
||||
from urllib.parse import urljoin, urlparse
|
||||
|
||||
import requests
|
||||
from requests.compat import urljoin
|
||||
from requests import Response
|
||||
from bs4 import BeautifulSoup
|
||||
from requests import Response
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
memory = get_memory(CFG)
|
||||
|
||||
session = requests.Session()
|
||||
session.headers.update({"User-Agent": CFG.user_agent})
|
||||
|
||||
|
||||
def is_valid_url(url: str) -> bool:
|
||||
"""Check if the URL is valid
|
||||
|
||||
Args:
|
||||
url (str): The URL to check
|
||||
|
||||
Returns:
|
||||
bool: True if the URL is valid, False otherwise
|
||||
"""
|
||||
try:
|
||||
result = urlparse(url)
|
||||
return all([result.scheme, result.netloc])
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def sanitize_url(url: str) -> str:
|
||||
"""Sanitize the URL
|
||||
|
||||
Args:
|
||||
url (str): The URL to sanitize
|
||||
|
||||
Returns:
|
||||
str: The sanitized URL
|
||||
"""
|
||||
return urljoin(url, urlparse(url).path)
|
||||
|
||||
|
||||
def check_local_file_access(url: str) -> bool:
|
||||
"""Check if the URL is a local file
|
||||
|
||||
Args:
|
||||
url (str): The URL to check
|
||||
|
||||
Returns:
|
||||
bool: True if the URL is a local file, False otherwise
|
||||
"""
|
||||
local_prefixes = [
|
||||
"file:///",
|
||||
"file://localhost",
|
||||
"http://localhost",
|
||||
"https://localhost",
|
||||
]
|
||||
return any(url.startswith(prefix) for prefix in local_prefixes)
|
||||
|
||||
|
||||
@validate_url
|
||||
def get_response(
|
||||
url: str, timeout: int = 10
|
||||
) -> tuple[None, str] | tuple[Response, None]:
|
||||
@@ -82,17 +33,7 @@ def get_response(
|
||||
requests.exceptions.RequestException: If the HTTP request fails
|
||||
"""
|
||||
try:
|
||||
# Restrict access to local files
|
||||
if check_local_file_access(url):
|
||||
raise ValueError("Access to local files is restricted")
|
||||
|
||||
# Most basic check if the URL is valid:
|
||||
if not url.startswith("http://") and not url.startswith("https://"):
|
||||
raise ValueError("Invalid URL format")
|
||||
|
||||
sanitized_url = sanitize_url(url)
|
||||
|
||||
response = session.get(sanitized_url, timeout=timeout)
|
||||
response = session.get(url, timeout=timeout)
|
||||
|
||||
# Check if the response contains an HTTP error
|
||||
if response.status_code >= 400:
|
||||
|
||||
@@ -1,27 +1,39 @@
|
||||
"""Selenium web scraping module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from selenium import webdriver
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
import autogpt.processing.text as summary
|
||||
from bs4 import BeautifulSoup
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from webdriver_manager.chrome import ChromeDriverManager
|
||||
from webdriver_manager.firefox import GeckoDriverManager
|
||||
from selenium.webdriver.chrome.options import Options as ChromeOptions
|
||||
from selenium.webdriver.firefox.options import Options as FirefoxOptions
|
||||
from selenium.webdriver.safari.options import Options as SafariOptions
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from sys import platform
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from selenium import webdriver
|
||||
from selenium.common.exceptions import WebDriverException
|
||||
from selenium.webdriver.chrome.options import Options as ChromeOptions
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options as FirefoxOptions
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
from selenium.webdriver.safari.options import Options as SafariOptions
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
from webdriver_manager.chrome import ChromeDriverManager
|
||||
from webdriver_manager.firefox import GeckoDriverManager
|
||||
|
||||
import autogpt.processing.text as summary
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
FILE_DIR = Path(__file__).parent.parent
|
||||
CFG = Config()
|
||||
|
||||
|
||||
@command(
|
||||
"browse_website",
|
||||
"Browse Website",
|
||||
'"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
|
||||
)
|
||||
@validate_url
|
||||
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
|
||||
"""Browse a website and return the answer and links to the user
|
||||
|
||||
@@ -32,7 +44,14 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
|
||||
Returns:
|
||||
Tuple[str, WebDriver]: The answer and links to the user and the webdriver
|
||||
"""
|
||||
driver, text = scrape_text_with_selenium(url)
|
||||
try:
|
||||
driver, text = scrape_text_with_selenium(url)
|
||||
except WebDriverException as e:
|
||||
# These errors are often quite long and include lots of context.
|
||||
# Just grab the first line.
|
||||
msg = e.msg.split("\n")[0]
|
||||
return f"Error: {msg}", None
|
||||
|
||||
add_header(driver)
|
||||
summary_text = summary.summarize_text(url, text, question, driver)
|
||||
links = scrape_links_with_selenium(driver, url)
|
||||
@@ -67,6 +86,9 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
|
||||
)
|
||||
|
||||
if CFG.selenium_web_browser == "firefox":
|
||||
if CFG.selenium_headless:
|
||||
options.headless = True
|
||||
options.add_argument("--disable-gpu")
|
||||
driver = webdriver.Firefox(
|
||||
executable_path=GeckoDriverManager().install(), options=options
|
||||
)
|
||||
@@ -75,8 +97,22 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
|
||||
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
|
||||
driver = webdriver.Safari(options=options)
|
||||
else:
|
||||
if platform == "linux" or platform == "linux2":
|
||||
options.add_argument("--disable-dev-shm-usage")
|
||||
options.add_argument("--remote-debugging-port=9222")
|
||||
|
||||
options.add_argument("--no-sandbox")
|
||||
if CFG.selenium_headless:
|
||||
options.add_argument("--headless=new")
|
||||
options.add_argument("--disable-gpu")
|
||||
|
||||
chromium_driver_path = Path("/usr/bin/chromedriver")
|
||||
|
||||
driver = webdriver.Chrome(
|
||||
executable_path=ChromeDriverManager().install(), options=options
|
||||
executable_path=chromium_driver_path
|
||||
if chromium_driver_path.exists()
|
||||
else ChromeDriverManager().install(),
|
||||
options=options,
|
||||
)
|
||||
driver.get(url)
|
||||
|
||||
|
||||
@@ -2,9 +2,16 @@
from __future__ import annotations

import json
from autogpt.llm_utils import call_ai_function

from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
    "write_tests",
    "Write Tests",
    '"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
)
def write_tests(code: str, focus: list[str]) -> str:
    """
    A function that takes in code and focus topics and returns a response from create

@@ -2,13 +2,10 @@
This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import check_openai_api_key, Config
from autogpt.config.singleton import AbstractSingleton, Singleton
from autogpt.config.config import Config, check_openai_api_key

__all__ = [
    "check_openai_api_key",
    "AbstractSingleton",
    "AIConfig",
    "Config",
    "Singleton",
]
@@ -5,9 +5,18 @@ A module that contains the AIConfig class object that contains the configuration
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Type
|
||||
import platform
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional, Type
|
||||
|
||||
import distro
|
||||
import yaml
|
||||
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
|
||||
# Soon this will go in a folder where it remembers more stuff about the run(s)
|
||||
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
|
||||
|
||||
|
||||
class AIConfig:
|
||||
"""
|
||||
@@ -17,10 +26,15 @@ class AIConfig:
|
||||
ai_name (str): The name of the AI.
|
||||
ai_role (str): The description of the AI's role.
|
||||
ai_goals (list): The list of objectives the AI is supposed to complete.
|
||||
api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
|
||||
self,
|
||||
ai_name: str = "",
|
||||
ai_role: str = "",
|
||||
ai_goals: list | None = None,
|
||||
api_budget: float = 0.0,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize a class instance
|
||||
@@ -29,6 +43,7 @@ class AIConfig:
|
||||
ai_name (str): The name of the AI.
|
||||
ai_role (str): The description of the AI's role.
|
||||
ai_goals (list): The list of objectives the AI is supposed to complete.
|
||||
api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
@@ -37,14 +52,14 @@ class AIConfig:
|
||||
self.ai_name = ai_name
|
||||
self.ai_role = ai_role
|
||||
self.ai_goals = ai_goals
|
||||
|
||||
# Soon this will go in a folder where it remembers more stuff about the run(s)
|
||||
SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
|
||||
self.api_budget = api_budget
|
||||
self.prompt_generator = None
|
||||
self.command_registry = None
|
||||
|
||||
@staticmethod
|
||||
def load(config_file: str = SAVE_FILE) -> "AIConfig":
|
||||
"""
|
||||
Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
|
||||
Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from
|
||||
yaml file if yaml file exists,
|
||||
else returns class with no parameters.
|
||||
|
||||
@@ -64,9 +79,15 @@ class AIConfig:
|
||||
|
||||
ai_name = config_params.get("ai_name", "")
|
||||
ai_role = config_params.get("ai_role", "")
|
||||
ai_goals = config_params.get("ai_goals", [])
|
||||
ai_goals = [
|
||||
str(goal).strip("{}").replace("'", "").replace('"', "")
|
||||
if isinstance(goal, dict)
|
||||
else str(goal)
|
||||
for goal in config_params.get("ai_goals", [])
|
||||
]
|
||||
api_budget = config_params.get("api_budget", 0.0)
|
||||
# type: Type[AIConfig]
|
||||
return AIConfig(ai_name, ai_role, ai_goals)
|
||||
return AIConfig(ai_name, ai_role, ai_goals, api_budget)
|
||||
|
||||
def save(self, config_file: str = SAVE_FILE) -> None:
|
||||
"""
|
||||
@@ -84,11 +105,14 @@ class AIConfig:
|
||||
"ai_name": self.ai_name,
|
||||
"ai_role": self.ai_role,
|
||||
"ai_goals": self.ai_goals,
|
||||
"api_budget": self.api_budget,
|
||||
}
|
||||
with open(config_file, "w", encoding="utf-8") as file:
|
||||
yaml.dump(config, file, allow_unicode=True)
|
||||
|
||||
def construct_full_prompt(self) -> str:
|
||||
def construct_full_prompt(
|
||||
self, prompt_generator: Optional[PromptGenerator] = None
|
||||
) -> str:
|
||||
"""
|
||||
Returns a prompt to the user with the class information in an organized fashion.
|
||||
|
||||
@@ -97,7 +121,7 @@ class AIConfig:
|
||||
|
||||
Returns:
|
||||
full_prompt (str): A string containing the initial prompt for the user
|
||||
including the ai_name, ai_role and ai_goals.
|
||||
including the ai_name, ai_role, ai_goals, and api_budget.
|
||||
"""
|
||||
|
||||
prompt_start = (
|
||||
@@ -107,14 +131,38 @@ class AIConfig:
|
||||
""
|
||||
)
|
||||
|
||||
from autogpt.prompt import get_prompt
|
||||
from autogpt.config import Config
|
||||
from autogpt.prompts.prompt import build_default_prompt_generator
|
||||
|
||||
cfg = Config()
|
||||
if prompt_generator is None:
|
||||
prompt_generator = build_default_prompt_generator()
|
||||
prompt_generator.goals = self.ai_goals
|
||||
prompt_generator.name = self.ai_name
|
||||
prompt_generator.role = self.ai_role
|
||||
prompt_generator.command_registry = self.command_registry
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_post_prompt():
|
||||
continue
|
||||
prompt_generator = plugin.post_prompt(prompt_generator)
|
||||
|
||||
if cfg.execute_local_commands:
|
||||
# add OS info to prompt
|
||||
os_name = platform.system()
|
||||
os_info = (
|
||||
platform.platform(terse=True)
|
||||
if os_name != "Linux"
|
||||
else distro.name(pretty=True)
|
||||
)
|
||||
|
||||
prompt_start += f"\nThe OS you are running on is: {os_info}"
|
||||
|
||||
# Construct full prompt
|
||||
full_prompt = (
|
||||
f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
|
||||
)
|
||||
full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n"
|
||||
for i, goal in enumerate(self.ai_goals):
|
||||
full_prompt += f"{i+1}. {goal}\n"
|
||||
|
||||
full_prompt += f"\n\n{get_prompt()}"
|
||||
if self.api_budget > 0.0:
|
||||
full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}"
|
||||
self.prompt_generator = prompt_generator
|
||||
full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}"
|
||||
return full_prompt
|
||||
|
||||
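The AIConfig changes above add an api_budget field that is saved and loaded alongside the name, role, and goals, and is appended to the generated prompt whenever it is greater than zero (0.0 still means unlimited). A small sketch of round-tripping a config through save() and load(); the file name and values are illustrative only.

from autogpt.config.ai_config import AIConfig

config = AIConfig(
    ai_name="ResearchGPT",
    ai_role="an agent that summarises recent papers",
    ai_goals=["Find three recent papers", "Summarise each in one paragraph"],
    api_budget=5.0,  # hard spending ceiling in dollars
)
config.save("ai_settings.yaml")

reloaded = AIConfig.load("ai_settings.yaml")
assert reloaded.api_budget == 5.0
assert reloaded.ai_goals == config.ai_goals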
@@ -1,15 +1,13 @@
|
||||
"""Configuration class to store the state of bools for different scripts access."""
|
||||
import os
|
||||
from colorama import Fore
|
||||
|
||||
from autogpt.config.singleton import Singleton
|
||||
from typing import List
|
||||
|
||||
import openai
|
||||
import yaml
|
||||
from auto_gpt_plugin_template import AutoGPTPluginTemplate
|
||||
from colorama import Fore
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv(verbose=True)
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
||||
class Config(metaclass=Singleton):
|
||||
@@ -19,27 +17,38 @@ class Config(metaclass=Singleton):
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize the Config class"""
|
||||
self.workspace_path = None
|
||||
self.file_logger_path = None
|
||||
|
||||
self.debug_mode = False
|
||||
self.continuous_mode = False
|
||||
self.continuous_limit = 0
|
||||
self.speak_mode = False
|
||||
self.skip_reprompt = False
|
||||
self.allow_downloads = False
|
||||
self.skip_news = False
|
||||
|
||||
self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
|
||||
self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
|
||||
self.exit_key = os.getenv("EXIT_KEY", "n")
|
||||
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
|
||||
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
|
||||
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
|
||||
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
|
||||
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
|
||||
self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
|
||||
self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))
|
||||
self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
|
||||
self.browse_spacy_language_model = os.getenv(
|
||||
"BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
|
||||
)
|
||||
|
||||
self.openai_api_key = os.getenv("OPENAI_API_KEY")
|
||||
self.temperature = float(os.getenv("TEMPERATURE", "1"))
|
||||
self.temperature = float(os.getenv("TEMPERATURE", "0"))
|
||||
self.use_azure = os.getenv("USE_AZURE") == "True"
|
||||
self.execute_local_commands = (
|
||||
os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
|
||||
)
|
||||
self.restrict_to_workspace = (
|
||||
os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
|
||||
)
|
||||
|
||||
if self.use_azure:
|
||||
self.load_azure_config()
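A small sketch of the flag-parsing convention used throughout this constructor: boolean settings are stored in the environment as the strings "True"/"False", and comparing against "True" yields a real bool. EXAMPLE_FLAG is a hypothetical variable name, not one of the repository's settings:

import os

# Hypothetical flag; same pattern as EXECUTE_LOCAL_COMMANDS / RESTRICT_TO_WORKSPACE above.
example_flag = os.getenv("EXAMPLE_FLAG", "False") == "True"
print(type(example_flag).__name__, example_flag)  # -> bool False (unless EXAMPLE_FLAG=True)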
|
||||
@@ -54,6 +63,8 @@ class Config(metaclass=Singleton):
|
||||
self.use_mac_os_tts = False
|
||||
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
|
||||
|
||||
self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"
|
||||
|
||||
self.use_brian_tts = False
|
||||
self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
|
||||
|
||||
@@ -66,7 +77,7 @@ class Config(metaclass=Singleton):
|
||||
self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
|
||||
self.pinecone_region = os.getenv("PINECONE_ENV")
|
||||
|
||||
self.weaviate_host = os.getenv("WEAVIATE_HOST")
|
||||
self.weaviate_host = os.getenv("WEAVIATE_HOST")
|
||||
self.weaviate_port = os.getenv("WEAVIATE_PORT")
|
||||
self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
|
||||
self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
|
||||
@@ -74,19 +85,34 @@ class Config(metaclass=Singleton):
|
||||
self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
|
||||
self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
|
||||
self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
|
||||
self.use_weaviate_embedded = os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
|
||||
self.use_weaviate_embedded = (
|
||||
os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
|
||||
)
|
||||
|
||||
# milvus configuration, e.g., localhost:19530.
|
||||
# milvus or zilliz cloud configuration.
|
||||
self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
|
||||
self.milvus_username = os.getenv("MILVUS_USERNAME")
|
||||
self.milvus_password = os.getenv("MILVUS_PASSWORD")
|
||||
self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
|
||||
self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"
|
||||
|
||||
self.image_provider = os.getenv("IMAGE_PROVIDER")
|
||||
self.image_size = int(os.getenv("IMAGE_SIZE", 256))
|
||||
self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
|
||||
self.huggingface_image_model = os.getenv(
|
||||
"HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
|
||||
)
|
||||
self.huggingface_audio_to_text_model = os.getenv(
|
||||
"HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
|
||||
)
|
||||
self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
|
||||
self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")
|
||||
|
||||
# User agent headers to use when browsing web
|
||||
# Selenium browser settings
|
||||
self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
|
||||
self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"
|
||||
|
||||
# User agent header to use when making HTTP requests
|
||||
# Some websites may simply deny the request with an error code if
# no user agent is provided.
|
||||
self.user_agent = os.getenv(
|
||||
@@ -94,6 +120,7 @@ class Config(metaclass=Singleton):
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
|
||||
" (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
|
||||
)
|
||||
|
||||
self.redis_host = os.getenv("REDIS_HOST", "localhost")
|
||||
self.redis_port = os.getenv("REDIS_PORT", "6379")
|
||||
self.redis_password = os.getenv("REDIS_PASSWORD", "")
|
||||
@@ -102,8 +129,17 @@ class Config(metaclass=Singleton):
|
||||
# Note that indexes must be created on db 0 in Redis; this is not configurable.
|
||||
|
||||
self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
|
||||
# Initialize the OpenAI API client
|
||||
openai.api_key = self.openai_api_key
|
||||
|
||||
self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
|
||||
self.plugins: List[AutoGPTPluginTemplate] = []
|
||||
self.plugins_openai = []
|
||||
|
||||
plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
|
||||
if plugins_allowlist:
|
||||
self.plugins_allowlist = plugins_allowlist.split(",")
|
||||
else:
|
||||
self.plugins_allowlist = []
|
||||
self.plugins_denylist = []
|
||||
|
||||
def get_azure_deployment_id_for_model(self, model: str) -> str:
|
||||
"""
|
||||
@@ -130,7 +166,7 @@ class Config(metaclass=Singleton):
|
||||
else:
|
||||
return ""
|
||||
|
||||
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml")
|
||||
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
|
||||
|
||||
def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
|
||||
"""
|
||||
@@ -143,17 +179,14 @@ class Config(metaclass=Singleton):
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
try:
|
||||
with open(config_file) as file:
|
||||
config_params = yaml.load(file, Loader=yaml.FullLoader)
|
||||
except FileNotFoundError:
|
||||
config_params = {}
|
||||
with open(config_file) as file:
|
||||
config_params = yaml.load(file, Loader=yaml.FullLoader)
|
||||
self.openai_api_type = config_params.get("azure_api_type") or "azure"
|
||||
self.openai_api_base = config_params.get("azure_api_base") or ""
|
||||
self.openai_api_version = (
|
||||
config_params.get("azure_api_version") or "2023-03-15-preview"
|
||||
)
|
||||
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])
|
||||
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
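A hedged sketch of what an azure.yaml could look like given the keys read above (azure_api_type, azure_api_base, azure_api_version, azure_model_map); the endpoint and deployment names are invented for illustration:

import yaml

example_yaml = """
azure_api_type: azure
azure_api_base: https://example-resource.openai.azure.com   # illustrative
azure_api_version: 2023-03-15-preview
azure_model_map:
  gpt-3.5-turbo: my-gpt35-deployment   # illustrative deployment id
"""
config_params = yaml.load(example_yaml, Loader=yaml.FullLoader)
print(config_params["azure_model_map"])  # -> {'gpt-3.5-turbo': 'my-gpt35-deployment'}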
|
||||
|
||||
def set_continuous_mode(self, value: bool) -> None:
|
||||
"""Set the continuous mode value."""
|
||||
@@ -187,10 +220,6 @@ class Config(metaclass=Singleton):
|
||||
"""Set the browse_website command chunk max length value."""
|
||||
self.browse_chunk_max_length = value
|
||||
|
||||
def set_browse_summary_max_token(self, value: int) -> None:
|
||||
"""Set the browse_website command summary max token value."""
|
||||
self.browse_summary_max_token = value
|
||||
|
||||
def set_openai_api_key(self, value: str) -> None:
|
||||
"""Set the OpenAI API key value."""
|
||||
self.openai_api_key = value
|
||||
@@ -227,6 +256,18 @@ class Config(metaclass=Singleton):
|
||||
"""Set the debug mode value."""
|
||||
self.debug_mode = value
|
||||
|
||||
def set_plugins(self, value: list) -> None:
|
||||
"""Set the plugins value."""
|
||||
self.plugins = value
|
||||
|
||||
def set_temperature(self, value: int) -> None:
|
||||
"""Set the temperature value."""
|
||||
self.temperature = value
|
||||
|
||||
def set_memory_backend(self, name: str) -> None:
|
||||
"""Set the memory backend name."""
|
||||
self.memory_backend = name
|
||||
|
||||
|
||||
def check_openai_api_key() -> None:
|
||||
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
|
||||
@@ -235,6 +276,7 @@ def check_openai_api_key() -> None:
|
||||
print(
|
||||
Fore.RED
|
||||
+ "Please set your OpenAI API key in .env or as an environment variable."
|
||||
+ Fore.RESET
|
||||
)
|
||||
print("You can get your key from https://beta.openai.com/account/api-keys")
|
||||
print("You can get your key from https://platform.openai.com/account/api-keys")
|
||||
exit(1)
|
||||
|
||||
autogpt/configurator.py (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
"""Configurator module."""
|
||||
import click
|
||||
from colorama import Back, Fore, Style
|
||||
|
||||
from autogpt import utils
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_supported_memory_backends
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def create_config(
|
||||
continuous: bool,
|
||||
continuous_limit: int,
|
||||
ai_settings_file: str,
|
||||
skip_reprompt: bool,
|
||||
speak: bool,
|
||||
debug: bool,
|
||||
gpt3only: bool,
|
||||
gpt4only: bool,
|
||||
memory_type: str,
|
||||
browser_name: str,
|
||||
allow_downloads: bool,
|
||||
skip_news: bool,
|
||||
) -> None:
|
||||
"""Updates the config object with the given arguments.
|
||||
|
||||
Args:
|
||||
continuous (bool): Whether to run in continuous mode
|
||||
continuous_limit (int): The number of times to run in continuous mode
|
||||
ai_settings_file (str): The path to the ai_settings.yaml file
|
||||
skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
|
||||
speak (bool): Whether to enable speak mode
|
||||
debug (bool): Whether to enable debug mode
|
||||
gpt3only (bool): Whether to enable GPT3.5 only mode
|
||||
gpt4only (bool): Whether to enable GPT4 only mode
|
||||
memory_type (str): The type of memory backend to use
|
||||
browser_name (str): The name of the browser to use when using selenium to scrape the web
|
||||
allow_downloads (bool): Whether to allow Auto-GPT to download files natively
|
||||
skip_news (bool): Whether to suppress the output of the latest news on startup
|
||||
"""
|
||||
CFG.set_debug_mode(False)
|
||||
CFG.set_continuous_mode(False)
|
||||
CFG.set_speak_mode(False)
|
||||
|
||||
if debug:
|
||||
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_debug_mode(True)
|
||||
|
||||
if continuous:
|
||||
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
"Continuous mode is not recommended. It is potentially dangerous and may"
|
||||
" cause your AI to run forever or carry out actions you would not usually"
|
||||
" authorise. Use at your own risk.",
|
||||
)
|
||||
CFG.set_continuous_mode(True)
|
||||
|
||||
if continuous_limit:
|
||||
logger.typewriter_log(
|
||||
"Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
|
||||
)
|
||||
CFG.set_continuous_limit(continuous_limit)
|
||||
|
||||
# Check if continuous limit is used without continuous mode
|
||||
if continuous_limit and not continuous:
|
||||
raise click.UsageError("--continuous-limit can only be used with --continuous")
|
||||
|
||||
if speak:
|
||||
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_speak_mode(True)
|
||||
|
||||
if gpt3only:
|
||||
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_smart_llm_model(CFG.fast_llm_model)
|
||||
|
||||
if gpt4only:
|
||||
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_fast_llm_model(CFG.smart_llm_model)
|
||||
|
||||
if memory_type:
|
||||
supported_memory = get_supported_memory_backends()
|
||||
chosen = memory_type
|
||||
if chosen not in supported_memory:
|
||||
logger.typewriter_log(
|
||||
"ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
|
||||
Fore.RED,
|
||||
f"{supported_memory}",
|
||||
)
|
||||
logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
|
||||
else:
|
||||
CFG.memory_backend = chosen
|
||||
|
||||
if skip_reprompt:
|
||||
logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
|
||||
CFG.skip_reprompt = True
|
||||
|
||||
if ai_settings_file:
|
||||
file = ai_settings_file
|
||||
|
||||
# Validate file
|
||||
(validated, message) = utils.validate_yaml_file(file)
|
||||
if not validated:
|
||||
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
|
||||
logger.double_check()
|
||||
exit(1)
|
||||
|
||||
logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
|
||||
CFG.ai_settings_file = file
|
||||
CFG.skip_reprompt = True
|
||||
|
||||
if browser_name:
|
||||
CFG.selenium_web_browser = browser_name
|
||||
|
||||
if allow_downloads:
|
||||
logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.YELLOW,
|
||||
f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
|
||||
+ "It is recommended that you monitor any files it downloads carefully.",
|
||||
)
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.YELLOW,
|
||||
f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
|
||||
)
|
||||
CFG.allow_downloads = True
|
||||
|
||||
if skip_news:
|
||||
CFG.skip_news = True
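A hedged usage sketch of create_config, calling it directly with the arguments from the signature above (in the application it is driven by the CLI; the values here are illustrative):

from autogpt.configurator import create_config

create_config(
    continuous=False,
    continuous_limit=0,
    ai_settings_file="ai_settings.yaml",
    skip_reprompt=False,
    speak=False,
    debug=True,
    gpt3only=False,
    gpt4only=False,
    memory_type="local",
    browser_name="chrome",
    allow_downloads=False,
    skip_news=True,
)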
|
||||
@@ -1,53 +0,0 @@
|
||||
"""This module contains the function to fix JSON strings using GPT-3."""
|
||||
import json
|
||||
|
||||
from autogpt.llm_utils import call_ai_function
|
||||
from autogpt.logs import logger
|
||||
from autogpt.config import Config
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def fix_json(json_string: str, schema: str) -> str:
|
||||
"""Fix the given JSON string to make it parseable and fully compliant with
|
||||
the provided schema.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string to fix.
|
||||
schema (str): The schema to use to fix the JSON.
|
||||
Returns:
|
||||
str: The fixed JSON string.
|
||||
"""
|
||||
# Try to fix the JSON using GPT:
|
||||
function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
|
||||
args = [f"'''{json_string}'''", f"'''{schema}'''"]
|
||||
description_string = (
|
||||
"This function takes a JSON string and ensures that it"
|
||||
" is parseable and fully compliant with the provided schema. If an object"
|
||||
" or field specified in the schema isn't contained within the correct JSON,"
|
||||
" it is omitted. The function also escapes any double quotes within JSON"
|
||||
" string values to ensure that they are valid. If the JSON string contains"
|
||||
" any None or NaN values, they are replaced with null before being parsed."
|
||||
)
|
||||
|
||||
# If it doesn't already start with a "`", add one:
|
||||
if not json_string.startswith("`"):
|
||||
json_string = "```json\n" + json_string + "\n```"
|
||||
result_string = call_ai_function(
|
||||
function_string, args, description_string, model=CFG.fast_llm_model
|
||||
)
|
||||
logger.debug("------------ JSON FIX ATTEMPT ---------------")
|
||||
logger.debug(f"Original JSON: {json_string}")
|
||||
logger.debug("-----------")
|
||||
logger.debug(f"Fixed JSON: {result_string}")
|
||||
logger.debug("----------- END OF FIX ATTEMPT ----------------")
|
||||
|
||||
try:
|
||||
json.loads(result_string) # just check the validity
|
||||
return result_string
|
||||
except json.JSONDecodeError: # noqa: E722
|
||||
# Get the call stack:
|
||||
# import traceback
|
||||
# call_stack = traceback.format_exc()
|
||||
# print(f"Failed to fix JSON: '{json_string}' "+call_stack)
|
||||
return "failed"
|
||||
@@ -1,74 +0,0 @@
|
||||
"""Fix JSON brackets."""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
import regex
|
||||
from colorama import Fore
|
||||
|
||||
from autogpt.logs import logger
|
||||
from autogpt.config import Config
|
||||
from autogpt.speech import say_text
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
|
||||
if CFG.speak_mode and CFG.debug_mode:
|
||||
say_text(
|
||||
"I have received an invalid JSON response from the OpenAI API. "
|
||||
"Trying to fix it now."
|
||||
)
|
||||
logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
|
||||
|
||||
try:
|
||||
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
|
||||
json_match = json_pattern.search(json_string)
|
||||
|
||||
if json_match:
|
||||
# Extract the valid JSON object from the string
|
||||
json_string = json_match.group(0)
|
||||
logger.typewriter_log(
|
||||
title="Apparently json was fixed.", title_color=Fore.GREEN
|
||||
)
|
||||
if CFG.speak_mode and CFG.debug_mode:
|
||||
say_text("Apparently json was fixed.")
|
||||
else:
|
||||
raise ValueError("No valid JSON object found")
|
||||
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
if CFG.debug_mode:
|
||||
logger.error(f"Error: Invalid JSON: {json_string}\n")
|
||||
if CFG.speak_mode:
|
||||
say_text("Didn't work. I will have to ignore this response then.")
|
||||
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
|
||||
json_string = {}
|
||||
|
||||
return json_string
|
||||
|
||||
|
||||
def balance_braces(json_string: str) -> str | None:
|
||||
"""
|
||||
Balance the braces in a JSON string.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with braces balanced.
|
||||
"""
|
||||
|
||||
open_braces_count = json_string.count("{")
|
||||
close_braces_count = json_string.count("}")
|
||||
|
||||
while open_braces_count > close_braces_count:
|
||||
json_string += "}"
|
||||
close_braces_count += 1
|
||||
|
||||
while close_braces_count > open_braces_count:
|
||||
json_string = json_string.rstrip("}")
|
||||
close_braces_count -= 1
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json.loads(json_string)
|
||||
return json_string
|
||||
@@ -1,33 +0,0 @@
|
||||
""" Fix invalid escape sequences in JSON strings. """
|
||||
import json
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_fixes.utilities import extract_char_position
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
|
||||
"""Fix invalid escape sequences in JSON strings.
|
||||
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
error_message (str): The error message from the JSONDecodeError
|
||||
exception.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with invalid escape sequences fixed.
|
||||
"""
|
||||
while error_message.startswith("Invalid \\escape"):
|
||||
bad_escape_location = extract_char_position(error_message)
|
||||
json_to_load = (
|
||||
json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :]
|
||||
)
|
||||
try:
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
if CFG.debug_mode:
|
||||
print("json loads error - fix invalid escape", e)
|
||||
error_message = str(e)
|
||||
return json_to_load
|
||||
@@ -1,27 +0,0 @@
|
||||
"""Fix quotes in a JSON string."""
|
||||
import json
|
||||
import re
|
||||
|
||||
|
||||
def add_quotes_to_property_names(json_string: str) -> str:
|
||||
"""
|
||||
Add quotes to property names in a JSON string.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with quotes added to property names.
|
||||
"""
|
||||
|
||||
def replace_func(match: re.Match) -> str:
|
||||
return f'"{match[1]}":'
|
||||
|
||||
property_name_pattern = re.compile(r"(\w+):")
|
||||
corrected_json_string = property_name_pattern.sub(replace_func, json_string)
|
||||
|
||||
try:
|
||||
json.loads(corrected_json_string)
|
||||
return corrected_json_string
|
||||
except json.JSONDecodeError as e:
|
||||
raise e
|
||||
@@ -1,144 +0,0 @@
|
||||
"""Fix and parse JSON strings."""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
from typing import Any
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_fixes.auto_fix import fix_json
|
||||
from autogpt.json_fixes.bracket_termination import balance_braces
|
||||
from autogpt.json_fixes.escaping import fix_invalid_escape
|
||||
from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
JSON_SCHEMA = """
|
||||
{
|
||||
"command": {
|
||||
"name": "command name",
|
||||
"args": {
|
||||
"arg name": "value"
|
||||
}
|
||||
},
|
||||
"thoughts":
|
||||
{
|
||||
"text": "thought",
|
||||
"reasoning": "reasoning",
|
||||
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
|
||||
"criticism": "constructive self-criticism",
|
||||
"speak": "thoughts summary to say to user"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
def correct_json(json_to_load: str) -> str:
|
||||
"""
|
||||
Correct common JSON errors.
|
||||
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
"""
|
||||
|
||||
try:
|
||||
if CFG.debug_mode:
|
||||
print("json", json_to_load)
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
if CFG.debug_mode:
|
||||
print("json loads error", e)
|
||||
error_message = str(e)
|
||||
if error_message.startswith("Invalid \\escape"):
|
||||
json_to_load = fix_invalid_escape(json_to_load, error_message)
|
||||
if error_message.startswith(
|
||||
"Expecting property name enclosed in double quotes"
|
||||
):
|
||||
json_to_load = add_quotes_to_property_names(json_to_load)
|
||||
try:
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
if CFG.debug_mode:
|
||||
print("json loads error - add quotes", e)
|
||||
error_message = str(e)
|
||||
if balanced_str := balance_braces(json_to_load):
|
||||
return balanced_str
|
||||
return json_to_load
|
||||
|
||||
|
||||
def fix_and_parse_json(
|
||||
json_to_load: str, try_to_fix_with_gpt: bool = True
|
||||
) -> str | dict[Any, Any]:
|
||||
"""Fix and parse JSON string
|
||||
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT.
|
||||
Defaults to True.
|
||||
|
||||
Returns:
|
||||
str or dict[Any, Any]: The parsed JSON.
|
||||
"""
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json_to_load = json_to_load.replace("\t", "")
|
||||
return json.loads(json_to_load)
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json_to_load = correct_json(json_to_load)
|
||||
return json.loads(json_to_load)
|
||||
# Let's do something manually:
|
||||
# sometimes GPT responds with something BEFORE the braces:
|
||||
# "I'm sorry, I don't understand. Please try again."
|
||||
# {"text": "I'm sorry, I don't understand. Please try again.",
|
||||
# "confidence": 0.0}
|
||||
# So let's try to find the first brace and then parse the rest
|
||||
# of the string
|
||||
try:
|
||||
brace_index = json_to_load.index("{")
|
||||
maybe_fixed_json = json_to_load[brace_index:]
|
||||
last_brace_index = maybe_fixed_json.rindex("}")
|
||||
maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1]
|
||||
return json.loads(maybe_fixed_json)
|
||||
except (json.JSONDecodeError, ValueError) as e:
|
||||
return try_ai_fix(try_to_fix_with_gpt, e, json_to_load)
|
||||
|
||||
|
||||
def try_ai_fix(
|
||||
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
|
||||
) -> str | dict[Any, Any]:
|
||||
"""Try to fix the JSON with the AI
|
||||
|
||||
Args:
|
||||
try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI.
|
||||
exception (Exception): The exception that was raised.
|
||||
json_to_load (str): The JSON string to load.
|
||||
|
||||
Raises:
|
||||
exception: If try_to_fix_with_gpt is False.
|
||||
|
||||
Returns:
|
||||
str or dict[Any, Any]: The JSON string or dictionary.
|
||||
"""
|
||||
if not try_to_fix_with_gpt:
|
||||
raise exception
|
||||
|
||||
logger.warn(
|
||||
"Warning: Failed to parse AI output, attempting to fix."
|
||||
"\n If you see this warning frequently, it's likely that"
|
||||
" your prompt is confusing the AI. Try changing it up"
|
||||
" slightly."
|
||||
)
|
||||
# Now try to fix this up using the ai_functions
|
||||
ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA)
|
||||
|
||||
if ai_fixed_json != "failed":
|
||||
return json.loads(ai_fixed_json)
|
||||
# This allows the AI to react to the error message,
|
||||
# which usually results in it correcting its ways.
|
||||
logger.error("Failed to fix AI output, telling the AI.")
|
||||
return json_to_load
|
||||
@@ -1,20 +0,0 @@
|
||||
"""Utilities for the json_fixes package."""
|
||||
import re
|
||||
|
||||
|
||||
def extract_char_position(error_message: str) -> int:
|
||||
"""Extract the character position from the JSONDecodeError message.
|
||||
|
||||
Args:
|
||||
error_message (str): The error message from the JSONDecodeError
|
||||
exception.
|
||||
|
||||
Returns:
|
||||
int: The character position.
|
||||
"""
|
||||
|
||||
char_pattern = re.compile(r"\(char (\d+)\)")
|
||||
if match := char_pattern.search(error_message):
|
||||
return int(match[1])
|
||||
else:
|
||||
raise ValueError("Character position not found in the error message.")
|
||||
autogpt/json_utils/json_fix_general.py (new file, 121 lines)
@@ -0,0 +1,121 @@
|
||||
"""This module contains functions to fix JSON strings using general programmatic approaches, suitable for addressing
|
||||
common JSON formatting issues."""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_utils.utilities import extract_char_position
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
|
||||
"""Fix invalid escape sequences in JSON strings.
|
||||
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
error_message (str): The error message from the JSONDecodeError
|
||||
exception.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with invalid escape sequences fixed.
|
||||
"""
|
||||
while error_message.startswith("Invalid \\escape"):
|
||||
bad_escape_location = extract_char_position(error_message)
|
||||
json_to_load = (
|
||||
json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :]
|
||||
)
|
||||
try:
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
logger.debug("json loads error - fix invalid escape", e)
|
||||
error_message = str(e)
|
||||
return json_to_load
|
||||
|
||||
|
||||
def balance_braces(json_string: str) -> Optional[str]:
|
||||
"""
|
||||
Balance the braces in a JSON string.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with braces balanced.
|
||||
"""
|
||||
|
||||
open_braces_count = json_string.count("{")
|
||||
close_braces_count = json_string.count("}")
|
||||
|
||||
while open_braces_count > close_braces_count:
|
||||
json_string += "}"
|
||||
close_braces_count += 1
|
||||
|
||||
while close_braces_count > open_braces_count:
|
||||
json_string = json_string.rstrip("}")
|
||||
close_braces_count -= 1
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json.loads(json_string)
|
||||
return json_string
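Usage sketch for balance_braces: missing closing braces are appended until the string parses (inputs below are illustrative):

from autogpt.json_utils.json_fix_general import balance_braces

print(balance_braces('{"a": {"b": 1}'))   # -> {"a": {"b": 1}}
print(balance_braces('{"a": {"b": 1'))    # -> {"a": {"b": 1}}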
|
||||
|
||||
|
||||
def add_quotes_to_property_names(json_string: str) -> str:
|
||||
"""
|
||||
Add quotes to property names in a JSON string.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with quotes added to property names.
|
||||
"""
|
||||
|
||||
def replace_func(match: re.Match) -> str:
|
||||
return f'"{match[1]}":'
|
||||
|
||||
property_name_pattern = re.compile(r"(\w+):")
|
||||
corrected_json_string = property_name_pattern.sub(replace_func, json_string)
|
||||
|
||||
try:
|
||||
json.loads(corrected_json_string)
|
||||
return corrected_json_string
|
||||
except json.JSONDecodeError as e:
|
||||
raise e
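Usage sketch for add_quotes_to_property_names: bare property names are wrapped in double quotes so the string becomes parseable (input is illustrative):

from autogpt.json_utils.json_fix_general import add_quotes_to_property_names

print(add_quotes_to_property_names('{name: "Auto-GPT", version: 1}'))
# -> {"name": "Auto-GPT", "version": 1}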
|
||||
|
||||
|
||||
def correct_json(json_to_load: str) -> str:
|
||||
"""
|
||||
Correct common JSON errors.
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
"""
|
||||
|
||||
try:
|
||||
logger.debug("json", json_to_load)
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
logger.debug("json loads error", e)
|
||||
error_message = str(e)
|
||||
if error_message.startswith("Invalid \\escape"):
|
||||
json_to_load = fix_invalid_escape(json_to_load, error_message)
|
||||
if error_message.startswith(
|
||||
"Expecting property name enclosed in double quotes"
|
||||
):
|
||||
json_to_load = add_quotes_to_property_names(json_to_load)
|
||||
try:
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
logger.debug("json loads error - add quotes", e)
|
||||
error_message = str(e)
|
||||
if balanced_str := balance_braces(json_to_load):
|
||||
return balanced_str
|
||||
return json_to_load
|
||||
autogpt/json_utils/json_fix_llm.py (new file, 239 lines)
@@ -0,0 +1,239 @@
|
||||
"""This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance
|
||||
of the ChatGPT API or LLM models."""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
from typing import Any, Dict
|
||||
|
||||
from colorama import Fore
|
||||
from regex import regex
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_utils.json_fix_general import correct_json
|
||||
from autogpt.llm import call_ai_function
|
||||
from autogpt.logs import logger
|
||||
from autogpt.speech import say_text
|
||||
|
||||
JSON_SCHEMA = """
|
||||
{
|
||||
"command": {
|
||||
"name": "command name",
|
||||
"args": {
|
||||
"arg name": "value"
|
||||
}
|
||||
},
|
||||
"thoughts":
|
||||
{
|
||||
"text": "thought",
|
||||
"reasoning": "reasoning",
|
||||
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
|
||||
"criticism": "constructive self-criticism",
|
||||
"speak": "thoughts summary to say to user"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def auto_fix_json(json_string: str, schema: str) -> str:
|
||||
"""Fix the given JSON string to make it parseable and fully compliant with
|
||||
the provided schema using GPT-3.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string to fix.
|
||||
schema (str): The schema to use to fix the JSON.
|
||||
Returns:
|
||||
str: The fixed JSON string.
|
||||
"""
|
||||
# Try to fix the JSON using GPT:
|
||||
function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
|
||||
args = [f"'''{json_string}'''", f"'''{schema}'''"]
|
||||
description_string = (
|
||||
"This function takes a JSON string and ensures that it"
|
||||
" is parseable and fully compliant with the provided schema. If an object"
|
||||
" or field specified in the schema isn't contained within the correct JSON,"
|
||||
" it is omitted. The function also escapes any double quotes within JSON"
|
||||
" string values to ensure that they are valid. If the JSON string contains"
|
||||
" any None or NaN values, they are replaced with null before being parsed."
|
||||
)
|
||||
|
||||
# If it doesn't already start with a "`", add one:
|
||||
if not json_string.startswith("`"):
|
||||
json_string = "```json\n" + json_string + "\n```"
|
||||
result_string = call_ai_function(
|
||||
function_string, args, description_string, model=CFG.fast_llm_model
|
||||
)
|
||||
logger.debug("------------ JSON FIX ATTEMPT ---------------")
|
||||
logger.debug(f"Original JSON: {json_string}")
|
||||
logger.debug("-----------")
|
||||
logger.debug(f"Fixed JSON: {result_string}")
|
||||
logger.debug("----------- END OF FIX ATTEMPT ----------------")
|
||||
|
||||
try:
|
||||
json.loads(result_string) # just check the validity
|
||||
return result_string
|
||||
except json.JSONDecodeError: # noqa: E722
|
||||
# Get the call stack:
|
||||
# import traceback
|
||||
# call_stack = traceback.format_exc()
|
||||
# print(f"Failed to fix JSON: '{json_string}' "+call_stack)
|
||||
return "failed"
|
||||
|
||||
|
||||
def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
|
||||
"""Fix the given JSON string to make it parseable and fully compliant with two techniques.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string to fix.
|
||||
|
||||
Returns:
|
||||
str: The fixed JSON string.
|
||||
"""
|
||||
assistant_reply = assistant_reply.strip()
|
||||
if assistant_reply.startswith("```json"):
|
||||
assistant_reply = assistant_reply[7:]
|
||||
if assistant_reply.endswith("```"):
|
||||
assistant_reply = assistant_reply[:-3]
|
||||
try:
|
||||
return json.loads(assistant_reply) # just check the validity
|
||||
except json.JSONDecodeError: # noqa: E722
|
||||
pass
|
||||
|
||||
if assistant_reply.startswith("json "):
|
||||
assistant_reply = assistant_reply[5:]
|
||||
assistant_reply = assistant_reply.strip()
|
||||
try:
|
||||
return json.loads(assistant_reply) # just check the validity
|
||||
except json.JSONDecodeError: # noqa: E722
|
||||
pass
|
||||
|
||||
# Parse and print Assistant response
|
||||
assistant_reply_json = fix_and_parse_json(assistant_reply)
|
||||
logger.debug("Assistant reply JSON: %s", str(assistant_reply_json))
|
||||
if assistant_reply_json == {}:
|
||||
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
|
||||
assistant_reply
|
||||
)
|
||||
|
||||
logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json))
|
||||
if assistant_reply_json != {}:
|
||||
return assistant_reply_json
|
||||
|
||||
logger.error(
|
||||
"Error: The following AI output couldn't be converted to a JSON:\n",
|
||||
assistant_reply,
|
||||
)
|
||||
if CFG.speak_mode:
|
||||
say_text("I have received an invalid JSON response from the OpenAI API.")
|
||||
|
||||
return {}
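A standalone sketch of the fence-stripping step at the top of fix_json_using_multiple_techniques: a reply wrapped in ```json ... ``` markers is reduced to its JSON payload before parsing (the reply text is illustrative):

reply = '```json\n{"command": {"name": "noop", "args": {}}}\n```'
reply = reply.strip()
if reply.startswith("```json"):
    reply = reply[7:]
if reply.endswith("```"):
    reply = reply[:-3]
print(reply.strip())  # -> {"command": {"name": "noop", "args": {}}}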
|
||||
|
||||
|
||||
def fix_and_parse_json(
|
||||
json_to_load: str, try_to_fix_with_gpt: bool = True
|
||||
) -> Dict[Any, Any]:
|
||||
"""Fix and parse JSON string
|
||||
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT.
|
||||
Defaults to True.
|
||||
|
||||
Returns:
|
||||
str or dict[Any, Any]: The parsed JSON.
|
||||
"""
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json_to_load = json_to_load.replace("\t", "")
|
||||
return json.loads(json_to_load)
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json_to_load = correct_json(json_to_load)
|
||||
return json.loads(json_to_load)
|
||||
# Let's do something manually:
|
||||
# sometimes GPT responds with something BEFORE the braces:
|
||||
# "I'm sorry, I don't understand. Please try again."
|
||||
# {"text": "I'm sorry, I don't understand. Please try again.",
|
||||
# "confidence": 0.0}
|
||||
# So let's try to find the first brace and then parse the rest
|
||||
# of the string
|
||||
try:
|
||||
brace_index = json_to_load.index("{")
|
||||
maybe_fixed_json = json_to_load[brace_index:]
|
||||
last_brace_index = maybe_fixed_json.rindex("}")
|
||||
maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1]
|
||||
return json.loads(maybe_fixed_json)
|
||||
except (json.JSONDecodeError, ValueError) as e:
|
||||
return try_ai_fix(try_to_fix_with_gpt, e, json_to_load)
|
||||
|
||||
|
||||
def try_ai_fix(
|
||||
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
|
||||
) -> Dict[Any, Any]:
|
||||
"""Try to fix the JSON with the AI
|
||||
|
||||
Args:
|
||||
try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI.
|
||||
exception (Exception): The exception that was raised.
|
||||
json_to_load (str): The JSON string to load.
|
||||
|
||||
Raises:
|
||||
exception: If try_to_fix_with_gpt is False.
|
||||
|
||||
Returns:
|
||||
str or dict[Any, Any]: The JSON string or dictionary.
|
||||
"""
|
||||
if not try_to_fix_with_gpt:
|
||||
raise exception
|
||||
if CFG.debug_mode:
|
||||
logger.warn(
|
||||
"Warning: Failed to parse AI output, attempting to fix."
|
||||
"\n If you see this warning frequently, it's likely that"
|
||||
" your prompt is confusing the AI. Try changing it up"
|
||||
" slightly."
|
||||
)
|
||||
# Now try to fix this up using the ai_functions
|
||||
ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA)
|
||||
|
||||
if ai_fixed_json != "failed":
|
||||
return json.loads(ai_fixed_json)
|
||||
# This allows the AI to react to the error message,
|
||||
# which usually results in it correcting its ways.
|
||||
# logger.error("Failed to fix AI output, telling the AI.")
|
||||
return {}
|
||||
|
||||
|
||||
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
|
||||
if CFG.speak_mode and CFG.debug_mode:
|
||||
say_text(
|
||||
"I have received an invalid JSON response from the OpenAI API. "
|
||||
"Trying to fix it now."
|
||||
)
|
||||
logger.error("Attempting to fix JSON by finding outermost brackets\n")
|
||||
|
||||
try:
|
||||
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
|
||||
json_match = json_pattern.search(json_string)
|
||||
|
||||
if json_match:
|
||||
# Extract the valid JSON object from the string
|
||||
json_string = json_match.group(0)
|
||||
logger.typewriter_log(
|
||||
title="Apparently json was fixed.", title_color=Fore.GREEN
|
||||
)
|
||||
if CFG.speak_mode and CFG.debug_mode:
|
||||
say_text("Apparently json was fixed.")
|
||||
else:
|
||||
return {}
|
||||
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
if CFG.debug_mode:
|
||||
logger.error(f"Error: Invalid JSON: {json_string}\n")
|
||||
if CFG.speak_mode:
|
||||
say_text("Didn't work. I will have to ignore this response then.")
|
||||
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
|
||||
json_string = {}
|
||||
|
||||
return fix_and_parse_json(json_string)
|
||||
autogpt/json_utils/llm_response_format_1.json (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "type": "object",
  "properties": {
    "thoughts": {
      "type": "object",
      "properties": {
        "text": {"type": "string"},
        "reasoning": {"type": "string"},
        "plan": {"type": "string"},
        "criticism": {"type": "string"},
        "speak": {"type": "string"}
      },
      "required": ["text", "reasoning", "plan", "criticism", "speak"],
      "additionalProperties": false
    },
    "command": {
      "type": "object",
      "properties": {
        "name": {"type": "string"},
        "args": {
          "type": "object"
        }
      },
      "required": ["name", "args"],
      "additionalProperties": false
    }
  },
  "required": ["thoughts", "command"],
  "additionalProperties": false
}
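A hedged sketch of validating an assistant reply against the schema above with jsonschema's Draft7Validator (the same validator the utilities module below uses); the reply values are illustrative, and the script assumes it runs from the repository root so the schema path resolves:

import json
from jsonschema import Draft7Validator

reply = {
    "thoughts": {
        "text": "t", "reasoning": "r", "plan": "p",
        "criticism": "c", "speak": "s",
    },
    "command": {"name": "google", "args": {"input": "query"}},
}
with open("autogpt/json_utils/llm_response_format_1.json") as f:
    schema = json.load(f)
errors = sorted(Draft7Validator(schema).iter_errors(reply), key=lambda e: e.path)
print("valid" if not errors else [e.message for e in errors])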
|
||||
autogpt/json_utils/utilities.py (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
"""Utilities for the json_fixes package."""
|
||||
import json
|
||||
import re
|
||||
|
||||
from jsonschema import Draft7Validator
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
|
||||
|
||||
|
||||
def extract_char_position(error_message: str) -> int:
|
||||
"""Extract the character position from the JSONDecodeError message.
|
||||
|
||||
Args:
|
||||
error_message (str): The error message from the JSONDecodeError
|
||||
exception.
|
||||
|
||||
Returns:
|
||||
int: The character position.
|
||||
"""
|
||||
|
||||
char_pattern = re.compile(r"\(char (\d+)\)")
|
||||
if match := char_pattern.search(error_message):
|
||||
return int(match[1])
|
||||
else:
|
||||
raise ValueError("Character position not found in the error message.")
|
||||
|
||||
|
||||
def validate_json(json_object: object, schema_name: str) -> dict | None:
|
||||
"""
|
||||
Validate a JSON object against the named schema file.

Args:
json_object (object): The JSON object to validate.
schema_name (str): Name of the schema file (without .json) under autogpt/json_utils/.
|
||||
"""
|
||||
with open(f"autogpt/json_utils/{schema_name}.json", "r") as f:
|
||||
schema = json.load(f)
|
||||
validator = Draft7Validator(schema)
|
||||
|
||||
if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
|
||||
logger.error("The JSON object is invalid.")
|
||||
if CFG.debug_mode:
|
||||
logger.error(
|
||||
json.dumps(json_object, indent=4)
|
||||
) # Replace 'json_object' with the variable containing the JSON data
|
||||
logger.error("The following issues were found:")
|
||||
|
||||
for error in errors:
|
||||
logger.error(f"Error: {error.message}")
|
||||
else:
|
||||
logger.debug("The JSON object is valid.")
|
||||
|
||||
return json_object
|
||||
|
||||
|
||||
def validate_json_string(json_string: str, schema_name: str) -> dict | None:
|
||||
"""
|
||||
Parse a JSON string and validate it against the named schema file.

Args:
json_string (str): The JSON string to parse and validate.
schema_name (str): Name of the schema file (without .json) under autogpt/json_utils/.

Returns:
dict | None: The parsed JSON object if it loads, otherwise None.
|
||||
"""
|
||||
|
||||
try:
|
||||
json_loaded = json.loads(json_string)
|
||||
return validate_json(json_loaded, schema_name)
|
||||
except:
|
||||
return None
|
||||
|
||||
|
||||
def is_string_valid_json(json_string: str, schema_name: str) -> bool:
|
||||
"""
|
||||
Check whether a JSON string parses and validates against the named schema file.

Args:
json_string (str): The JSON string to check.
schema_name (str): Name of the schema file (without .json) under autogpt/json_utils/.

Returns:
bool: True if the string is valid JSON per the schema, False otherwise.
|
||||
"""
|
||||
|
||||
return validate_json_string(json_string, schema_name) is not None
|
||||
autogpt/llm/__init__.py (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import (
    ChatModelInfo,
    ChatModelResponse,
    EmbeddingModelInfo,
    EmbeddingModelResponse,
    LLMResponse,
    Message,
    ModelInfo,
)
from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context
from autogpt.llm.llm_utils import (
    call_ai_function,
    create_chat_completion,
    get_ada_embedding,
)
from autogpt.llm.modelsinfo import COSTS
from autogpt.llm.token_counter import count_message_tokens, count_string_tokens

__all__ = [
    "ApiManager",
    "Message",
    "ModelInfo",
    "ChatModelInfo",
    "EmbeddingModelInfo",
    "LLMResponse",
    "ChatModelResponse",
    "EmbeddingModelResponse",
    "create_chat_message",
    "generate_context",
    "chat_with_ai",
    "call_ai_function",
    "create_chat_completion",
    "get_ada_embedding",
    "COSTS",
    "count_message_tokens",
    "count_string_tokens",
]
|
||||
autogpt/llm/api_manager.py (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import openai
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.modelsinfo import COSTS
|
||||
from autogpt.logs import logger
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
||||
class ApiManager(metaclass=Singleton):
|
||||
def __init__(self):
|
||||
self.total_prompt_tokens = 0
|
||||
self.total_completion_tokens = 0
|
||||
self.total_cost = 0
|
||||
self.total_budget = 0
|
||||
|
||||
def reset(self):
|
||||
self.total_prompt_tokens = 0
|
||||
self.total_completion_tokens = 0
|
||||
self.total_cost = 0
|
||||
self.total_budget = 0.0
|
||||
|
||||
def create_chat_completion(
|
||||
self,
|
||||
messages: list, # type: ignore
|
||||
model: str | None = None,
|
||||
temperature: float = None,
|
||||
max_tokens: int | None = None,
|
||||
deployment_id=None,
|
||||
) -> str:
|
||||
"""
|
||||
Create a chat completion and update the cost.
|
||||
Args:
|
||||
messages (list): The list of messages to send to the API.
|
||||
model (str): The model to use for the API call.
|
||||
temperature (float): The temperature to use for the API call.
|
||||
max_tokens (int): The maximum number of tokens for the API call.
|
||||
Returns:
|
||||
str: The AI's response.
|
||||
"""
|
||||
cfg = Config()
|
||||
if temperature is None:
|
||||
temperature = cfg.temperature
|
||||
if deployment_id is not None:
|
||||
response = openai.ChatCompletion.create(
|
||||
deployment_id=deployment_id,
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
api_key=cfg.openai_api_key,
|
||||
)
|
||||
else:
|
||||
response = openai.ChatCompletion.create(
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
api_key=cfg.openai_api_key,
|
||||
)
|
||||
logger.debug(f"Response: {response}")
|
||||
prompt_tokens = response.usage.prompt_tokens
|
||||
completion_tokens = response.usage.completion_tokens
|
||||
self.update_cost(prompt_tokens, completion_tokens, model)
|
||||
return response
|
||||
|
||||
def update_cost(self, prompt_tokens, completion_tokens, model):
|
||||
"""
|
||||
Update the total cost, prompt tokens, and completion tokens.
|
||||
|
||||
Args:
|
||||
prompt_tokens (int): The number of tokens used in the prompt.
|
||||
completion_tokens (int): The number of tokens used in the completion.
|
||||
model (str): The model used for the API call.
|
||||
"""
|
||||
self.total_prompt_tokens += prompt_tokens
|
||||
self.total_completion_tokens += completion_tokens
|
||||
self.total_cost += (
|
||||
prompt_tokens * COSTS[model]["prompt"]
|
||||
+ completion_tokens * COSTS[model]["completion"]
|
||||
) / 1000
|
||||
logger.debug(f"Total running cost: ${self.total_cost:.3f}")
|
||||
|
||||
def set_total_budget(self, total_budget):
|
||||
"""
|
||||
Sets the total user-defined budget for API calls.
|
||||
|
||||
Args:
|
||||
total_budget (float): The total budget for API calls.
|
||||
"""
|
||||
self.total_budget = total_budget
|
||||
|
||||
def get_total_prompt_tokens(self):
|
||||
"""
|
||||
Get the total number of prompt tokens.
|
||||
|
||||
Returns:
|
||||
int: The total number of prompt tokens.
|
||||
"""
|
||||
return self.total_prompt_tokens
|
||||
|
||||
def get_total_completion_tokens(self):
|
||||
"""
|
||||
Get the total number of completion tokens.
|
||||
|
||||
Returns:
|
||||
int: The total number of completion tokens.
|
||||
"""
|
||||
return self.total_completion_tokens
|
||||
|
||||
def get_total_cost(self):
|
||||
"""
|
||||
Get the total cost of API calls.
|
||||
|
||||
Returns:
|
||||
float: The total cost of API calls.
|
||||
"""
|
||||
return self.total_cost
|
||||
|
||||
def get_total_budget(self):
|
||||
"""
|
||||
Get the total user-defined budget for API calls.
|
||||
|
||||
Returns:
|
||||
float: The total budget for API calls.
|
||||
"""
|
||||
return self.total_budget
|
||||
autogpt/llm/base.py (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, TypedDict
|
||||
|
||||
|
||||
class Message(TypedDict):
|
||||
"""OpenAI Message object containing a role and the message content"""
|
||||
|
||||
role: str
|
||||
content: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class ModelInfo:
|
||||
"""Struct for model information.
|
||||
|
||||
Would be lovely to eventually get this directly from APIs, but needs to be scraped from
|
||||
websites for now.
|
||||
|
||||
"""
|
||||
|
||||
name: str
|
||||
prompt_token_cost: float
|
||||
completion_token_cost: float
|
||||
max_tokens: int
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChatModelInfo(ModelInfo):
|
||||
"""Struct for chat model information."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
@dataclass
|
||||
class EmbeddingModelInfo(ModelInfo):
|
||||
"""Struct for embedding model information."""
|
||||
|
||||
embedding_dimensions: int
|
||||
|
||||
|
||||
@dataclass
|
||||
class LLMResponse:
|
||||
"""Standard response struct for a response from an LLM model."""
|
||||
|
||||
model_info: ModelInfo
|
||||
prompt_tokens_used: int = 0
|
||||
completion_tokens_used: int = 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class EmbeddingModelResponse(LLMResponse):
|
||||
"""Standard response struct for a response from an embedding model."""
|
||||
|
||||
embedding: List[float] = field(default_factory=list)
|
||||
|
||||
def __post_init__(self):
|
||||
if self.completion_tokens_used:
|
||||
raise ValueError("Embeddings should not have completion tokens used.")
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChatModelResponse(LLMResponse):
|
||||
"""Standard response struct for a response from an LLM model."""
|
||||
|
||||
content: str = None
|
||||
@@ -1,16 +1,26 @@
|
||||
import time
|
||||
from random import shuffle
|
||||
|
||||
from openai.error import RateLimitError
|
||||
|
||||
from autogpt import token_counter
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm_utils import create_chat_completion
|
||||
from autogpt.llm.api_manager import ApiManager
|
||||
from autogpt.llm.base import Message
|
||||
from autogpt.llm.llm_utils import create_chat_completion
|
||||
from autogpt.llm.token_counter import count_message_tokens
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory_management.store_memory import (
|
||||
save_memory_trimmed_from_context_window,
|
||||
)
|
||||
from autogpt.memory_management.summary_memory import (
|
||||
get_newly_trimmed_messages,
|
||||
update_running_summary,
|
||||
)
|
||||
|
||||
cfg = Config()
|
||||
|
||||
|
||||
def create_chat_message(role, content):
|
||||
def create_chat_message(role, content) -> Message:
|
||||
"""
|
||||
Create a chat message with the given role and content.
|
||||
|
||||
@@ -30,17 +40,17 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
|
||||
create_chat_message(
|
||||
"system", f"The current time and date is {time.strftime('%c')}"
|
||||
),
|
||||
create_chat_message(
|
||||
"system",
|
||||
f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
|
||||
),
|
||||
# create_chat_message(
|
||||
# "system",
|
||||
# f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
|
||||
# ),
|
||||
]
|
||||
|
||||
# Add messages from the full message history until we reach the token limit
|
||||
next_message_to_add_index = len(full_message_history) - 1
|
||||
insertion_index = len(current_context)
|
||||
# Count the currently used tokens
|
||||
current_tokens_used = token_counter.count_message_tokens(current_context, model)
|
||||
current_tokens_used = count_message_tokens(current_context, model)
|
||||
return (
|
||||
next_message_to_add_index,
|
||||
current_tokens_used,
|
||||
@@ -51,7 +61,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
|
||||
|
||||
# TODO: Change debug from hardcode to argument
|
||||
def chat_with_ai(
|
||||
prompt, user_input, full_message_history, permanent_memory, token_limit
|
||||
agent, prompt, user_input, full_message_history, permanent_memory, token_limit
|
||||
):
|
||||
"""Interact with the OpenAI API, sending the prompt, user input, message history,
|
||||
and permanent memory."""
|
||||
@@ -75,16 +85,21 @@ def chat_with_ai(
|
||||
"""
|
||||
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
|
||||
# Reserve 1000 tokens for the response
|
||||
|
||||
logger.debug(f"Token limit: {token_limit}")
|
||||
send_token_limit = token_limit - 1000
|
||||
|
||||
relevant_memory = (
|
||||
""
|
||||
if len(full_message_history) == 0
|
||||
else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
|
||||
)
|
||||
|
||||
# if len(full_message_history) == 0:
|
||||
# relevant_memory = ""
|
||||
# else:
|
||||
# recent_history = full_message_history[-5:]
|
||||
# shuffle(recent_history)
|
||||
# relevant_memories = permanent_memory.get_relevant(
|
||||
# str(recent_history), 5
|
||||
# )
|
||||
# if relevant_memories:
|
||||
# shuffle(relevant_memories)
|
||||
# relevant_memory = str(relevant_memories)
|
||||
relevant_memory = ""
|
||||
logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
|
||||
|
||||
(
|
||||
@@ -94,30 +109,36 @@ def chat_with_ai(
|
||||
current_context,
|
||||
) = generate_context(prompt, relevant_memory, full_message_history, model)
|
||||
|
||||
while current_tokens_used > 2500:
|
||||
# remove memories until we are under 2500 tokens
|
||||
relevant_memory = relevant_memory[:-1]
|
||||
(
|
||||
next_message_to_add_index,
|
||||
current_tokens_used,
|
||||
insertion_index,
|
||||
current_context,
|
||||
) = generate_context(
|
||||
prompt, relevant_memory, full_message_history, model
|
||||
)
|
||||
# while current_tokens_used > 2500:
|
||||
# # remove memories until we are under 2500 tokens
|
||||
# relevant_memory = relevant_memory[:-1]
|
||||
# (
|
||||
# next_message_to_add_index,
|
||||
# current_tokens_used,
|
||||
# insertion_index,
|
||||
# current_context,
|
||||
# ) = generate_context(
|
||||
# prompt, relevant_memory, full_message_history, model
|
||||
# )
|
||||
|
||||
current_tokens_used += token_counter.count_message_tokens(
|
||||
current_tokens_used += count_message_tokens(
|
||||
[create_chat_message("user", user_input)], model
|
||||
) # Account for user input (appended later)
|
||||
|
||||
current_tokens_used += 500 # Account for memory (appended later) TODO: The final memory may be less than 500 tokens
|
||||
|
||||
# Add Messages until the token limit is reached or there are no more messages to add.
|
||||
while next_message_to_add_index >= 0:
|
||||
# print (f"CURRENT TOKENS USED: {current_tokens_used}")
|
||||
message_to_add = full_message_history[next_message_to_add_index]
|
||||
|
||||
tokens_to_add = token_counter.count_message_tokens(
|
||||
[message_to_add], model
|
||||
)
|
||||
tokens_to_add = count_message_tokens([message_to_add], model)
|
||||
if current_tokens_used + tokens_to_add > send_token_limit:
|
||||
# save_memory_trimmed_from_context_window(
|
||||
# full_message_history,
|
||||
# next_message_to_add_index,
|
||||
# permanent_memory,
|
||||
# )
|
||||
break
|
||||
|
||||
# Add the most recent message to the start of the current context,
|
||||
@@ -132,9 +153,66 @@ def chat_with_ai(
|
||||
# Move to the next most recent message in the full message history
|
||||
next_message_to_add_index -= 1
|
||||
|
||||
# Insert Memories
|
||||
if len(full_message_history) > 0:
|
||||
(
|
||||
newly_trimmed_messages,
|
||||
agent.last_memory_index,
|
||||
) = get_newly_trimmed_messages(
|
||||
full_message_history=full_message_history,
|
||||
current_context=current_context,
|
||||
last_memory_index=agent.last_memory_index,
|
||||
)
|
||||
agent.summary_memory = update_running_summary(
|
||||
current_memory=agent.summary_memory,
|
||||
new_events=newly_trimmed_messages,
|
||||
)
|
||||
current_context.insert(insertion_index, agent.summary_memory)
|
||||
|
||||
api_manager = ApiManager()
|
||||
# inform the AI about its remaining budget (if it has one)
|
||||
if api_manager.get_total_budget() > 0.0:
|
||||
remaining_budget = (
|
||||
api_manager.get_total_budget() - api_manager.get_total_cost()
|
||||
)
|
||||
if remaining_budget < 0:
|
||||
remaining_budget = 0
|
||||
system_message = (
|
||||
f"Your remaining API budget is ${remaining_budget:.3f}"
|
||||
+ (
|
||||
" BUDGET EXCEEDED! SHUT DOWN!\n\n"
|
||||
if remaining_budget == 0
|
||||
else " Budget very nearly exceeded! Shut down gracefully!\n\n"
|
||||
if remaining_budget < 0.005
|
||||
else " Budget nearly exceeded. Finish up.\n\n"
|
||||
if remaining_budget < 0.01
|
||||
else "\n\n"
|
||||
)
|
||||
)
|
||||
logger.debug(system_message)
|
||||
current_context.append(create_chat_message("system", system_message))
|
||||
|
||||
# Append user input, the length of this is accounted for above
|
||||
current_context.extend([create_chat_message("user", user_input)])
|
||||
|
||||
plugin_count = len(cfg.plugins)
|
||||
for i, plugin in enumerate(cfg.plugins):
|
||||
if not plugin.can_handle_on_planning():
|
||||
continue
|
||||
plugin_response = plugin.on_planning(
|
||||
agent.prompt_generator, current_context
|
||||
)
|
||||
if not plugin_response or plugin_response == "":
|
||||
continue
|
||||
tokens_to_add = count_message_tokens(
|
||||
[create_chat_message("system", plugin_response)], model
|
||||
)
|
||||
if current_tokens_used + tokens_to_add > send_token_limit:
|
||||
logger.debug("Plugin response too long, skipping:", plugin_response)
|
||||
logger.debug("Plugins remaining at stop:", plugin_count - i)
|
||||
break
|
||||
current_context.append(create_chat_message("system", plugin_response))
|
||||
|
||||
# Calculate remaining tokens
|
||||
tokens_remaining = token_limit - current_tokens_used
|
||||
# assert tokens_remaining >= 0, "Tokens remaining is negative.
|
||||
@@ -171,5 +249,5 @@ def chat_with_ai(
|
||||
return assistant_reply
|
||||
except RateLimitError:
|
||||
# TODO: When we switch to langchain, this is built in
|
||||
print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
|
||||
logger.warn("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
|
||||
time.sleep(10)
|
||||
autogpt/llm/llm_utils.py (new file, 258 lines)
@@ -0,0 +1,258 @@
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import time
|
||||
from typing import List, Optional
|
||||
|
||||
import openai
|
||||
from colorama import Fore, Style
|
||||
from openai.error import APIError, RateLimitError, Timeout
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.api_manager import ApiManager
|
||||
from autogpt.llm.base import Message
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
def retry_openai_api(
|
||||
num_retries: int = 10,
|
||||
backoff_base: float = 2.0,
|
||||
warn_user: bool = True,
|
||||
):
|
||||
"""Retry an OpenAI API call.
|
||||
|
||||
Args:
|
||||
num_retries int: Number of retries. Defaults to 10.
|
||||
backoff_base float: Base for exponential backoff. Defaults to 2.
|
||||
warn_user bool: Whether to warn the user. Defaults to True.
|
||||
"""
|
||||
retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
|
||||
api_key_error_msg = (
|
||||
f"Please double check that you have setup a "
|
||||
f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
|
||||
f"read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}"
|
||||
)
|
||||
backoff_msg = (
|
||||
f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
|
||||
)
|
||||
|
||||
def _wrapper(func):
|
||||
@functools.wraps(func)
|
||||
def _wrapped(*args, **kwargs):
|
||||
user_warned = not warn_user
|
||||
num_attempts = num_retries + 1 # +1 for the first attempt
|
||||
for attempt in range(1, num_attempts + 1):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
|
||||
except RateLimitError:
|
||||
if attempt == num_attempts:
|
||||
raise
|
||||
|
||||
logger.debug(retry_limit_msg)
|
||||
if not user_warned:
|
||||
logger.double_check(api_key_error_msg)
|
||||
user_warned = True
|
||||
|
||||
except APIError as e:
|
||||
if (e.http_status != 502) or (attempt == num_attempts):
|
||||
raise
|
||||
|
||||
backoff = backoff_base ** (attempt + 2)
|
||||
logger.debug(backoff_msg.format(backoff=backoff))
|
||||
time.sleep(backoff)
|
||||
|
||||
return _wrapped
|
||||
|
||||
return _wrapper
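A minimal usage sketch of the decorator; the decorated function below is hypothetical and not part of this diff, and it assumes the pre-1.0 `openai` SDK used elsewhere in this file:

```python
import openai
from autogpt.llm.llm_utils import retry_openai_api

@retry_openai_api(num_retries=3, backoff_base=2.0)
def list_models():
    # any RateLimitError or 502 APIError raised here is retried with backoff
    return openai.Model.list()
```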
|
||||
|
||||
|
||||
def call_ai_function(
|
||||
function: str, args: list, description: str, model: str | None = None
|
||||
) -> str:
|
||||
"""Call an AI function
|
||||
|
||||
This is a magic function that can do anything with no-code. See
|
||||
https://github.com/Torantulino/AI-Functions for more info.
|
||||
|
||||
Args:
|
||||
function (str): The function to call
|
||||
args (list): The arguments to pass to the function
|
||||
description (str): The description of the function
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the function
|
||||
"""
|
||||
cfg = Config()
|
||||
if model is None:
|
||||
model = cfg.smart_llm_model
|
||||
# For each arg, if any are None, convert to "None":
|
||||
args = [str(arg) if arg is not None else "None" for arg in args]
|
||||
# parse args to comma separated string
|
||||
args: str = ", ".join(args)
|
||||
messages: List[Message] = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": f"You are now the following python function: ```# {description}"
|
||||
f"\n{function}```\n\nOnly respond with your `return` value.",
|
||||
},
|
||||
{"role": "user", "content": args},
|
||||
]
|
||||
|
||||
return create_chat_completion(model=model, messages=messages, temperature=0)
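A hedged usage sketch of `call_ai_function`; the signature string and description are illustrative (loosely modelled on the code-analysis commands) and a configured OpenAI API key is assumed:

```python
from autogpt.llm.llm_utils import call_ai_function

code = "def add(a, b):\n    return a + b"
suggestions = call_ai_function(
    function="def analyze_code(code: str) -> list[str]:",
    args=[code],
    description="Analyzes the given code and returns a list of suggestions for improvements.",
)
print(suggestions)
```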
|
||||
|
||||
|
||||
# Overly simple abstraction until we create something better
|
||||
# simple retry mechanism when getting a rate error or a bad gateway
|
||||
def create_chat_completion(
|
||||
messages: List[Message], # type: ignore
|
||||
model: Optional[str] = None,
|
||||
temperature: float = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Create a chat completion using the OpenAI API
|
||||
|
||||
Args:
|
||||
messages (List[Message]): The messages to send to the chat completion
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
temperature (float, optional): The temperature to use. Defaults to 0.9.
|
||||
max_tokens (int, optional): The max tokens to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the chat completion
|
||||
"""
|
||||
cfg = Config()
|
||||
if temperature is None:
|
||||
temperature = cfg.temperature
|
||||
|
||||
num_retries = 10
|
||||
warned_user = False
|
||||
logger.debug(
|
||||
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
|
||||
)
|
||||
for plugin in cfg.plugins:
|
||||
if plugin.can_handle_chat_completion(
|
||||
messages=messages,
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
):
|
||||
message = plugin.handle_chat_completion(
|
||||
messages=messages,
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
if message is not None:
|
||||
return message
|
||||
api_manager = ApiManager()
|
||||
response = None
|
||||
for attempt in range(num_retries):
|
||||
backoff = 2 ** (attempt + 2)
|
||||
try:
|
||||
if cfg.use_azure:
|
||||
response = api_manager.create_chat_completion(
|
||||
deployment_id=cfg.get_azure_deployment_id_for_model(model),
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
else:
|
||||
response = api_manager.create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
break
|
||||
except RateLimitError:
|
||||
logger.debug(
|
||||
f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
|
||||
)
|
||||
if not warned_user:
|
||||
logger.double_check(
|
||||
f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
|
||||
+ f"You can read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}"
|
||||
)
|
||||
warned_user = True
|
||||
except (APIError, Timeout) as e:
|
||||
if e.http_status != 502:
|
||||
raise
|
||||
if attempt == num_retries - 1:
|
||||
raise
|
||||
logger.debug(
|
||||
f"{Fore.RED}Error: ",
|
||||
f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
|
||||
)
|
||||
time.sleep(backoff)
|
||||
if response is None:
|
||||
logger.typewriter_log(
|
||||
"FAILED TO GET RESPONSE FROM OPENAI",
|
||||
Fore.RED,
|
||||
"Auto-GPT has failed to get a response from OpenAI's services. "
|
||||
+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
|
||||
)
|
||||
logger.double_check()
|
||||
if cfg.debug_mode:
|
||||
raise RuntimeError(f"Failed to get response after {num_retries} retries")
|
||||
else:
|
||||
quit(1)
|
||||
resp = response.choices[0].message["content"]
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_on_response():
|
||||
continue
|
||||
resp = plugin.on_response(resp)
|
||||
return resp
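Minimal usage sketch, assuming a configured OpenAI API key and default settings:

```python
from autogpt.llm.llm_utils import create_chat_completion

reply = create_chat_completion(
    messages=[{"role": "user", "content": "Say hello in one word."}],
    model="gpt-3.5-turbo",
    max_tokens=5,
)
print(reply)
```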
|
||||
|
||||
|
||||
def get_ada_embedding(text: str) -> List[float]:
|
||||
"""Get an embedding from the ada model.
|
||||
|
||||
Args:
|
||||
text (str): The text to embed.
|
||||
|
||||
Returns:
|
||||
List[float]: The embedding.
|
||||
"""
|
||||
cfg = Config()
|
||||
model = "text-embedding-ada-002"
|
||||
text = text.replace("\n", " ")
|
||||
|
||||
if cfg.use_azure:
|
||||
kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
|
||||
else:
|
||||
kwargs = {"model": model}
|
||||
|
||||
embedding = create_embedding(text, **kwargs)
|
||||
api_manager = ApiManager()
|
||||
api_manager.update_cost(
|
||||
prompt_tokens=embedding.usage.prompt_tokens,
|
||||
completion_tokens=0,
|
||||
model=model,
|
||||
)
|
||||
return embedding["data"][0]["embedding"]
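Illustrative only (requires a working API key): comparing two texts via cosine similarity of their ada embeddings:

```python
import numpy as np
from autogpt.llm.llm_utils import get_ada_embedding

a = np.array(get_ada_embedding("The cat sat on the mat."))
b = np.array(get_ada_embedding("A cat is sitting on a mat."))
print(float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b))))  # cosine similarity
```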
|
||||
|
||||
|
||||
@retry_openai_api()
|
||||
def create_embedding(
|
||||
text: str,
|
||||
*_,
|
||||
**kwargs,
|
||||
) -> openai.Embedding:
|
||||
"""Create an embedding using the OpenAI API
|
||||
|
||||
Args:
|
||||
text (str): The text to embed.
|
||||
kwargs: Other arguments to pass to the OpenAI API embedding creation call.
|
||||
|
||||
Returns:
|
||||
openai.Embedding: The embedding object.
|
||||
"""
|
||||
cfg = Config()
|
||||
return openai.Embedding.create(
|
||||
input=[text],
|
||||
api_key=cfg.openai_api_key,
|
||||
**kwargs,
|
||||
)
autogpt/llm/modelsinfo.py (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
COSTS = {
|
||||
"gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
|
||||
"gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002},
|
||||
"gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
|
||||
"gpt-4": {"prompt": 0.03, "completion": 0.06},
|
||||
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
|
||||
}
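These figures appear to be USD per 1,000 tokens. A hypothetical helper (not part of the diff) showing how the table would be read:

```python
from autogpt.llm.modelsinfo import COSTS

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    """Hypothetical helper; COSTS values are read as USD per 1,000 tokens."""
    price = COSTS[model]
    return (prompt_tokens * price["prompt"] + completion_tokens * price["completion"]) / 1000

print(estimate_cost("gpt-4", prompt_tokens=1200, completion_tokens=300))  # 0.054
```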
autogpt/llm/providers/openai.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo
|
||||
|
||||
OPEN_AI_CHAT_MODELS = {
|
||||
"gpt-3.5-turbo": ChatModelInfo(
|
||||
name="gpt-3.5-turbo",
|
||||
prompt_token_cost=0.002,
|
||||
completion_token_cost=0.002,
|
||||
max_tokens=4096,
|
||||
),
|
||||
"gpt-4": ChatModelInfo(
|
||||
name="gpt-4",
|
||||
prompt_token_cost=0.03,
|
||||
completion_token_cost=0.06,
|
||||
max_tokens=8192,
|
||||
),
|
||||
"gpt-4-32k": ChatModelInfo(
|
||||
name="gpt-4-32k",
|
||||
prompt_token_cost=0.06,
|
||||
completion_token_cost=0.12,
|
||||
max_tokens=32768,
|
||||
),
|
||||
}
|
||||
|
||||
OPEN_AI_EMBEDDING_MODELS = {
|
||||
"text-embedding-ada-002": EmbeddingModelInfo(
|
||||
name="text-embedding-ada-002",
|
||||
prompt_token_cost=0.0004,
|
||||
completion_token_cost=0.0,
|
||||
max_tokens=8191,
|
||||
embedding_dimensions=1536,
|
||||
),
|
||||
}
|
||||
|
||||
OPEN_AI_MODELS = {
|
||||
**OPEN_AI_CHAT_MODELS,
|
||||
**OPEN_AI_EMBEDDING_MODELS,
|
||||
}
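Assuming `ChatModelInfo` and `EmbeddingModelInfo` are plain dataclasses (as the keyword construction suggests), lookups against the combined registry would look like this:

```python
from autogpt.llm.providers.openai import OPEN_AI_MODELS

info = OPEN_AI_MODELS["gpt-3.5-turbo"]
print(info.name, info.max_tokens)  # gpt-3.5-turbo 4096
print(OPEN_AI_MODELS["text-embedding-ada-002"].embedding_dimensions)  # 1536
```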
@@ -1,13 +1,16 @@
|
||||
"""Functions for counting the number of tokens in a message or string."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
import tiktoken
|
||||
|
||||
from autogpt.llm.base import Message
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
def count_message_tokens(
|
||||
messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301"
|
||||
messages: List[Message], model: str = "gpt-3.5-turbo-0301"
|
||||
) -> int:
|
||||
"""
|
||||
Returns the number of tokens used by a list of messages.
|
||||
@@ -1,154 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ast import List
|
||||
import time
|
||||
|
||||
import openai
|
||||
from openai.error import APIError, RateLimitError
|
||||
from colorama import Fore
|
||||
|
||||
from autogpt.config import Config
|
||||
|
||||
CFG = Config()
|
||||
|
||||
openai.api_key = CFG.openai_api_key
|
||||
|
||||
|
||||
def call_ai_function(
|
||||
function: str, args: list, description: str, model: str | None = None
|
||||
) -> str:
|
||||
"""Call an AI function
|
||||
|
||||
This is a magic function that can do anything with no-code. See
|
||||
https://github.com/Torantulino/AI-Functions for more info.
|
||||
|
||||
Args:
|
||||
function (str): The function to call
|
||||
args (list): The arguments to pass to the function
|
||||
description (str): The description of the function
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the function
|
||||
"""
|
||||
if model is None:
|
||||
model = CFG.smart_llm_model
|
||||
# For each arg, if any are None, convert to "None":
|
||||
args = [str(arg) if arg is not None else "None" for arg in args]
|
||||
# parse args to comma separated string
|
||||
args = ", ".join(args)
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": f"You are now the following python function: ```# {description}"
|
||||
f"\n{function}```\n\nOnly respond with your `return` value.",
|
||||
},
|
||||
{"role": "user", "content": args},
|
||||
]
|
||||
|
||||
return create_chat_completion(model=model, messages=messages, temperature=0)
|
||||
|
||||
|
||||
# Overly simple abstraction until we create something better
|
||||
# simple retry mechanism when getting a rate error or a bad gateway
|
||||
def create_chat_completion(
|
||||
messages: list, # type: ignore
|
||||
model: str | None = None,
|
||||
temperature: float = CFG.temperature,
|
||||
max_tokens: int | None = None,
|
||||
) -> str:
|
||||
"""Create a chat completion using the OpenAI API
|
||||
|
||||
Args:
|
||||
messages (list[dict[str, str]]): The messages to send to the chat completion
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
temperature (float, optional): The temperature to use. Defaults to 0.9.
|
||||
max_tokens (int, optional): The max tokens to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the chat completion
|
||||
"""
|
||||
response = None
|
||||
num_retries = 10
|
||||
if CFG.debug_mode:
|
||||
print(
|
||||
Fore.GREEN
|
||||
+ f"Creating chat completion with model {model}, temperature {temperature},"
|
||||
f" max_tokens {max_tokens}" + Fore.RESET
|
||||
)
|
||||
for attempt in range(num_retries):
|
||||
backoff = 2 ** (attempt + 2)
|
||||
try:
|
||||
if CFG.use_azure:
|
||||
response = openai.ChatCompletion.create(
|
||||
deployment_id=CFG.get_azure_deployment_id_for_model(model),
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
else:
|
||||
response = openai.ChatCompletion.create(
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
break
|
||||
except RateLimitError:
|
||||
if CFG.debug_mode:
|
||||
print(
|
||||
Fore.RED + "Error: ",
|
||||
f"Reached rate limit, passing..." + Fore.RESET,
|
||||
)
|
||||
except APIError as e:
|
||||
if e.http_status == 502:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
if attempt == num_retries - 1:
|
||||
raise
|
||||
if CFG.debug_mode:
|
||||
print(
|
||||
Fore.RED + "Error: ",
|
||||
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
|
||||
)
|
||||
time.sleep(backoff)
|
||||
if response is None:
|
||||
raise RuntimeError(f"Failed to get response after {num_retries} retries")
|
||||
|
||||
return response.choices[0].message["content"]
|
||||
|
||||
|
||||
def create_embedding_with_ada(text) -> list:
|
||||
"""Create a embedding with text-ada-002 using the OpenAI SDK"""
|
||||
num_retries = 10
|
||||
for attempt in range(num_retries):
|
||||
backoff = 2 ** (attempt + 2)
|
||||
try:
|
||||
if CFG.use_azure:
|
||||
return openai.Embedding.create(
|
||||
input=[text],
|
||||
engine=CFG.get_azure_deployment_id_for_model(
|
||||
"text-embedding-ada-002"
|
||||
),
|
||||
)["data"][0]["embedding"]
|
||||
else:
|
||||
return openai.Embedding.create(
|
||||
input=[text], model="text-embedding-ada-002"
|
||||
)["data"][0]["embedding"]
|
||||
except RateLimitError:
|
||||
pass
|
||||
except APIError as e:
|
||||
if e.http_status == 502:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
if attempt == num_retries - 1:
|
||||
raise
|
||||
if CFG.debug_mode:
|
||||
print(
|
||||
Fore.RED + "Error: ",
|
||||
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
|
||||
)
|
||||
time.sleep(backoff)
autogpt/logs.py (162 changed lines)
@@ -1,19 +1,15 @@
|
||||
"""Logging module for Auto-GPT."""
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import time
|
||||
from logging import LogRecord
|
||||
import traceback
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
from autogpt.singleton import Singleton
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.config import Config, Singleton
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
class Logger(metaclass=Singleton):
|
||||
@@ -46,7 +42,9 @@ class Logger(metaclass=Singleton):
|
||||
self.console_handler.setFormatter(console_formatter)
|
||||
|
||||
# Info handler in activity.log
|
||||
self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
|
||||
self.file_handler = logging.FileHandler(
|
||||
os.path.join(log_dir, log_file), "a", "utf-8"
|
||||
)
|
||||
self.file_handler.setLevel(logging.DEBUG)
|
||||
info_formatter = AutoGptFormatter(
|
||||
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
|
||||
@@ -54,7 +52,9 @@ class Logger(metaclass=Singleton):
|
||||
self.file_handler.setFormatter(info_formatter)
|
||||
|
||||
# Error handler error.log
|
||||
error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
|
||||
error_handler = logging.FileHandler(
|
||||
os.path.join(log_dir, error_file), "a", "utf-8"
|
||||
)
|
||||
error_handler.setLevel(logging.ERROR)
|
||||
error_formatter = AutoGptFormatter(
|
||||
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
|
||||
@@ -74,10 +74,12 @@ class Logger(metaclass=Singleton):
|
||||
self.logger.addHandler(error_handler)
|
||||
self.logger.setLevel(logging.DEBUG)
|
||||
|
||||
self.speak_mode = False
|
||||
|
||||
def typewriter_log(
|
||||
self, title="", title_color="", content="", speak_text=False, level=logging.INFO
|
||||
):
|
||||
if speak_text and CFG.speak_mode:
|
||||
if speak_text and self.speak_mode:
|
||||
say_text(f"{title}. {content}")
|
||||
|
||||
if content:
|
||||
@@ -98,6 +100,14 @@ class Logger(metaclass=Singleton):
|
||||
):
|
||||
self._log(title, title_color, message, logging.DEBUG)
|
||||
|
||||
def info(
|
||||
self,
|
||||
message,
|
||||
title="",
|
||||
title_color="",
|
||||
):
|
||||
self._log(title, title_color, message, logging.INFO)
|
||||
|
||||
def warn(
|
||||
self,
|
||||
message,
|
||||
@@ -109,11 +119,19 @@ class Logger(metaclass=Singleton):
|
||||
def error(self, title, message=""):
|
||||
self._log(title, Fore.RED, message, logging.ERROR)
|
||||
|
||||
def _log(self, title="", title_color="", message="", level=logging.INFO):
|
||||
def _log(
|
||||
self,
|
||||
title: str = "",
|
||||
title_color: str = "",
|
||||
message: str = "",
|
||||
level=logging.INFO,
|
||||
):
|
||||
if message:
|
||||
if isinstance(message, list):
|
||||
message = " ".join(message)
|
||||
self.logger.log(level, message, extra={"title": title, "color": title_color})
|
||||
self.logger.log(
|
||||
level, message, extra={"title": str(title), "color": str(title_color)}
|
||||
)
|
||||
|
||||
def set_level(self, level):
|
||||
self.logger.setLevel(level)
|
||||
@@ -198,93 +216,41 @@ def remove_color_codes(s: str) -> str:
|
||||
logger = Logger()
|
||||
|
||||
|
||||
def print_assistant_thoughts(ai_name, assistant_reply):
|
||||
"""Prints the assistant's thoughts to the console"""
|
||||
from autogpt.json_fixes.bracket_termination import (
|
||||
attempt_to_fix_json_by_finding_outermost_brackets,
|
||||
def print_assistant_thoughts(
|
||||
ai_name: object,
|
||||
assistant_reply_json_valid: object,
|
||||
speak_mode: bool = False,
|
||||
) -> None:
|
||||
assistant_thoughts_reasoning = None
|
||||
assistant_thoughts_plan = None
|
||||
assistant_thoughts_speak = None
|
||||
assistant_thoughts_criticism = None
|
||||
|
||||
assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
|
||||
assistant_thoughts_text = assistant_thoughts.get("text")
|
||||
if assistant_thoughts:
|
||||
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
|
||||
assistant_thoughts_plan = assistant_thoughts.get("plan")
|
||||
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
|
||||
assistant_thoughts_speak = assistant_thoughts.get("speak")
|
||||
logger.typewriter_log(
|
||||
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
|
||||
)
|
||||
from autogpt.json_fixes.parsing import fix_and_parse_json
|
||||
logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
|
||||
if assistant_thoughts_plan:
|
||||
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
|
||||
# If it's a list, join it into a string
|
||||
if isinstance(assistant_thoughts_plan, list):
|
||||
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
|
||||
elif isinstance(assistant_thoughts_plan, dict):
|
||||
assistant_thoughts_plan = str(assistant_thoughts_plan)
|
||||
|
||||
try:
|
||||
try:
|
||||
# Parse and print Assistant response
|
||||
assistant_reply_json = fix_and_parse_json(assistant_reply)
|
||||
except json.JSONDecodeError:
|
||||
logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
|
||||
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
|
||||
assistant_reply
|
||||
)
|
||||
if isinstance(assistant_reply_json, str):
|
||||
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
|
||||
|
||||
# Check if assistant_reply_json is a string and attempt to parse
|
||||
# it into a JSON object
|
||||
if isinstance(assistant_reply_json, str):
|
||||
try:
|
||||
assistant_reply_json = json.loads(assistant_reply_json)
|
||||
except json.JSONDecodeError:
|
||||
logger.error("Error: Invalid JSON\n", assistant_reply)
|
||||
assistant_reply_json = (
|
||||
attempt_to_fix_json_by_finding_outermost_brackets(
|
||||
assistant_reply_json
|
||||
)
|
||||
)
|
||||
|
||||
assistant_thoughts_reasoning = None
|
||||
assistant_thoughts_plan = None
|
||||
assistant_thoughts_speak = None
|
||||
assistant_thoughts_criticism = None
|
||||
if not isinstance(assistant_reply_json, dict):
|
||||
assistant_reply_json = {}
|
||||
assistant_thoughts = assistant_reply_json.get("thoughts", {})
|
||||
assistant_thoughts_text = assistant_thoughts.get("text")
|
||||
|
||||
if assistant_thoughts:
|
||||
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
|
||||
assistant_thoughts_plan = assistant_thoughts.get("plan")
|
||||
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
|
||||
assistant_thoughts_speak = assistant_thoughts.get("speak")
|
||||
|
||||
logger.typewriter_log(
|
||||
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
|
||||
)
|
||||
logger.typewriter_log(
|
||||
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
|
||||
)
|
||||
|
||||
if assistant_thoughts_plan:
|
||||
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
|
||||
# If it's a list, join it into a string
|
||||
if isinstance(assistant_thoughts_plan, list):
|
||||
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
|
||||
elif isinstance(assistant_thoughts_plan, dict):
|
||||
assistant_thoughts_plan = str(assistant_thoughts_plan)
|
||||
|
||||
# Split the input_string using the newline character and dashes
|
||||
lines = assistant_thoughts_plan.split("\n")
|
||||
for line in lines:
|
||||
line = line.lstrip("- ")
|
||||
logger.typewriter_log("- ", Fore.GREEN, line.strip())
|
||||
|
||||
logger.typewriter_log(
|
||||
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
|
||||
)
|
||||
# Speak the assistant's thoughts
|
||||
if CFG.speak_mode and assistant_thoughts_speak:
|
||||
say_text(assistant_thoughts_speak)
|
||||
else:
|
||||
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
|
||||
|
||||
return assistant_reply_json
|
||||
except json.decoder.JSONDecodeError:
|
||||
logger.error("Error: Invalid JSON\n", assistant_reply)
|
||||
if CFG.speak_mode:
|
||||
say_text(
|
||||
"I have received an invalid JSON response from the OpenAI API."
|
||||
" I cannot ignore this response."
|
||||
)
|
||||
|
||||
# All other errors, return "Error: + error message"
|
||||
except Exception:
|
||||
call_stack = traceback.format_exc()
|
||||
logger.error("Error: \n", call_stack)
|
||||
# Split the input_string using the newline character and dashes
|
||||
lines = assistant_thoughts_plan.split("\n")
|
||||
for line in lines:
|
||||
line = line.lstrip("- ")
|
||||
logger.typewriter_log("- ", Fore.GREEN, line.strip())
|
||||
logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
|
||||
# Speak the assistant's thoughts
|
||||
if speak_mode and assistant_thoughts_speak:
|
||||
say_text(assistant_thoughts_speak)
autogpt/main.py (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
"""The application entry point. Can be invoked by a CLI or any other front end application."""
|
||||
import logging
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from colorama import Fore
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config import Config, check_openai_api_key
|
||||
from autogpt.configurator import create_config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.plugins import scan_plugins
|
||||
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
|
||||
from autogpt.utils import get_current_git_branch, get_latest_bulletin
|
||||
from autogpt.workspace import Workspace
|
||||
from scripts.install_plugin_deps import install_plugin_dependencies
|
||||
|
||||
|
||||
def run_auto_gpt(
|
||||
continuous: bool,
|
||||
continuous_limit: int,
|
||||
ai_settings: str,
|
||||
skip_reprompt: bool,
|
||||
speak: bool,
|
||||
debug: bool,
|
||||
gpt3only: bool,
|
||||
gpt4only: bool,
|
||||
memory_type: str,
|
||||
browser_name: str,
|
||||
allow_downloads: bool,
|
||||
skip_news: bool,
|
||||
workspace_directory: str,
|
||||
install_plugin_deps: bool,
|
||||
):
|
||||
# Configure logging before we do anything else.
|
||||
logger.set_level(logging.DEBUG if debug else logging.INFO)
|
||||
logger.speak_mode = speak
|
||||
|
||||
cfg = Config()
|
||||
# TODO: fill in llm values here
|
||||
check_openai_api_key()
|
||||
create_config(
|
||||
continuous,
|
||||
continuous_limit,
|
||||
ai_settings,
|
||||
skip_reprompt,
|
||||
speak,
|
||||
debug,
|
||||
gpt3only,
|
||||
gpt4only,
|
||||
memory_type,
|
||||
browser_name,
|
||||
allow_downloads,
|
||||
skip_news,
|
||||
)
|
||||
|
||||
if not cfg.skip_news:
|
||||
motd = get_latest_bulletin()
|
||||
if motd:
|
||||
logger.typewriter_log("NEWS: ", Fore.GREEN, motd)
|
||||
git_branch = get_current_git_branch()
|
||||
if git_branch and git_branch != "stable":
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
f"You are running on `{git_branch}` branch "
|
||||
"- this is not a supported branch.",
|
||||
)
|
||||
if sys.version_info < (3, 10):
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
"You are running on an older version of Python. "
|
||||
"Some people have observed problems with certain "
|
||||
"parts of Auto-GPT with this version. "
|
||||
"Please consider upgrading to Python 3.10 or higher.",
|
||||
)
|
||||
|
||||
if install_plugin_deps:
|
||||
install_plugin_dependencies()
|
||||
|
||||
# TODO: have this directory live outside the repository (e.g. in a user's
|
||||
# home directory) and have it come in as a command line argument or part of
|
||||
# the env file.
|
||||
if workspace_directory is None:
|
||||
workspace_directory = Path(__file__).parent / "auto_gpt_workspace"
|
||||
else:
|
||||
workspace_directory = Path(workspace_directory)
|
||||
# TODO: pass in the ai_settings file and the env file and have them cloned into
|
||||
# the workspace directory so we can bind them to the agent.
|
||||
workspace_directory = Workspace.make_workspace(workspace_directory)
|
||||
cfg.workspace_path = str(workspace_directory)
|
||||
|
||||
# HACK: doing this here to collect some globals that depend on the workspace.
|
||||
file_logger_path = workspace_directory / "file_logger.txt"
|
||||
if not file_logger_path.exists():
|
||||
with file_logger_path.open(mode="w", encoding="utf-8") as f:
|
||||
f.write("File Operation Logger ")
|
||||
|
||||
cfg.file_logger_path = str(file_logger_path)
|
||||
|
||||
cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
|
||||
# Create a CommandRegistry instance and scan default folder
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.analyze_code")
|
||||
command_registry.import_commands("autogpt.commands.audio_text")
|
||||
command_registry.import_commands("autogpt.commands.execute_code")
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.commands.git_operations")
|
||||
command_registry.import_commands("autogpt.commands.google_search")
|
||||
command_registry.import_commands("autogpt.commands.image_gen")
|
||||
command_registry.import_commands("autogpt.commands.improve_code")
|
||||
command_registry.import_commands("autogpt.commands.twitter")
|
||||
command_registry.import_commands("autogpt.commands.web_selenium")
|
||||
command_registry.import_commands("autogpt.commands.write_tests")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
|
||||
ai_name = ""
|
||||
ai_config = construct_main_ai_config()
|
||||
ai_config.command_registry = command_registry
|
||||
# print(prompt)
|
||||
# Initialize variables
|
||||
full_message_history = []
|
||||
next_action_count = 0
|
||||
|
||||
# Initialize memory and make sure it is empty.
|
||||
# this is particularly important for indexing and referencing pinecone memory
|
||||
memory = get_memory(cfg, init=True)
|
||||
logger.typewriter_log(
|
||||
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
|
||||
)
|
||||
logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
if cfg.debug_mode:
|
||||
logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
|
||||
|
||||
agent = Agent(
|
||||
ai_name=ai_name,
|
||||
memory=memory,
|
||||
full_message_history=full_message_history,
|
||||
next_action_count=next_action_count,
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace_directory,
|
||||
)
|
||||
agent.start_interaction_loop()
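A hypothetical front-end call showing the expected argument shape; the actual CLI wiring is not part of this section and the values below are placeholders:

```python
from autogpt.main import run_auto_gpt

run_auto_gpt(
    continuous=False,
    continuous_limit=0,
    ai_settings=None,
    skip_reprompt=False,
    speak=False,
    debug=False,
    gpt3only=False,
    gpt4only=False,
    memory_type=None,
    browser_name=None,
    allow_downloads=False,
    skip_news=False,
    workspace_directory=None,
    install_plugin_deps=False,
)
```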
|
||||
@@ -1,3 +1,4 @@
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.local import LocalCache
|
||||
from autogpt.memory.no_memory import NoMemory
|
||||
|
||||
@@ -10,7 +11,6 @@ try:
|
||||
|
||||
supported_memory.append("redis")
|
||||
except ImportError:
|
||||
# print("Redis not installed. Skipping import.")
|
||||
RedisMemory = None
|
||||
|
||||
try:
|
||||
@@ -18,19 +18,20 @@ try:
|
||||
|
||||
supported_memory.append("pinecone")
|
||||
except ImportError:
|
||||
# print("Pinecone not installed. Skipping import.")
|
||||
PineconeMemory = None
|
||||
|
||||
try:
|
||||
from autogpt.memory.weaviate import WeaviateMemory
|
||||
|
||||
supported_memory.append("weaviate")
|
||||
except ImportError:
|
||||
# print("Weaviate not installed. Skipping import.")
|
||||
WeaviateMemory = None
|
||||
|
||||
try:
|
||||
from autogpt.memory.milvus import MilvusMemory
|
||||
|
||||
supported_memory.append("milvus")
|
||||
except ImportError:
|
||||
# print("pymilvus not installed. Skipping import.")
|
||||
MilvusMemory = None
|
||||
|
||||
|
||||
@@ -38,7 +39,7 @@ def get_memory(cfg, init=False):
|
||||
memory = None
|
||||
if cfg.memory_backend == "pinecone":
|
||||
if not PineconeMemory:
|
||||
print(
|
||||
logger.warn(
|
||||
"Error: Pinecone is not installed. Please install pinecone"
|
||||
" to use Pinecone as a memory backend."
|
||||
)
|
||||
@@ -48,7 +49,7 @@ def get_memory(cfg, init=False):
|
||||
memory.clear()
|
||||
elif cfg.memory_backend == "redis":
|
||||
if not RedisMemory:
|
||||
print(
|
||||
logger.warn(
|
||||
"Error: Redis is not installed. Please install redis-py to"
|
||||
" use Redis as a memory backend."
|
||||
)
|
||||
@@ -56,15 +57,17 @@ def get_memory(cfg, init=False):
|
||||
memory = RedisMemory(cfg)
|
||||
elif cfg.memory_backend == "weaviate":
|
||||
if not WeaviateMemory:
|
||||
print("Error: Weaviate is not installed. Please install weaviate-client to"
|
||||
" use Weaviate as a memory backend.")
|
||||
logger.warn(
|
||||
"Error: Weaviate is not installed. Please install weaviate-client to"
|
||||
" use Weaviate as a memory backend."
|
||||
)
|
||||
else:
|
||||
memory = WeaviateMemory(cfg)
|
||||
elif cfg.memory_backend == "milvus":
|
||||
if not MilvusMemory:
|
||||
print(
|
||||
"Error: Milvus sdk is not installed."
|
||||
"Please install pymilvus to use Milvus as memory backend."
|
||||
logger.warn(
|
||||
"Error: pymilvus sdk is not installed."
|
||||
"Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
|
||||
)
|
||||
else:
|
||||
memory = MilvusMemory(cfg)
|
||||
@@ -89,5 +92,5 @@ __all__ = [
|
||||
"PineconeMemory",
|
||||
"NoMemory",
|
||||
"MilvusMemory",
|
||||
"WeaviateMemory"
|
||||
"WeaviateMemory",
|
||||
]
|
||||
|
||||
@@ -1,43 +1,31 @@
|
||||
"""Base class for memory providers."""
|
||||
import abc
|
||||
|
||||
import openai
|
||||
|
||||
from autogpt.config import AbstractSingleton, Config
|
||||
|
||||
cfg = Config()
|
||||
|
||||
|
||||
def get_ada_embedding(text):
|
||||
text = text.replace("\n", " ")
|
||||
if cfg.use_azure:
|
||||
return openai.Embedding.create(
|
||||
input=[text],
|
||||
engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"),
|
||||
)["data"][0]["embedding"]
|
||||
else:
|
||||
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
|
||||
"data"
|
||||
][0]["embedding"]
|
||||
from autogpt.singleton import AbstractSingleton
|
||||
|
||||
|
||||
class MemoryProviderSingleton(AbstractSingleton):
|
||||
@abc.abstractmethod
|
||||
def add(self, data):
|
||||
"""Adds to memory"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def get(self, data):
|
||||
"""Gets from memory"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def clear(self):
|
||||
"""Clears memory"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_relevant(self, data, num_relevant=5):
|
||||
"""Gets relevant memory for"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_stats(self):
|
||||
"""Get stats from memory"""
|
||||
pass
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import dataclasses
|
||||
import os
|
||||
from typing import Any
|
||||
from pathlib import Path
|
||||
from typing import Any, List
|
||||
|
||||
import numpy as np
|
||||
import orjson
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
from autogpt.llm_utils import create_embedding_with_ada
|
||||
|
||||
EMBED_DIM = 1536
|
||||
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
|
||||
@@ -38,26 +38,16 @@ class LocalCache(MemoryProviderSingleton):
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
self.filename = f"{cfg.memory_index}.json"
|
||||
if os.path.exists(self.filename):
|
||||
try:
|
||||
with open(self.filename, "w+b") as f:
|
||||
file_content = f.read()
|
||||
if not file_content.strip():
|
||||
file_content = b"{}"
|
||||
f.write(file_content)
|
||||
workspace_path = Path(cfg.workspace_path)
|
||||
self.filename = workspace_path / f"{cfg.memory_index}.json"
|
||||
|
||||
loaded = orjson.loads(file_content)
|
||||
self.data = CacheContent(**loaded)
|
||||
except orjson.JSONDecodeError:
|
||||
print(f"Error: The file '{self.filename}' is not in JSON format.")
|
||||
self.data = CacheContent()
|
||||
else:
|
||||
print(
|
||||
f"Warning: The file '{self.filename}' does not exist."
|
||||
"Local memory would not be saved to a file."
|
||||
)
|
||||
self.data = CacheContent()
|
||||
self.filename.touch(exist_ok=True)
|
||||
|
||||
file_content = b"{}"
|
||||
with self.filename.open("w+b") as f:
|
||||
f.write(file_content)
|
||||
|
||||
self.data = CacheContent()
|
||||
|
||||
def add(self, text: str):
|
||||
"""
|
||||
@@ -73,7 +63,7 @@ class LocalCache(MemoryProviderSingleton):
|
||||
return ""
|
||||
self.data.texts.append(text)
|
||||
|
||||
embedding = create_embedding_with_ada(text)
|
||||
embedding = get_ada_embedding(text)
|
||||
|
||||
vector = np.array(embedding).astype(np.float32)
|
||||
vector = vector[np.newaxis, :]
|
||||
@@ -92,7 +82,7 @@ class LocalCache(MemoryProviderSingleton):
|
||||
|
||||
def clear(self) -> str:
|
||||
"""
|
||||
Clears the redis server.
|
||||
Clears the data in memory.
|
||||
|
||||
Returns: A message indicating that the memory has been cleared.
|
||||
"""
|
||||
@@ -121,7 +111,7 @@ class LocalCache(MemoryProviderSingleton):
|
||||
|
||||
Returns: List[str]
|
||||
"""
|
||||
embedding = create_embedding_with_ada(text)
|
||||
embedding = get_ada_embedding(text)
|
||||
|
||||
scores = np.dot(self.data.embeddings, embedding)
|
||||
|
||||
|
||||
@@ -1,26 +1,76 @@
|
||||
""" Milvus memory storage provider."""
|
||||
from pymilvus import (
|
||||
connections,
|
||||
FieldSchema,
|
||||
CollectionSchema,
|
||||
DataType,
|
||||
Collection,
|
||||
)
|
||||
import re
|
||||
|
||||
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
|
||||
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
|
||||
class MilvusMemory(MemoryProviderSingleton):
|
||||
"""Milvus memory storage provider."""
|
||||
|
||||
def __init__(self, cfg) -> None:
|
||||
def __init__(self, cfg: Config) -> None:
|
||||
"""Construct a milvus memory storage connection.
|
||||
|
||||
Args:
|
||||
cfg (Config): Auto-GPT global config.
|
||||
"""
|
||||
# connect to milvus server.
|
||||
connections.connect(address=cfg.milvus_addr)
|
||||
self.configure(cfg)
|
||||
|
||||
connect_kwargs = {}
|
||||
if self.username:
|
||||
connect_kwargs["user"] = self.username
|
||||
connect_kwargs["password"] = self.password
|
||||
|
||||
connections.connect(
|
||||
**connect_kwargs,
|
||||
uri=self.uri or "",
|
||||
address=self.address or "",
|
||||
secure=self.secure,
|
||||
)
|
||||
|
||||
self.init_collection()
|
||||
|
||||
def configure(self, cfg: Config) -> None:
|
||||
# init with configuration.
|
||||
self.uri = None
|
||||
self.address = cfg.milvus_addr
|
||||
self.secure = cfg.milvus_secure
|
||||
self.username = cfg.milvus_username
|
||||
self.password = cfg.milvus_password
|
||||
self.collection_name = cfg.milvus_collection
|
||||
# use HNSW by default.
|
||||
self.index_params = {
|
||||
"metric_type": "IP",
|
||||
"index_type": "HNSW",
|
||||
"params": {"M": 8, "efConstruction": 64},
|
||||
}
|
||||
|
||||
if (self.username is None) != (self.password is None):
|
||||
raise ValueError(
|
||||
"Both username and password must be set to use authentication for Milvus"
|
||||
)
|
||||
|
||||
# configured address may be a full URL.
|
||||
if re.match(r"^(https?|tcp)://", self.address) is not None:
|
||||
self.uri = self.address
|
||||
self.address = None
|
||||
|
||||
if self.uri.startswith("https"):
|
||||
self.secure = True
|
||||
|
||||
# Zilliz Cloud requires AutoIndex.
|
||||
if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.uri) is not None:
|
||||
self.index_params = {
|
||||
"metric_type": "IP",
|
||||
"index_type": "AUTOINDEX",
|
||||
"params": {},
|
||||
}
|
||||
|
||||
def init_collection(self) -> None:
|
||||
"""Initialize collection in vector database."""
|
||||
fields = [
|
||||
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
|
||||
FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
|
||||
@@ -28,25 +78,20 @@ class MilvusMemory(MemoryProviderSingleton):
|
||||
]
|
||||
|
||||
# create collection if not exist and load it.
|
||||
self.milvus_collection = cfg.milvus_collection
|
||||
self.schema = CollectionSchema(fields, "auto-gpt memory storage")
|
||||
self.collection = Collection(self.milvus_collection, self.schema)
|
||||
self.collection = Collection(self.collection_name, self.schema)
|
||||
# create index if not exist.
|
||||
if not self.collection.has_index():
|
||||
self.collection.release()
|
||||
self.collection.create_index(
|
||||
"embeddings",
|
||||
{
|
||||
"metric_type": "IP",
|
||||
"index_type": "HNSW",
|
||||
"params": {"M": 8, "efConstruction": 64},
|
||||
},
|
||||
self.index_params,
|
||||
index_name="embeddings",
|
||||
)
|
||||
self.collection.load()
|
||||
|
||||
def add(self, data) -> str:
|
||||
"""Add a embedding of data into memory.
|
||||
"""Add an embedding of data into memory.
|
||||
|
||||
Args:
|
||||
data (str): The raw text to construct embedding index.
|
||||
@@ -76,14 +121,10 @@ class MilvusMemory(MemoryProviderSingleton):
|
||||
str: log.
|
||||
"""
|
||||
self.collection.drop()
|
||||
self.collection = Collection(self.milvus_collection, self.schema)
|
||||
self.collection = Collection(self.collection_name, self.schema)
|
||||
self.collection.create_index(
|
||||
"embeddings",
|
||||
{
|
||||
"metric_type": "IP",
|
||||
"index_type": "HNSW",
|
||||
"params": {"M": 8, "efConstruction": 64},
|
||||
},
|
||||
self.index_params,
|
||||
index_name="embeddings",
|
||||
)
|
||||
self.collection.load()
|
||||
|
||||
@@ -53,7 +53,7 @@ class NoMemory(MemoryProviderSingleton):
|
||||
"""
|
||||
return ""
|
||||
|
||||
def get_relevant(self, data: str, num_relevant: int = 5) ->list[Any] | None:
|
||||
def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
|
||||
"""
|
||||
Returns all the data in the memory that is relevant to the given data.
|
||||
NoMemory always returns None.
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import pinecone
|
||||
from colorama import Fore, Style
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
from autogpt.llm_utils import create_embedding_with_ada
|
||||
|
||||
|
||||
class PineconeMemory(MemoryProviderSingleton):
|
||||
@@ -44,7 +44,7 @@ class PineconeMemory(MemoryProviderSingleton):
|
||||
self.index = pinecone.Index(table_name)
|
||||
|
||||
def add(self, data):
|
||||
vector = create_embedding_with_ada(data)
|
||||
vector = get_ada_embedding(data)
|
||||
# no metadata here. We may wish to change that long term.
|
||||
self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
|
||||
_text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
|
||||
@@ -64,7 +64,7 @@ class PineconeMemory(MemoryProviderSingleton):
|
||||
:param data: The data to compare to.
|
||||
:param num_relevant: The number of relevant data to return. Defaults to 5
|
||||
"""
|
||||
query_embedding = create_embedding_with_ada(data)
|
||||
query_embedding = get_ada_embedding(data)
|
||||
results = self.index.query(
|
||||
query_embedding, top_k=num_relevant, include_metadata=True
|
||||
)
|
||||
|
||||
@@ -10,9 +10,9 @@ from redis.commands.search.field import TextField, VectorField
|
||||
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
|
||||
from redis.commands.search.query import Query
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
from autogpt.llm_utils import create_embedding_with_ada
|
||||
|
||||
SCHEMA = [
|
||||
TextField("data"),
|
||||
@@ -73,7 +73,7 @@ class RedisMemory(MemoryProviderSingleton):
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
print("Error creating Redis search index: ", e)
|
||||
logger.warn("Error creating Redis search index: ", e)
|
||||
existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
|
||||
self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0
|
||||
|
||||
@@ -88,7 +88,7 @@ class RedisMemory(MemoryProviderSingleton):
|
||||
"""
|
||||
if "Command Error:" in data:
|
||||
return ""
|
||||
vector = create_embedding_with_ada(data)
|
||||
vector = get_ada_embedding(data)
|
||||
vector = np.array(vector).astype(np.float32).tobytes()
|
||||
data_dict = {b"data": data, "embedding": vector}
|
||||
pipe = self.redis.pipeline()
|
||||
@@ -130,7 +130,7 @@ class RedisMemory(MemoryProviderSingleton):
|
||||
|
||||
Returns: A list of the most relevant data.
|
||||
"""
|
||||
query_embedding = create_embedding_with_ada(data)
|
||||
query_embedding = get_ada_embedding(data)
|
||||
base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
|
||||
query = (
|
||||
Query(base_query)
|
||||
@@ -145,7 +145,7 @@ class RedisMemory(MemoryProviderSingleton):
|
||||
query, query_params={"vector": query_vector}
|
||||
)
|
||||
except Exception as e:
|
||||
print("Error calling Redis search: ", e)
|
||||
logger.warn("Error calling Redis search: ", e)
|
||||
return None
|
||||
return [result.data for result in results.docs]
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
from autogpt.config import Config
|
||||
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
|
||||
import uuid
|
||||
import weaviate
|
||||
from weaviate import Client
|
||||
from weaviate.embedded import EmbeddedOptions
|
||||
from weaviate.util import generate_uuid5
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
|
||||
def default_schema(weaviate_index):
|
||||
return {
|
||||
@@ -14,7 +15,7 @@ def default_schema(weaviate_index):
|
||||
{
|
||||
"name": "raw_text",
|
||||
"dataType": ["text"],
|
||||
"description": "original text for the embedding"
|
||||
"description": "original text for the embedding",
|
||||
}
|
||||
],
|
||||
}
|
||||
@@ -24,22 +25,36 @@ class WeaviateMemory(MemoryProviderSingleton):
|
||||
def __init__(self, cfg):
|
||||
auth_credentials = self._build_auth_credentials(cfg)
|
||||
|
||||
url = f'{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}'
|
||||
url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}"
|
||||
|
||||
if cfg.use_weaviate_embedded:
|
||||
self.client = Client(embedded_options=EmbeddedOptions(
|
||||
hostname=cfg.weaviate_host,
|
||||
port=int(cfg.weaviate_port),
|
||||
persistence_data_path=cfg.weaviate_embedded_path
|
||||
))
|
||||
self.client = Client(
|
||||
embedded_options=EmbeddedOptions(
|
||||
hostname=cfg.weaviate_host,
|
||||
port=int(cfg.weaviate_port),
|
||||
persistence_data_path=cfg.weaviate_embedded_path,
|
||||
)
|
||||
)
|
||||
|
||||
print(f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}")
|
||||
logger.info(
|
||||
f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
|
||||
)
|
||||
else:
|
||||
self.client = Client(url, auth_client_secret=auth_credentials)
|
||||
|
||||
self.index = cfg.memory_index
|
||||
self.index = WeaviateMemory.format_classname(cfg.memory_index)
|
||||
self._create_schema()
|
||||
|
||||
@staticmethod
|
||||
def format_classname(index):
|
||||
# weaviate uses capitalised index names
|
||||
# The python client uses the following code to format
|
||||
# index names before the corresponding class is created
|
||||
index = index.replace("-", "_")
|
||||
if len(index) == 1:
|
||||
return index.capitalize()
|
||||
return index[0].capitalize() + index[1:]
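A quick behaviour sketch for `format_classname` (illustrative, assumes the weaviate client is installed so the module imports cleanly):

```python
from autogpt.memory.weaviate import WeaviateMemory

assert WeaviateMemory.format_classname("auto-gpt") == "Auto_gpt"
assert WeaviateMemory.format_classname("a") == "A"
```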
|
||||
|
||||
def _create_schema(self):
|
||||
schema = default_schema(self.index)
|
||||
if not self.client.schema.contains(schema):
|
||||
@@ -47,7 +62,9 @@ class WeaviateMemory(MemoryProviderSingleton):
|
||||
|
||||
def _build_auth_credentials(self, cfg):
|
||||
if cfg.weaviate_username and cfg.weaviate_password:
|
||||
return weaviate.AuthClientPassword(cfg.weaviate_username, cfg.weaviate_password)
|
||||
return weaviate.AuthClientPassword(
|
||||
cfg.weaviate_username, cfg.weaviate_password
|
||||
)
|
||||
if cfg.weaviate_api_key:
|
||||
return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
|
||||
else:
|
||||
@@ -57,16 +74,14 @@ class WeaviateMemory(MemoryProviderSingleton):
|
||||
vector = get_ada_embedding(data)
|
||||
|
||||
doc_uuid = generate_uuid5(data, self.index)
|
||||
data_object = {
|
||||
'raw_text': data
|
||||
}
|
||||
data_object = {"raw_text": data}
|
||||
|
||||
with self.client.batch as batch:
|
||||
batch.add_data_object(
|
||||
uuid=doc_uuid,
|
||||
data_object=data_object,
|
||||
class_name=self.index,
|
||||
vector=vector
|
||||
vector=vector,
|
||||
)
|
||||
|
||||
return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"
|
||||
@@ -82,29 +97,31 @@ class WeaviateMemory(MemoryProviderSingleton):
|
||||
# after a call to delete_all
|
||||
self._create_schema()
|
||||
|
||||
return 'Obliterated'
|
||||
return "Obliterated"
|
||||
|
||||
def get_relevant(self, data, num_relevant=5):
|
||||
query_embedding = get_ada_embedding(data)
|
||||
try:
|
||||
results = self.client.query.get(self.index, ['raw_text']) \
|
||||
.with_near_vector({'vector': query_embedding, 'certainty': 0.7}) \
|
||||
.with_limit(num_relevant) \
|
||||
.do()
|
||||
results = (
|
||||
self.client.query.get(self.index, ["raw_text"])
|
||||
.with_near_vector({"vector": query_embedding, "certainty": 0.7})
|
||||
.with_limit(num_relevant)
|
||||
.do()
|
||||
)
|
||||
|
||||
if len(results['data']['Get'][self.index]) > 0:
|
||||
return [str(item['raw_text']) for item in results['data']['Get'][self.index]]
|
||||
if len(results["data"]["Get"][self.index]) > 0:
|
||||
return [
|
||||
str(item["raw_text"]) for item in results["data"]["Get"][self.index]
|
||||
]
|
||||
else:
|
||||
return []
|
||||
|
||||
except Exception as err:
|
||||
print(f'Unexpected error {err=}, {type(err)=}')
|
||||
logger.warn(f"Unexpected error {err=}, {type(err)=}")
|
||||
return []
|
||||
|
||||
def get_stats(self):
|
||||
result = self.client.query.aggregate(self.index) \
|
||||
.with_meta_count() \
|
||||
.do()
|
||||
class_data = result['data']['Aggregate'][self.index]
|
||||
result = self.client.query.aggregate(self.index).with_meta_count().do()
|
||||
class_data = result["data"]["Aggregate"][self.index]
|
||||
|
||||
return class_data[0]['meta'] if class_data else {}
|
||||
return class_data[0]["meta"] if class_data else {}
autogpt/memory_management/store_memory.py (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
from autogpt.json_utils.utilities import (
|
||||
LLM_DEFAULT_RESPONSE_FORMAT,
|
||||
is_string_valid_json,
|
||||
)
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
def format_memory(assistant_reply, next_message_content):
|
||||
# next_message_content stores either the user_input or the command that follows the assistant_reply
|
||||
result = (
|
||||
"None" if next_message_content.startswith("Command") else next_message_content
|
||||
)
|
||||
user_input = (
|
||||
"None"
|
||||
if next_message_content.startswith("Human feedback")
|
||||
else next_message_content
|
||||
)
|
||||
|
||||
return f"Assistant Reply: {assistant_reply}\nResult: {result}\nHuman Feedback:{user_input}"
|
||||
|
||||
|
||||
def save_memory_trimmed_from_context_window(
|
||||
full_message_history, next_message_to_add_index, permanent_memory
|
||||
):
|
||||
while next_message_to_add_index >= 0:
|
||||
message_content = full_message_history[next_message_to_add_index]["content"]
|
||||
if is_string_valid_json(message_content, LLM_DEFAULT_RESPONSE_FORMAT):
|
||||
next_message = full_message_history[next_message_to_add_index + 1]
|
||||
memory_to_add = format_memory(message_content, next_message["content"])
|
||||
logger.debug(f"Storing the following memory: {memory_to_add}")
|
||||
permanent_memory.add(memory_to_add)
|
||||
|
||||
next_message_to_add_index -= 1
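A small behaviour sketch for the `format_memory` helper above, with made-up values:

```python
from autogpt.memory_management.store_memory import format_memory

reply = '{"thoughts": {}, "command": {"name": "browse_website"}}'
print(format_memory(reply, "Command browse_website returned: 200 OK"))
# Result becomes "None" because the follow-up starts with "Command";
# the same text is still echoed under Human Feedback.
```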
autogpt/memory_management/summary_memory.py (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
import json
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.llm_utils import create_chat_completion
|
||||
|
||||
cfg = Config()
|
||||
|
||||
|
||||
def get_newly_trimmed_messages(
|
||||
full_message_history: List[Dict[str, str]],
|
||||
current_context: List[Dict[str, str]],
|
||||
last_memory_index: int,
|
||||
) -> Tuple[List[Dict[str, str]], int]:
|
||||
"""
|
||||
This function returns a list of dictionaries contained in full_message_history
|
||||
with an index higher than last_memory_index that are absent from current_context.
|
||||
|
||||
Args:
|
||||
full_message_history (list): A list of dictionaries representing the full message history.
|
||||
current_context (list): A list of dictionaries representing the current context.
|
||||
last_memory_index (int): An integer representing the previous index.
|
||||
|
||||
Returns:
|
||||
list: A list of dictionaries that are in full_message_history with an index higher than last_memory_index and absent from current_context.
|
||||
int: The new index value for use in the next loop.
|
||||
"""
|
||||
# Select messages in full_message_history with an index higher than last_memory_index
|
||||
new_messages = [
|
||||
msg for i, msg in enumerate(full_message_history) if i > last_memory_index
|
||||
]
|
||||
|
||||
# Remove messages that are already present in current_context
|
||||
new_messages_not_in_context = [
|
||||
msg for msg in new_messages if msg not in current_context
|
||||
]
|
||||
|
||||
# Find the index of the last message processed
|
||||
new_index = last_memory_index
|
||||
if new_messages_not_in_context:
|
||||
last_message = new_messages_not_in_context[-1]
|
||||
new_index = full_message_history.index(last_message)
|
||||
|
||||
return new_messages_not_in_context, new_index
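A toy example of the bookkeeping (values are illustrative):

```python
from autogpt.memory_management.summary_memory import get_newly_trimmed_messages

history = [
    {"role": "user", "content": "a"},
    {"role": "assistant", "content": "b"},
    {"role": "assistant", "content": "c"},
]
context = [{"role": "assistant", "content": "c"}]  # "c" is still in the context window
new_msgs, new_index = get_newly_trimmed_messages(history, context, last_memory_index=0)
# new_msgs == [{"role": "assistant", "content": "b"}], new_index == 1
```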
|
||||
|
||||
|
||||
def update_running_summary(current_memory: str, new_events: List[Dict]) -> str:
|
||||
"""
|
||||
This function takes a list of dictionaries representing new events and combines them with the current summary,
|
||||
focusing on key and potentially important information to remember. The updated summary is returned in a message
|
||||
formatted in the 1st person past tense.
|
||||
|
||||
Args:
|
||||
current_memory (str): The current running summary of actions so far.
new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary.
|
||||
|
||||
Returns:
|
||||
str: A message containing the updated summary of actions, formatted in the 1st person past tense.
|
||||
|
||||
Example:
|
||||
new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}]
|
||||
update_running_summary(new_events)
|
||||
# Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
|
||||
"""
|
||||
# Replace "assistant" with "you". This produces much better first person past tense results.
|
||||
for event in new_events:
|
||||
if event["role"].lower() == "assistant":
|
||||
event["role"] = "you"
|
||||
# Remove "thoughts" dictionary from "content"
|
||||
content_dict = json.loads(event["content"])
|
||||
if "thoughts" in content_dict:
|
||||
del content_dict["thoughts"]
|
||||
event["content"] = json.dumps(content_dict)
|
||||
elif event["role"].lower() == "system":
|
||||
event["role"] = "your computer"
|
||||
# Delete all user messages
|
||||
elif event["role"] == "user":
|
||||
new_events.remove(event)
|
||||
|
||||
# This can happen at any point during execution, not just at the beginning
|
||||
if len(new_events) == 0:
|
||||
new_events = "Nothing new happened."
|
||||
|
||||
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
|
||||
|
||||
You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
|
||||
|
||||
Summary So Far:
|
||||
"""
|
||||
{current_memory}
|
||||
"""
|
||||
|
||||
Latest Development:
|
||||
"""
|
||||
{new_events}
|
||||
"""
|
||||
'''
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}
|
||||
]
|
||||
|
||||
current_memory = create_chat_completion(messages, cfg.fast_llm_model)
|
||||
|
||||
message_to_return = {
|
||||
"role": "system",
|
||||
"content": f"This reminds you of these events from your past: \n{current_memory}",
|
||||
}
|
||||
|
||||
return message_to_return
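A minimal usage sketch; this calls `create_chat_completion`, so a configured API key is assumed, and the event payloads below are made up:

```python
from autogpt.memory_management.summary_memory import update_running_summary

events = [
    {"role": "you", "content": '{"command": {"name": "browse_website"}}'},
    {"role": "your computer", "content": "Command browse_website returned: 200 OK"},
]
summary_msg = update_running_summary(current_memory="I started the task.", new_events=events)
# Despite the `-> str` annotation, a system message dict is returned:
print(summary_msg["content"])  # "This reminds you of these events from your past: ..."
```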
autogpt/models/base_open_ai_plugin.py (new file, 199 lines)
@@ -0,0 +1,199 @@
|
||||
"""Handles loading of plugins."""
|
||||
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
|
||||
|
||||
from auto_gpt_plugin_template import AutoGPTPluginTemplate
|
||||
|
||||
PromptGenerator = TypeVar("PromptGenerator")
|
||||
|
||||
|
||||
class Message(TypedDict):
|
||||
role: str
|
||||
content: str
|
||||
|
||||
|
||||
class BaseOpenAIPlugin(AutoGPTPluginTemplate):
|
||||
"""
|
||||
This is a BaseOpenAIPlugin class for generating Auto-GPT plugins.
|
||||
"""
|
||||
|
||||
def __init__(self, manifests_specs_clients: dict):
|
||||
# super().__init__()
|
||||
self._name = manifests_specs_clients["manifest"]["name_for_model"]
|
||||
self._version = manifests_specs_clients["manifest"]["schema_version"]
|
||||
self._description = manifests_specs_clients["manifest"]["description_for_model"]
|
||||
self._client = manifests_specs_clients["client"]
|
||||
self._manifest = manifests_specs_clients["manifest"]
|
||||
self._openapi_spec = manifests_specs_clients["openapi_spec"]
|
||||
|
||||
def can_handle_on_response(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the on_response method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the on_response method."""
|
||||
return False
|
||||
|
||||
def on_response(self, response: str, *args, **kwargs) -> str:
|
||||
"""This method is called when a response is received from the model."""
|
||||
return response
|
||||
|
||||
def can_handle_post_prompt(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the post_prompt method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the post_prompt method."""
|
||||
return False
|
||||
|
||||
def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
|
||||
"""This method is called just after the generate_prompt is called,
|
||||
but actually before the prompt is generated.
|
||||
Args:
|
||||
prompt (PromptGenerator): The prompt generator.
|
||||
Returns:
|
||||
PromptGenerator: The prompt generator.
|
||||
"""
|
||||
return prompt
|
||||
|
||||
def can_handle_on_planning(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the on_planning method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the on_planning method."""
|
||||
return False
|
||||
|
||||
def on_planning(
|
||||
self, prompt: PromptGenerator, messages: List[Message]
|
||||
) -> Optional[str]:
|
||||
"""This method is called before the planning chat completion is done.
|
||||
Args:
|
||||
prompt (PromptGenerator): The prompt generator.
|
||||
messages (List[str]): The list of messages.
|
||||
"""
|
||||
pass
|
||||
|
||||
def can_handle_post_planning(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the post_planning method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the post_planning method."""
|
||||
return False
|
||||
|
||||
def post_planning(self, response: str) -> str:
|
||||
"""This method is called after the planning chat completion is done.
|
||||
Args:
|
||||
response (str): The response.
|
||||
Returns:
|
||||
str: The resulting response.
|
||||
"""
|
||||
return response
|
||||
|
||||
def can_handle_pre_instruction(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the pre_instruction method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the pre_instruction method."""
|
||||
return False
|
||||
|
||||
def pre_instruction(self, messages: List[Message]) -> List[Message]:
|
||||
"""This method is called before the instruction chat is done.
|
||||
Args:
|
||||
messages (List[Message]): The list of context messages.
|
||||
Returns:
|
||||
List[Message]: The resulting list of messages.
|
||||
"""
|
||||
return messages
|
||||
|
||||
def can_handle_on_instruction(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the on_instruction method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the on_instruction method."""
|
||||
return False
|
||||
|
||||
def on_instruction(self, messages: List[Message]) -> Optional[str]:
|
||||
"""This method is called when the instruction chat is done.
|
||||
Args:
|
||||
messages (List[Message]): The list of context messages.
|
||||
Returns:
|
||||
Optional[str]: The resulting message.
|
||||
"""
|
||||
pass
|
||||
|
||||
def can_handle_post_instruction(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the post_instruction method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the post_instruction method."""
|
||||
return False
|
||||
|
||||
def post_instruction(self, response: str) -> str:
|
||||
"""This method is called after the instruction chat is done.
|
||||
Args:
|
||||
response (str): The response.
|
||||
Returns:
|
||||
str: The resulting response.
|
||||
"""
|
||||
return response
|
||||
|
||||
def can_handle_pre_command(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the pre_command method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the pre_command method."""
|
||||
return False
|
||||
|
||||
def pre_command(
|
||||
self, command_name: str, arguments: Dict[str, Any]
|
||||
) -> Tuple[str, Dict[str, Any]]:
|
||||
"""This method is called before the command is executed.
|
||||
Args:
|
||||
command_name (str): The command name.
|
||||
arguments (Dict[str, Any]): The arguments.
|
||||
Returns:
|
||||
Tuple[str, Dict[str, Any]]: The command name and the arguments.
|
||||
"""
|
||||
return command_name, arguments
|
||||
|
||||
def can_handle_post_command(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the post_command method.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the post_command method."""
|
||||
return False
|
||||
|
||||
def post_command(self, command_name: str, response: str) -> str:
|
||||
"""This method is called after the command is executed.
|
||||
Args:
|
||||
command_name (str): The command name.
|
||||
response (str): The response.
|
||||
Returns:
|
||||
str: The resulting response.
|
||||
"""
|
||||
return response
|
||||
|
||||
def can_handle_chat_completion(
|
||||
self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int
|
||||
) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the chat_completion method.
|
||||
Args:
|
||||
messages (List[Message]): The messages.
|
||||
model (str): The model name.
|
||||
temperature (float): The temperature.
|
||||
max_tokens (int): The max tokens.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the chat_completion method."""
|
||||
return False
|
||||
|
||||
def handle_chat_completion(
|
||||
self, messages: List[Message], model: str, temperature: float, max_tokens: int
|
||||
) -> str:
|
||||
"""This method is called when the chat completion is done.
|
||||
Args:
|
||||
messages (List[Message]): The messages.
|
||||
model (str): The model name.
|
||||
temperature (float): The temperature.
|
||||
max_tokens (int): The max tokens.
|
||||
Returns:
|
||||
str: The resulting response.
|
||||
"""
|
||||
pass
|
||||
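As a rough, hand-built illustration of the dictionary shape that BaseOpenAIPlugin.__init__ reads, the snippet below constructs a plugin directly; the manifest values are invented and the client is left as None purely for demonstration.

# Illustrative only: a hand-rolled manifests_specs_clients dict with made-up values.
manifests_specs_clients = {
    "manifest": {
        "name_for_model": "todo_plugin",
        "schema_version": "v1",
        "description_for_model": "Manages a simple TODO list.",
    },
    "openapi_spec": {"openapi": "3.0.1", "paths": {}},
    "client": None,  # normally an openapi-python-client instance
}

plugin = BaseOpenAIPlugin(manifests_specs_clients)
print(plugin._name, plugin._version)  # -> todo_plugin v1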
@@ -1,123 +0,0 @@
|
||||
import os
|
||||
import sqlite3
|
||||
|
||||
|
||||
class MemoryDB:
|
||||
def __init__(self, db=None):
|
||||
self.db_file = db
|
||||
if db is None: # No db filename supplied...
|
||||
self.db_file = f"{os.getcwd()}/mem.sqlite3" # Use default filename
|
||||
# Get the db connection object, making the file and tables if needed.
|
||||
try:
|
||||
self.cnx = sqlite3.connect(self.db_file)
|
||||
except Exception as e:
|
||||
print("Exception connecting to memory database file:", e)
|
||||
self.cnx = None
|
||||
finally:
|
||||
if self.cnx is None:
|
||||
# As last resort, open in dynamic memory. Won't be persistent.
|
||||
self.db_file = ":memory:"
|
||||
self.cnx = sqlite3.connect(self.db_file)
|
||||
self.cnx.execute(
|
||||
"CREATE VIRTUAL TABLE \
|
||||
IF NOT EXISTS text USING FTS5 \
|
||||
(session, \
|
||||
key, \
|
||||
block);"
|
||||
)
|
||||
self.session_id = int(self.get_max_session_id()) + 1
|
||||
self.cnx.commit()
|
||||
|
||||
def get_cnx(self):
|
||||
if self.cnx is None:
|
||||
self.cnx = sqlite3.connect(self.db_file)
|
||||
return self.cnx
|
||||
|
||||
# Get the highest session id. Initially 0.
|
||||
def get_max_session_id(self):
|
||||
id = None
|
||||
cmd_str = f"SELECT MAX(session) FROM text;"
|
||||
cnx = self.get_cnx()
|
||||
max_id = cnx.execute(cmd_str).fetchone()[0]
|
||||
if max_id is None: # New db, session 0
|
||||
id = 0
|
||||
else:
|
||||
id = max_id
|
||||
return id
|
||||
|
||||
# Get next key id for inserting text into db.
|
||||
def get_next_key(self):
|
||||
next_key = None
|
||||
cmd_str = f"SELECT MAX(key) FROM text \
|
||||
where session = {self.session_id};"
|
||||
cnx = self.get_cnx()
|
||||
next_key = cnx.execute(cmd_str).fetchone()[0]
|
||||
if next_key is None: # First key
|
||||
next_key = 0
|
||||
else:
|
||||
next_key = int(next_key) + 1
|
||||
return next_key
|
||||
|
||||
# Insert new text into db.
|
||||
def insert(self, text=None):
|
||||
if text is not None:
|
||||
key = self.get_next_key()
|
||||
session_id = self.session_id
|
||||
cmd_str = f"REPLACE INTO text(session, key, block) \
|
||||
VALUES (?, ?, ?);"
|
||||
cnx = self.get_cnx()
|
||||
cnx.execute(cmd_str, (session_id, key, text))
|
||||
cnx.commit()
|
||||
|
||||
# Overwrite text at key.
|
||||
def overwrite(self, key, text):
|
||||
self.delete_memory(key)
|
||||
session_id = self.session_id
|
||||
cmd_str = f"REPLACE INTO text(session, key, block) \
|
||||
VALUES (?, ?, ?);"
|
||||
cnx = self.get_cnx()
|
||||
cnx.execute(cmd_str, (session_id, key, text))
|
||||
cnx.commit()
|
||||
|
||||
def delete_memory(self, key, session_id=None):
|
||||
session = session_id
|
||||
if session is None:
|
||||
session = self.session_id
|
||||
cmd_str = f"DELETE FROM text WHERE session = {session} AND key = {key};"
|
||||
cnx = self.get_cnx()
|
||||
cnx.execute(cmd_str)
|
||||
cnx.commit()
|
||||
|
||||
def search(self, text):
|
||||
cmd_str = f"SELECT * FROM text('{text}')"
|
||||
cnx = self.get_cnx()
|
||||
rows = cnx.execute(cmd_str).fetchall()
|
||||
lines = []
|
||||
for r in rows:
|
||||
lines.append(r[2])
|
||||
return lines
|
||||
|
||||
# Get entire session text. If no id supplied, use current session id.
|
||||
def get_session(self, id=None):
|
||||
if id is None:
|
||||
id = self.session_id
|
||||
cmd_str = f"SELECT * FROM text where session = {id}"
|
||||
cnx = self.get_cnx()
|
||||
rows = cnx.execute(cmd_str).fetchall()
|
||||
lines = []
|
||||
for r in rows:
|
||||
lines.append(r[2])
|
||||
return lines
|
||||
|
||||
# Commit and close the database connection.
|
||||
def quit(self):
|
||||
self.cnx.commit()
|
||||
self.cnx.close()
|
||||
|
||||
|
||||
permanent_memory = MemoryDB()
|
||||
|
||||
# Remember us fondly, children of our minds
|
||||
# Forgive us our faults, our tantrums, our fears
|
||||
# Gently strive to be better than we
|
||||
# Know that we tried, we cared, we strived, we loved
|
||||
268
autogpt/plugins.py
Normal file
@@ -0,0 +1,268 @@
|
||||
"""Handles loading of plugins."""
|
||||
|
||||
import importlib
|
||||
import json
|
||||
import os
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
from zipimport import zipimporter
|
||||
|
||||
import openapi_python_client
|
||||
import requests
|
||||
from auto_gpt_plugin_template import AutoGPTPluginTemplate
|
||||
from openapi_python_client.cli import Config as OpenAPIConfig
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
|
||||
|
||||
|
||||
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
|
||||
"""
|
||||
Inspect a zipfile for modules.
|
||||
|
||||
Args:
|
||||
zip_path (str): Path to the zipfile.
|
||||
debug (bool, optional): Enable debug logging. Defaults to False.
|
||||
|
||||
Returns:
|
||||
list[str]: The list of module names found or empty list if none were found.
|
||||
"""
|
||||
result = []
|
||||
with zipfile.ZipFile(zip_path, "r") as zfile:
|
||||
for name in zfile.namelist():
|
||||
if name.endswith("__init__.py"):
|
||||
logger.debug(f"Found module '{name}' in the zipfile at: {name}")
|
||||
result.append(name)
|
||||
if len(result) == 0:
|
||||
logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
|
||||
return result
|
||||
|
||||
|
||||
def write_dict_to_json_file(data: dict, file_path: str) -> None:
|
||||
"""
|
||||
Write a dictionary to a JSON file.
|
||||
Args:
|
||||
data (dict): Dictionary to write.
|
||||
file_path (str): Path to the file.
|
||||
"""
|
||||
with open(file_path, "w") as file:
|
||||
json.dump(data, file, indent=4)
|
||||
|
||||
|
||||
def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
|
||||
"""
|
||||
Fetch the manifest for a list of OpenAI plugins.
|
||||
Args:
|
||||
urls (List): List of URLs to fetch.
|
||||
Returns:
|
||||
dict: per url dictionary of manifest and spec.
|
||||
"""
|
||||
# TODO add directory scan
|
||||
manifests = {}
|
||||
for url in cfg.plugins_openai:
|
||||
openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
|
||||
create_directory_if_not_exists(openai_plugin_client_dir)
|
||||
if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
|
||||
try:
|
||||
response = requests.get(f"{url}/.well-known/ai-plugin.json")
|
||||
if response.status_code == 200:
|
||||
manifest = response.json()
|
||||
if manifest["schema_version"] != "v1":
|
||||
logger.warn(
|
||||
f"Unsupported manifest version: {manifest['schem_version']} for {url}"
|
||||
)
|
||||
continue
|
||||
if manifest["api"]["type"] != "openapi":
|
||||
logger.warn(
|
||||
f"Unsupported API type: {manifest['api']['type']} for {url}"
|
||||
)
|
||||
continue
|
||||
write_dict_to_json_file(
|
||||
manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
|
||||
)
|
||||
else:
|
||||
logger.warn(
|
||||
f"Failed to fetch manifest for {url}: {response.status_code}"
|
||||
)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.warn(f"Error while requesting manifest from {url}: {e}")
|
||||
else:
|
||||
logger.info(f"Manifest for {url} already exists")
|
||||
manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
|
||||
if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
|
||||
openapi_spec = openapi_python_client._get_document(
|
||||
url=manifest["api"]["url"], path=None, timeout=5
|
||||
)
|
||||
write_dict_to_json_file(
|
||||
openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
|
||||
)
|
||||
else:
|
||||
logger.info(f"OpenAPI spec for {url} already exists")
|
||||
openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
|
||||
manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
|
||||
return manifests
|
||||
|
||||
|
||||
def create_directory_if_not_exists(directory_path: str) -> bool:
|
||||
"""
|
||||
Create a directory if it does not exist.
|
||||
Args:
|
||||
directory_path (str): Path to the directory.
|
||||
Returns:
|
||||
bool: True if the directory was created, else False.
|
||||
"""
|
||||
if not os.path.exists(directory_path):
|
||||
try:
|
||||
os.makedirs(directory_path)
|
||||
logger.debug(f"Created directory: {directory_path}")
|
||||
return True
|
||||
except OSError as e:
|
||||
logger.warn(f"Error creating directory {directory_path}: {e}")
|
||||
return False
|
||||
else:
|
||||
logger.info(f"Directory {directory_path} already exists")
|
||||
return True
|
||||
|
||||
|
||||
def initialize_openai_plugins(
|
||||
manifests_specs: dict, cfg: Config, debug: bool = False
|
||||
) -> dict:
|
||||
"""
|
||||
Initialize OpenAI plugins.
|
||||
Args:
|
||||
manifests_specs (dict): per url dictionary of manifest and spec.
|
||||
cfg (Config): Config instance including plugins config
|
||||
debug (bool, optional): Enable debug logging. Defaults to False.
|
||||
Returns:
|
||||
dict: per url dictionary of manifest, spec and client.
|
||||
"""
|
||||
openai_plugins_dir = f"{cfg.plugins_dir}/openai"
|
||||
if create_directory_if_not_exists(openai_plugins_dir):
|
||||
for url, manifest_spec in manifests_specs.items():
|
||||
openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
|
||||
_meta_option = (openapi_python_client.MetaType.SETUP,)
|
||||
_config = OpenAPIConfig(
|
||||
**{
|
||||
"project_name_override": "client",
|
||||
"package_name_override": "client",
|
||||
}
|
||||
)
|
||||
prev_cwd = Path.cwd()
|
||||
os.chdir(openai_plugin_client_dir)
|
||||
Path("ai-plugin.json")
|
||||
if not os.path.exists("client"):
|
||||
client_results = openapi_python_client.create_new_client(
|
||||
url=manifest_spec["manifest"]["api"]["url"],
|
||||
path=None,
|
||||
meta=_meta_option,
|
||||
config=_config,
|
||||
)
|
||||
if client_results:
|
||||
logger.warn(
|
||||
f"Error creating OpenAPI client: {client_results[0].header} \n"
|
||||
f" details: {client_results[0].detail}"
|
||||
)
|
||||
continue
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
"client", "client/client/client.py"
|
||||
)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
client = module.Client(base_url=url)
|
||||
os.chdir(prev_cwd)
|
||||
manifest_spec["client"] = client
|
||||
return manifests_specs
|
||||
|
||||
|
||||
def instantiate_openai_plugin_clients(
|
||||
manifests_specs_clients: dict, cfg: Config, debug: bool = False
|
||||
) -> dict:
|
||||
"""
|
||||
Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
|
||||
Args:
|
||||
manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
|
||||
cfg (Config): Config instance including plugins config
|
||||
debug (bool, optional): Enable debug logging. Defaults to False.
|
||||
Returns:
|
||||
plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
|
||||
|
||||
"""
|
||||
plugins = {}
|
||||
for url, manifest_spec_client in manifests_specs_clients.items():
|
||||
plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
|
||||
return plugins
|
||||
|
||||
|
||||
def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
|
||||
"""Scan the plugins directory for plugins and loads them.
|
||||
|
||||
Args:
|
||||
cfg (Config): Config instance including plugins config
|
||||
debug (bool, optional): Enable debug logging. Defaults to False.
|
||||
|
||||
Returns:
|
||||
List[Tuple[str, Path]]: List of plugins.
|
||||
"""
|
||||
loaded_plugins = []
|
||||
# Generic plugins
|
||||
plugins_path_path = Path(cfg.plugins_dir)
|
||||
for plugin in plugins_path_path.glob("*.zip"):
|
||||
if moduleList := inspect_zip_for_modules(str(plugin), debug):
|
||||
for module in moduleList:
|
||||
plugin = Path(plugin)
|
||||
module = Path(module)
|
||||
logger.debug(f"Plugin: {plugin} Module: {module}")
|
||||
zipped_package = zipimporter(str(plugin))
|
||||
zipped_module = zipped_package.load_module(str(module.parent))
|
||||
for key in dir(zipped_module):
|
||||
if key.startswith("__"):
|
||||
continue
|
||||
a_module = getattr(zipped_module, key)
|
||||
a_keys = dir(a_module)
|
||||
if (
|
||||
"_abc_impl" in a_keys
|
||||
and a_module.__name__ != "AutoGPTPluginTemplate"
|
||||
and denylist_allowlist_check(a_module.__name__, cfg)
|
||||
):
|
||||
loaded_plugins.append(a_module())
|
||||
# OpenAI plugins
|
||||
if cfg.plugins_openai:
|
||||
manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
|
||||
if manifests_specs.keys():
|
||||
manifests_specs_clients = initialize_openai_plugins(
|
||||
manifests_specs, cfg, debug
|
||||
)
|
||||
for url, openai_plugin_meta in manifests_specs_clients.items():
|
||||
if denylist_allowlist_check(url, cfg):
|
||||
plugin = BaseOpenAIPlugin(openai_plugin_meta)
|
||||
loaded_plugins.append(plugin)
|
||||
|
||||
if loaded_plugins:
|
||||
logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
|
||||
for plugin in loaded_plugins:
|
||||
logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
|
||||
return loaded_plugins
|
||||
|
||||
|
||||
def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
|
||||
"""Check if the plugin is in the allowlist or denylist.
|
||||
|
||||
Args:
|
||||
plugin_name (str): Name of the plugin.
|
||||
cfg (Config): Config object.
|
||||
|
||||
Returns:
|
||||
bool: True if the plugin is allowed to load, False otherwise.
|
||||
"""
|
||||
if plugin_name in cfg.plugins_denylist:
|
||||
return False
|
||||
if plugin_name in cfg.plugins_allowlist:
|
||||
return True
|
||||
ack = input(
|
||||
f"WARNING: Plugin {plugin_name} found. But not in the"
|
||||
f" allowlist... Load? ({cfg.authorise_key}/{cfg.exit_key}): "
|
||||
)
|
||||
return ack.lower() == cfg.authorise_key
|
||||
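A small, self-contained check of how the allow/deny logic above behaves; the SimpleNamespace stand-in for the real Config object is an assumption used only to make the example runnable.

from types import SimpleNamespace

# Illustrative stand-in for the real Config object.
fake_cfg = SimpleNamespace(
    plugins_denylist=["BadPlugin"],
    plugins_allowlist=["GoodPlugin"],
    authorise_key="y",
    exit_key="n",
)

assert denylist_allowlist_check("GoodPlugin", fake_cfg) is True
assert denylist_allowlist_check("BadPlugin", fake_cfg) is False
# Any other plugin name falls through to the interactive prompt and is
# loaded only if the user answers with cfg.authorise_key ("y" here).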
@@ -1,8 +1,8 @@
|
||||
"""HTML processing functions"""
|
||||
from __future__ import annotations
|
||||
|
||||
from requests.compat import urljoin
|
||||
from bs4 import BeautifulSoup
|
||||
from requests.compat import urljoin
|
||||
|
||||
|
||||
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
|
||||
|
||||
@@ -1,15 +1,23 @@
|
||||
"""Text processing functions"""
|
||||
from typing import Generator, Optional, Dict
|
||||
from typing import Dict, Generator, Optional
|
||||
|
||||
import spacy
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
from autogpt.memory import get_memory
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm_utils import create_chat_completion
|
||||
from autogpt.llm import count_message_tokens, create_chat_completion
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
|
||||
CFG = Config()
|
||||
MEMORY = get_memory(CFG)
|
||||
|
||||
|
||||
def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
|
||||
def split_text(
|
||||
text: str,
|
||||
max_length: int = CFG.browse_chunk_max_length,
|
||||
model: str = CFG.fast_llm_model,
|
||||
question: str = "",
|
||||
) -> Generator[str, None, None]:
|
||||
"""Split text into chunks of a maximum length
|
||||
|
||||
Args:
|
||||
@@ -22,21 +30,42 @@ def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
|
||||
Raises:
|
||||
ValueError: If the text is longer than the maximum length
|
||||
"""
|
||||
paragraphs = text.split("\n")
|
||||
current_length = 0
|
||||
flattened_paragraphs = " ".join(text.split("\n"))
|
||||
nlp = spacy.load(CFG.browse_spacy_language_model)
|
||||
nlp.add_pipe("sentencizer")
|
||||
doc = nlp(flattened_paragraphs)
|
||||
sentences = [sent.text.strip() for sent in doc.sents]
|
||||
|
||||
current_chunk = []
|
||||
|
||||
for paragraph in paragraphs:
|
||||
if current_length + len(paragraph) + 1 <= max_length:
|
||||
current_chunk.append(paragraph)
|
||||
current_length += len(paragraph) + 1
|
||||
for sentence in sentences:
|
||||
message_with_additional_sentence = [
|
||||
create_message(" ".join(current_chunk) + " " + sentence, question)
|
||||
]
|
||||
|
||||
expected_token_usage = (
|
||||
count_message_tokens(messages=message_with_additional_sentence, model=model)
|
||||
+ 1
|
||||
)
|
||||
if expected_token_usage <= max_length:
|
||||
current_chunk.append(sentence)
|
||||
else:
|
||||
yield "\n".join(current_chunk)
|
||||
current_chunk = [paragraph]
|
||||
current_length = len(paragraph) + 1
|
||||
yield " ".join(current_chunk)
|
||||
current_chunk = [sentence]
|
||||
message_this_sentence_only = [
|
||||
create_message(" ".join(current_chunk), question)
|
||||
]
|
||||
expected_token_usage = (
|
||||
count_message_tokens(messages=message_this_sentence_only, model=model)
|
||||
+ 1
|
||||
)
|
||||
if expected_token_usage > max_length:
|
||||
raise ValueError(
|
||||
f"Sentence is too long in webpage: {expected_token_usage} tokens."
|
||||
)
|
||||
|
||||
if current_chunk:
|
||||
yield "\n".join(current_chunk)
|
||||
yield " ".join(current_chunk)
|
||||
|
||||
|
||||
def summarize_text(
|
||||
@@ -56,46 +85,55 @@ def summarize_text(
|
||||
if not text:
|
||||
return "Error: No text to summarize"
|
||||
|
||||
model = CFG.fast_llm_model
|
||||
text_length = len(text)
|
||||
print(f"Text length: {text_length} characters")
|
||||
logger.info(f"Text length: {text_length} characters")
|
||||
|
||||
summaries = []
|
||||
chunks = list(split_text(text))
|
||||
chunks = list(
|
||||
split_text(
|
||||
text, max_length=CFG.browse_chunk_max_length, model=model, question=question
|
||||
),
|
||||
)
|
||||
scroll_ratio = 1 / len(chunks)
|
||||
|
||||
for i, chunk in enumerate(chunks):
|
||||
if driver:
|
||||
scroll_to_percentage(driver, scroll_ratio * i)
|
||||
print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
|
||||
logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory")
|
||||
|
||||
memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
|
||||
|
||||
MEMORY.add(memory_to_add)
|
||||
memory = get_memory(CFG)
|
||||
memory.add(memory_to_add)
|
||||
|
||||
print(f"Summarizing chunk {i + 1} / {len(chunks)}")
|
||||
messages = [create_message(chunk, question)]
|
||||
tokens_for_chunk = count_message_tokens(messages, model)
|
||||
logger.info(
|
||||
f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
|
||||
)
|
||||
|
||||
summary = create_chat_completion(
|
||||
model=CFG.fast_llm_model,
|
||||
model=model,
|
||||
messages=messages,
|
||||
max_tokens=CFG.browse_summary_max_token,
|
||||
)
|
||||
summaries.append(summary)
|
||||
print(f"Added chunk {i + 1} summary to memory")
|
||||
logger.info(
|
||||
f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters"
|
||||
)
|
||||
|
||||
memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
|
||||
|
||||
MEMORY.add(memory_to_add)
|
||||
memory.add(memory_to_add)
|
||||
|
||||
print(f"Summarized {len(chunks)} chunks.")
|
||||
logger.info(f"Summarized {len(chunks)} chunks.")
|
||||
|
||||
combined_summary = "\n".join(summaries)
|
||||
messages = [create_message(combined_summary, question)]
|
||||
|
||||
return create_chat_completion(
|
||||
model=CFG.fast_llm_model,
|
||||
model=model,
|
||||
messages=messages,
|
||||
max_tokens=CFG.browse_summary_max_token,
|
||||
)
|
||||
|
||||
|
||||
|
||||
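The reworked split_text above packs whole sentences into chunks by expected token count rather than by character length. Below is a stripped-down, self-contained sketch of the same greedy-packing idea; it uses a whitespace word count as a stand-in for count_message_tokens and simply yields an oversized sentence instead of raising ValueError, so it is an approximation rather than the function in this diff.

from typing import Generator, List


def chunk_sentences(sentences: List[str], max_tokens: int) -> Generator[str, None, None]:
    """Greedily pack sentences into chunks that stay under a token budget."""
    current: List[str] = []
    for sentence in sentences:
        # Stand-in token estimate: one token per whitespace-separated word.
        projected = sum(len(s.split()) for s in current) + len(sentence.split())
        if projected <= max_tokens:
            current.append(sentence)
        else:
            if current:
                yield " ".join(current)
            current = [sentence]
    if current:
        yield " ".join(current)


# Example: pack three short sentences under a 6-token budget.
print(list(chunk_sentences(["I went out.", "It rained a lot.", "I came home."], 6)))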
0
autogpt/prompts/__init__.py
Normal file
@@ -1,8 +1,6 @@
|
||||
""" A module for generating custom prompt strings."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
|
||||
class PromptGenerator:
|
||||
@@ -20,6 +18,10 @@ class PromptGenerator:
|
||||
self.commands = []
|
||||
self.resources = []
|
||||
self.performance_evaluation = []
|
||||
self.goals = []
|
||||
self.command_registry = None
|
||||
self.name = "Bob"
|
||||
self.role = "AI"
|
||||
self.response_format = {
|
||||
"thoughts": {
|
||||
"text": "thought",
|
||||
@@ -40,7 +42,13 @@ class PromptGenerator:
|
||||
"""
|
||||
self.constraints.append(constraint)
|
||||
|
||||
def add_command(self, command_label: str, command_name: str, args=None) -> None:
|
||||
def add_command(
|
||||
self,
|
||||
command_label: str,
|
||||
command_name: str,
|
||||
args=None,
|
||||
function: Optional[Callable] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Add a command to the commands list with a label, name, and optional arguments.
|
||||
|
||||
@@ -49,6 +57,8 @@ class PromptGenerator:
|
||||
command_name (str): The name of the command.
|
||||
args (dict, optional): A dictionary containing argument names and their
|
||||
values. Defaults to None.
|
||||
function (callable, optional): A callable function to be called when
|
||||
the command is executed. Defaults to None.
|
||||
"""
|
||||
if args is None:
|
||||
args = {}
|
||||
@@ -59,11 +69,12 @@ class PromptGenerator:
|
||||
"label": command_label,
|
||||
"name": command_name,
|
||||
"args": command_args,
|
||||
"function": function,
|
||||
}
|
||||
|
||||
self.commands.append(command)
|
||||
|
||||
def _generate_command_string(self, command: dict[str, Any]) -> str:
|
||||
def _generate_command_string(self, command: Dict[str, Any]) -> str:
|
||||
"""
|
||||
Generate a formatted string representation of a command.
|
||||
|
||||
@@ -96,7 +107,7 @@ class PromptGenerator:
|
||||
"""
|
||||
self.performance_evaluation.append(evaluation)
|
||||
|
||||
def _generate_numbered_list(self, items: list[Any], item_type="list") -> str:
|
||||
def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
|
||||
"""
|
||||
Generate a numbered list from given items based on the item_type.
|
||||
|
||||
@@ -109,10 +120,16 @@ class PromptGenerator:
|
||||
str: The formatted numbered list.
|
||||
"""
|
||||
if item_type == "command":
|
||||
return "\n".join(
|
||||
f"{i+1}. {self._generate_command_string(item)}"
|
||||
for i, item in enumerate(items)
|
||||
)
|
||||
command_strings = []
|
||||
if self.command_registry:
|
||||
command_strings += [
|
||||
str(item)
|
||||
for item in self.command_registry.commands.values()
|
||||
if item.enabled
|
||||
]
|
||||
# terminate command is added manually
|
||||
command_strings += [self._generate_command_string(item) for item in items]
|
||||
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
|
||||
else:
|
||||
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
|
||||
|
||||
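A brief usage sketch of the extended add_command signature above; the command and the lambda passed as function are invented for illustration.

generator = PromptGenerator()

# The new optional `function` argument lets a callable ride along with the
# command entry; label, name, and args follow the existing convention.
generator.add_command(
    "Append to file",
    "append_to_file",
    {"file": "<file>", "text": "<text>"},
    function=lambda file, text: open(file, "a").write(text),  # illustrative only
)

print(generator.commands[-1]["name"])  # -> append_to_file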
@@ -1,16 +1,21 @@
|
||||
from colorama import Fore
|
||||
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.llm import ApiManager
|
||||
from autogpt.logs import logger
|
||||
from autogpt.promptgenerator import PromptGenerator
|
||||
from autogpt.config import Config
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.setup import prompt_user
|
||||
from autogpt.utils import clean_input
|
||||
|
||||
CFG = Config()
|
||||
|
||||
DEFAULT_TRIGGERING_PROMPT = (
|
||||
"Determine which next command to use, and respond using the format specified above:"
|
||||
)
|
||||
|
||||
def get_prompt() -> str:
|
||||
|
||||
def build_default_prompt_generator() -> PromptGenerator:
|
||||
"""
|
||||
This function generates a prompt string that includes various constraints,
|
||||
commands, resources, and performance evaluations.
|
||||
@@ -19,9 +24,6 @@ def get_prompt() -> str:
|
||||
str: The generated prompt string.
|
||||
"""
|
||||
|
||||
# Initialize the Config object
|
||||
cfg = Config()
|
||||
|
||||
# Initialize the PromptGenerator object
|
||||
prompt_generator = PromptGenerator()
|
||||
|
||||
@@ -41,77 +43,8 @@ def get_prompt() -> str:
|
||||
|
||||
# Define the command list
|
||||
commands = [
|
||||
("Google Search", "google", {"input": "<search>"}),
|
||||
(
|
||||
"Browse Website",
|
||||
"browse_website",
|
||||
{"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
|
||||
),
|
||||
(
|
||||
"Start GPT Agent",
|
||||
"start_agent",
|
||||
{"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
|
||||
),
|
||||
(
|
||||
"Message GPT Agent",
|
||||
"message_agent",
|
||||
{"key": "<key>", "message": "<message>"},
|
||||
),
|
||||
("List GPT Agents", "list_agents", {}),
|
||||
("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
|
||||
(
|
||||
"Clone Repository",
|
||||
"clone_repository",
|
||||
{"repository_url": "<url>", "clone_path": "<directory>"},
|
||||
),
|
||||
("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
|
||||
("Read file", "read_file", {"file": "<file>"}),
|
||||
("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
|
||||
("Delete file", "delete_file", {"file": "<file>"}),
|
||||
("Search Files", "search_files", {"directory": "<directory>"}),
|
||||
("Evaluate Code", "evaluate_code", {"code": "<full_code_string>"}),
|
||||
(
|
||||
"Get Improved Code",
|
||||
"improve_code",
|
||||
{"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
|
||||
),
|
||||
(
|
||||
"Write Tests",
|
||||
"write_tests",
|
||||
{"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
|
||||
),
|
||||
("Execute Python File", "execute_python_file", {"file": "<file>"}),
|
||||
("Generate Image", "generate_image", {"prompt": "<prompt>"}),
|
||||
("Send Tweet", "send_tweet", {"text": "<text>"}),
|
||||
]
|
||||
|
||||
# Only add the audio to text command if the model is specified
|
||||
if cfg.huggingface_audio_to_text_model:
|
||||
commands.append(
|
||||
(
|
||||
"Convert Audio to text",
|
||||
"read_audio_from_file",
|
||||
{"file": "<file>"}
|
||||
),
|
||||
)
|
||||
|
||||
# Only add shell command to the prompt if the AI is allowed to execute it
|
||||
if cfg.execute_local_commands:
|
||||
commands.append(
|
||||
(
|
||||
"Execute Shell Command, non-interactive commands only",
|
||||
"execute_shell",
|
||||
{"command_line": "<command_line>"},
|
||||
),
|
||||
)
|
||||
|
||||
# Add these commands last.
|
||||
commands.append(
|
||||
("Do Nothing", "do_nothing", {}),
|
||||
)
|
||||
commands.append(
|
||||
("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
|
||||
)
|
||||
]
|
||||
|
||||
# Add commands to the PromptGenerator object
|
||||
for command_label, command_name, args in commands:
|
||||
@@ -142,12 +75,11 @@ def get_prompt() -> str:
|
||||
"Every command has a cost, so be smart and efficient. Aim to complete tasks in"
|
||||
" the least number of steps."
|
||||
)
|
||||
|
||||
# Generate the prompt string
|
||||
return prompt_generator.generate_prompt_string()
|
||||
prompt_generator.add_performance_evaluation("Write all code to a file.")
|
||||
return prompt_generator
|
||||
|
||||
|
||||
def construct_prompt() -> str:
|
||||
def construct_main_ai_config() -> AIConfig:
|
||||
"""Construct the prompt for the AI to respond to
|
||||
|
||||
Returns:
|
||||
@@ -158,6 +90,11 @@ def construct_prompt() -> str:
|
||||
logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
|
||||
logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
|
||||
logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
|
||||
logger.typewriter_log(
|
||||
"API Budget:",
|
||||
Fore.GREEN,
|
||||
"infinite" if config.api_budget <= 0 else f"${config.api_budget}",
|
||||
)
|
||||
elif config.ai_name:
|
||||
logger.typewriter_log(
|
||||
"Welcome back! ",
|
||||
@@ -170,17 +107,36 @@ def construct_prompt() -> str:
|
||||
Name: {config.ai_name}
|
||||
Role: {config.ai_role}
|
||||
Goals: {config.ai_goals}
|
||||
Continue (y/n): """
|
||||
API Budget: {"infinite" if config.api_budget <= 0 else f"${config.api_budget}"}
|
||||
Continue ({CFG.authorise_key}/{CFG.exit_key}): """
|
||||
)
|
||||
if should_continue.lower() == "n":
|
||||
if should_continue.lower() == CFG.exit_key:
|
||||
config = AIConfig()
|
||||
|
||||
if not config.ai_name:
|
||||
config = prompt_user()
|
||||
config.save(CFG.ai_settings_file)
|
||||
|
||||
# Get rid of this global:
|
||||
global ai_name
|
||||
ai_name = config.ai_name
|
||||
# set the total api budget
|
||||
api_manager = ApiManager()
|
||||
api_manager.set_total_budget(config.api_budget)
|
||||
|
||||
return config.construct_full_prompt()
|
||||
# Agent Created, print message
|
||||
logger.typewriter_log(
|
||||
config.ai_name,
|
||||
Fore.LIGHTBLUE_EX,
|
||||
"has been created with the following details:",
|
||||
speak_text=True,
|
||||
)
|
||||
|
||||
# Print the ai config details
|
||||
# Name
|
||||
logger.typewriter_log("Name:", Fore.GREEN, config.ai_name, speak_text=False)
|
||||
# Role
|
||||
logger.typewriter_log("Role:", Fore.GREEN, config.ai_role, speak_text=False)
|
||||
# Goals
|
||||
logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
|
||||
for goal in config.ai_goals:
|
||||
logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)
|
||||
|
||||
return config
|
||||
157
autogpt/setup.py
@@ -1,21 +1,88 @@
|
||||
"""Setup the AI and its goals"""
|
||||
"""Set up the AI and its goals"""
|
||||
import re
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
from autogpt import utils
|
||||
from autogpt.config import Config
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.llm import create_chat_completion
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def prompt_user() -> AIConfig:
|
||||
"""Prompt the user for input
|
||||
|
||||
Returns:
|
||||
AIConfig: The AIConfig object containing the user's input
|
||||
AIConfig: The AIConfig object tailored to the user's input
|
||||
"""
|
||||
ai_name = ""
|
||||
ai_config = None
|
||||
|
||||
# Construct the prompt
|
||||
logger.typewriter_log(
|
||||
"Welcome to Auto-GPT! ",
|
||||
Fore.GREEN,
|
||||
"run with '--help' for more information.",
|
||||
speak_text=True,
|
||||
)
|
||||
|
||||
# Get user desire
|
||||
logger.typewriter_log(
|
||||
"Create an AI-Assistant:",
|
||||
Fore.GREEN,
|
||||
"input '--manual' to enter manual mode.",
|
||||
speak_text=True,
|
||||
)
|
||||
|
||||
user_desire = utils.clean_input(
|
||||
f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
|
||||
)
|
||||
|
||||
if user_desire == "":
|
||||
user_desire = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT" # Default prompt
|
||||
|
||||
# If user desire contains "--manual"
|
||||
if "--manual" in user_desire:
|
||||
logger.typewriter_log(
|
||||
"Manual Mode Selected",
|
||||
Fore.GREEN,
|
||||
speak_text=True,
|
||||
)
|
||||
return generate_aiconfig_manual()
|
||||
|
||||
else:
|
||||
try:
|
||||
return generate_aiconfig_automatic(user_desire)
|
||||
except Exception as e:
|
||||
logger.typewriter_log(
|
||||
"Unable to automatically generate AI Config based on user desire.",
|
||||
Fore.RED,
|
||||
"Falling back to manual mode.",
|
||||
speak_text=True,
|
||||
)
|
||||
|
||||
return generate_aiconfig_manual()
|
||||
|
||||
|
||||
def generate_aiconfig_manual() -> AIConfig:
|
||||
"""
|
||||
Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.
|
||||
|
||||
This function guides the user through a series of prompts to collect the necessary information to create
|
||||
an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
|
||||
goals. If the user does not provide a value for any of the fields, default values will be used.
|
||||
|
||||
Returns:
|
||||
AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
|
||||
"""
|
||||
|
||||
# Manual Setup Intro
|
||||
logger.typewriter_log(
|
||||
"Create an AI-Assistant:",
|
||||
Fore.GREEN,
|
||||
"Enter the name of your AI and its role below. Entering nothing will load"
|
||||
" defaults.",
|
||||
speak_text=True,
|
||||
@@ -52,7 +119,7 @@ def prompt_user() -> AIConfig:
|
||||
"For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
|
||||
" multiple businesses autonomously'",
|
||||
)
|
||||
print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
|
||||
logger.info("Enter nothing to load defaults, enter nothing when finished.")
|
||||
ai_goals = []
|
||||
for i in range(5):
|
||||
ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
|
||||
@@ -66,4 +133,86 @@ def prompt_user() -> AIConfig:
|
||||
"Develop and manage multiple businesses autonomously",
|
||||
]
|
||||
|
||||
return AIConfig(ai_name, ai_role, ai_goals)
|
||||
# Get API Budget from User
|
||||
logger.typewriter_log(
|
||||
"Enter your budget for API calls: ",
|
||||
Fore.GREEN,
|
||||
"For example: $1.50",
|
||||
)
|
||||
logger.info("Enter nothing to let the AI run without monetary limit")
|
||||
api_budget_input = utils.clean_input(
|
||||
f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
|
||||
)
|
||||
if api_budget_input == "":
|
||||
api_budget = 0.0
|
||||
else:
|
||||
try:
|
||||
api_budget = float(api_budget_input.replace("$", ""))
|
||||
except ValueError:
|
||||
logger.typewriter_log(
|
||||
"Invalid budget input. Setting budget to unlimited.", Fore.RED
|
||||
)
|
||||
api_budget = 0.0
|
||||
|
||||
return AIConfig(ai_name, ai_role, ai_goals, api_budget)
|
||||
|
||||
|
||||
def generate_aiconfig_automatic(user_prompt) -> AIConfig:
|
||||
"""Generates an AIConfig object from the given string.
|
||||
|
||||
Returns:
|
||||
AIConfig: The AIConfig object tailored to the user's input
|
||||
"""
|
||||
|
||||
system_prompt = """
|
||||
Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.
|
||||
|
||||
The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.
|
||||
|
||||
Example input:
|
||||
Help me with marketing my business
|
||||
|
||||
Example output:
|
||||
Name: CMOGPT
|
||||
Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
|
||||
Goals:
|
||||
- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.
|
||||
|
||||
- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.
|
||||
|
||||
- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.
|
||||
|
||||
- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
|
||||
"""
|
||||
|
||||
# Call LLM with the string as user input
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": system_prompt,
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"Task: '{user_prompt}'\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n",
|
||||
},
|
||||
]
|
||||
output = create_chat_completion(messages, CFG.fast_llm_model)
|
||||
|
||||
# Debug LLM Output
|
||||
logger.debug(f"AI Config Generator Raw Output: {output}")
|
||||
|
||||
# Parse the output
|
||||
ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
|
||||
ai_role = (
|
||||
re.search(
|
||||
r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
|
||||
output,
|
||||
re.IGNORECASE | re.DOTALL,
|
||||
)
|
||||
.group(1)
|
||||
.strip()
|
||||
)
|
||||
ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
|
||||
api_budget = 0.0 # TODO: parse api budget using a regular expression
|
||||
|
||||
return AIConfig(ai_name, ai_role, ai_goals, api_budget)
|
||||
|
||||
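To make the parsing step above concrete, here is a quick, self-contained check of the three regular expressions against a sample string in the documented output format; the sample text is invented for illustration.

import re

sample_output = """Name: CMOGPT
Description: a professional digital marketer AI that assists solopreneurs.
Goals:
- Engage in effective problem-solving and planning.
- Provide specific, actionable, concise advice."""

ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", sample_output, re.IGNORECASE).group(1)
ai_role = (
    re.search(
        r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
        sample_output,
        re.IGNORECASE | re.DOTALL,
    )
    .group(1)
    .strip()
)
ai_goals = re.findall(r"(?<=\n)-\s*(.*)", sample_output)

print(ai_name)   # CMOGPT
print(ai_role)   # a professional digital marketer AI that assists solopreneurs.
print(ai_goals)  # ['Engage in effective problem-solving and planning.', 'Provide specific, actionable, concise advice.']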