mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-12 08:38:09 -05:00
Compare commits
4831 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5e0dad5a96 | ||
|
|
f598ba27b6 | ||
|
|
286202cc66 | ||
|
|
55eb917162 | ||
|
|
c2843eecfa | ||
|
|
3971fbd800 | ||
|
|
e04838feb5 | ||
|
|
3ef1f6e380 | ||
|
|
705c63b801 | ||
|
|
9395706841 | ||
|
|
da007e3a22 | ||
|
|
9e18c26e65 | ||
|
|
30e3d65711 | ||
|
|
b9c26b1a6b | ||
|
|
220a127e51 | ||
|
|
004e49edb1 | ||
|
|
bf21bb1fa5 | ||
|
|
a98677b79d | ||
|
|
530ddf2c34 | ||
|
|
33ee2f2ee5 | ||
|
|
ae1d410b65 | ||
|
|
196a5d6b59 | ||
|
|
82c1249d33 | ||
|
|
056eb46c0f | ||
|
|
9389a30298 | ||
|
|
9b58faeeb6 | ||
|
|
9cb55c5ac0 | ||
|
|
eb1df12ce8 | ||
|
|
12f40596b3 | ||
|
|
3af26a9379 | ||
|
|
4041ed3e33 | ||
|
|
ee78653425 | ||
|
|
6fde030c37 | ||
|
|
76c5a27044 | ||
|
|
c6aba70dd4 | ||
|
|
bf1e01d423 | ||
|
|
470e7036b9 | ||
|
|
f6608754aa | ||
|
|
bd70ab00e0 | ||
|
|
2c940b381a | ||
|
|
03b30ebf5b | ||
|
|
0d8c2a820e | ||
|
|
5ce562e11f | ||
|
|
fcf2247c20 | ||
|
|
7326ee1221 | ||
|
|
70c7a3b1f3 | ||
|
|
6bdab5b777 | ||
|
|
5f5e31ac19 | ||
|
|
9e71b658d6 | ||
|
|
b7b23d68b4 | ||
|
|
630f401cee | ||
|
|
94fbcfb501 | ||
|
|
8102f78030 | ||
|
|
52c731abd6 | ||
|
|
c8fbce643e | ||
|
|
b1eb259bb3 | ||
|
|
c738eb3bc6 | ||
|
|
60fca5c5f0 | ||
|
|
bba9836735 | ||
|
|
ad76bd1300 | ||
|
|
6c001bd595 | ||
|
|
f5b89672f8 | ||
|
|
76480ffa03 | ||
|
|
ab60a57379 | ||
|
|
1d9b01fc77 | ||
|
|
e81d9f9f0b | ||
|
|
0d5d0270ea | ||
|
|
bd25f9223c | ||
|
|
07305b55ff | ||
|
|
cdfe3e5fbc | ||
|
|
e992cdf8c2 | ||
|
|
fa16c207e0 | ||
|
|
ebd2ecd84c | ||
|
|
0b919522ae | ||
|
|
ef691359b7 | ||
|
|
f8815c3053 | ||
|
|
a60ed21404 | ||
|
|
2618d1d87c | ||
|
|
e17ea22a0a | ||
|
|
60669903a0 | ||
|
|
b1b31390a4 | ||
|
|
3c12a398ae | ||
|
|
126d070396 | ||
|
|
090f22b05c | ||
|
|
1b9adf5434 | ||
|
|
3bd8040d6a | ||
|
|
b12dba13f4 | ||
|
|
2cae9ba8da | ||
|
|
3753906482 | ||
|
|
fd54ad8666 | ||
|
|
e645cc4b33 | ||
|
|
010a8ffaaf | ||
|
|
2df325d033 | ||
|
|
79ebc4c13b | ||
|
|
e5eb42d84a | ||
|
|
d62b940baf | ||
|
|
8fd22bcfd7 | ||
|
|
11827835a0 | ||
|
|
70fab8711a | ||
|
|
8ec015ba72 | ||
|
|
bc7d2f0f37 | ||
|
|
54694709bb | ||
|
|
b4b5a09b6b | ||
|
|
82239dd129 | ||
|
|
078ad29356 | ||
|
|
5000aa7ee0 | ||
|
|
dc1077f893 | ||
|
|
80df44a978 | ||
|
|
c2a79d2f10 | ||
|
|
7db85a8990 | ||
|
|
0454a9a7be | ||
|
|
09951fed4b | ||
|
|
6204d82d84 | ||
|
|
8c9fe5c167 | ||
|
|
71de1a6a5e | ||
|
|
956165adf3 | ||
|
|
e4dc16a867 | ||
|
|
cfa0b6610c | ||
|
|
933baa0e8d | ||
|
|
370b2dabe8 | ||
|
|
baa00a5b03 | ||
|
|
60a8e00578 | ||
|
|
85e7d678ce | ||
|
|
476b307d69 | ||
|
|
5dbfb4e3f1 | ||
|
|
f6d09c74f5 | ||
|
|
6d17e627e8 | ||
|
|
5cfa807f00 | ||
|
|
6fff06f0f6 | ||
|
|
cbe553a547 | ||
|
|
96ef35536c | ||
|
|
087d3a3760 | ||
|
|
5da58aa284 | ||
|
|
7de12a2200 | ||
|
|
8f1c63a7ea | ||
|
|
6ec200f912 | ||
|
|
b5db7f575e | ||
|
|
98c909f99f | ||
|
|
c5615aa862 | ||
|
|
e725305e15 | ||
|
|
9551f54c35 | ||
|
|
777f7d25bf | ||
|
|
ea6f37bf98 | ||
|
|
299530cf95 | ||
|
|
1df7d527dd | ||
|
|
407cf858e7 | ||
|
|
a670b384f6 | ||
|
|
f9b8b0a41a | ||
|
|
e59e138352 | ||
|
|
a95ee693dd | ||
|
|
26f56114d1 | ||
|
|
45ace8ccab | ||
|
|
95af63b5ad | ||
|
|
012bad72e8 | ||
|
|
efcd0f93ed | ||
|
|
4c32b46d40 | ||
|
|
41fbfe35fb | ||
|
|
c719e4f177 | ||
|
|
3d62cec553 | ||
|
|
fa12564954 | ||
|
|
f6d8e597e1 | ||
|
|
a1cbc101a5 | ||
|
|
afc8338145 | ||
|
|
7fe4e455fd | ||
|
|
52d40d0f8b | ||
|
|
9e35f8c5cb | ||
|
|
c0afb133a7 | ||
|
|
526364297c | ||
|
|
aed067e61c | ||
|
|
653eb4964f | ||
|
|
406206f5d0 | ||
|
|
1e05d6a8e9 | ||
|
|
848637bfeb | ||
|
|
cea81bfe4e | ||
|
|
1e92c284d9 | ||
|
|
98c1cb8ff9 | ||
|
|
58dc8296db | ||
|
|
4782f4383c | ||
|
|
2b60a392fb | ||
|
|
f30b2cdf25 | ||
|
|
9084c31662 | ||
|
|
183c72b2d0 | ||
|
|
55e100ee1e | ||
|
|
82c5cd2d79 | ||
|
|
f0ab795248 | ||
|
|
5b9caa4345 | ||
|
|
1e054064f6 | ||
|
|
646d98470f | ||
|
|
5a68be5419 | ||
|
|
2ff8a0743a | ||
|
|
582571631e | ||
|
|
bf10df612e | ||
|
|
c577d04692 | ||
|
|
85d895ef77 | ||
|
|
deacc2bd8f | ||
|
|
3eb0d73461 | ||
|
|
be0f6498ed | ||
|
|
0bab2714e9 | ||
|
|
9c74d76a3a | ||
|
|
78e96f8a1a | ||
|
|
904b444b13 | ||
|
|
3cad0f89ee | ||
|
|
8131fc385b | ||
|
|
335fea8605 | ||
|
|
55d32f0324 | ||
|
|
56ce7ac628 | ||
|
|
81adf84032 | ||
|
|
f8d07a27af | ||
|
|
1bad26657c | ||
|
|
31dbb543a2 | ||
|
|
60d25135e6 | ||
|
|
4678ed2e57 | ||
|
|
98a07f1265 | ||
|
|
5e8ff5e3ed | ||
|
|
89adcefd63 | ||
|
|
d82e577196 | ||
|
|
e6cc8687a5 | ||
|
|
fbad0d01ee | ||
|
|
fe5c1968bc | ||
|
|
951abf6d5b | ||
|
|
9ae6389c6c | ||
|
|
4cf1dd30f1 | ||
|
|
c7fdfa0f77 | ||
|
|
6fa7d22c91 | ||
|
|
52bd033a02 | ||
|
|
bb5baadeb2 | ||
|
|
db97b24518 | ||
|
|
533d7b7da8 | ||
|
|
183c2a4845 | ||
|
|
6440a8e217 | ||
|
|
e0930ba39d | ||
|
|
a21fd30fce | ||
|
|
e2df2cd90d | ||
|
|
6bdb849150 | ||
|
|
8469fafc6f | ||
|
|
3c2c3e57a0 | ||
|
|
ec6bae0467 | ||
|
|
f5fe96260e | ||
|
|
49a18437ac | ||
|
|
3cee893314 | ||
|
|
5d1035aeb0 | ||
|
|
2e2c6fed52 | ||
|
|
ca9c52f76a | ||
|
|
973822d973 | ||
|
|
e773329391 | ||
|
|
c9d41e69bd | ||
|
|
08905d71f9 | ||
|
|
8becde370c | ||
|
|
dccc33152b | ||
|
|
b23bd9c479 | ||
|
|
ac45b7cae9 | ||
|
|
3d54a9103c | ||
|
|
ca7182403b | ||
|
|
53826ab360 | ||
|
|
eac5548023 | ||
|
|
122f544966 | ||
|
|
29ba4c2c73 | ||
|
|
76feead3b1 | ||
|
|
081df805df | ||
|
|
acc1d79146 | ||
|
|
07811b2133 | ||
|
|
01b6c2d4bf | ||
|
|
905b1df218 | ||
|
|
edf84fb9f8 | ||
|
|
b62c24dc77 | ||
|
|
dfa855f533 | ||
|
|
da2111bafb | ||
|
|
b2dba39810 | ||
|
|
d2a5bb286f | ||
|
|
36b9a0a930 | ||
|
|
f40db85b43 | ||
|
|
0767b17779 | ||
|
|
0f0c13bae8 | ||
|
|
3b0cd9518d | ||
|
|
22b6dbbf6a | ||
|
|
d9a1a1edc8 | ||
|
|
3c0d37d5d1 | ||
|
|
c98061bc3b | ||
|
|
a8c0cbef54 | ||
|
|
8ccd14c4bf | ||
|
|
3e384c9771 | ||
|
|
22f2a05f08 | ||
|
|
e94a7b08c9 | ||
|
|
7b8928f49b | ||
|
|
699087e289 | ||
|
|
aa8ca37f86 | ||
|
|
8bdb48cba4 | ||
|
|
03ea51b266 | ||
|
|
77034f2df0 | ||
|
|
ccf4397883 | ||
|
|
e7885f943b | ||
|
|
aca7165694 | ||
|
|
22b223037e | ||
|
|
6747ae1559 | ||
|
|
39afba6da8 | ||
|
|
a00df25092 | ||
|
|
ea698ab0fe | ||
|
|
902d2a8924 | ||
|
|
ab0df04bfe | ||
|
|
d407fd101e | ||
|
|
a911f9a5eb | ||
|
|
470c738732 | ||
|
|
7de49dfbe5 | ||
|
|
a02b017cea | ||
|
|
56b82369b6 | ||
|
|
a7926584ca | ||
|
|
6ffa644fb6 | ||
|
|
a82317e2ac | ||
|
|
fd000a4173 | ||
|
|
235715e054 | ||
|
|
d0ec31b698 | ||
|
|
fa1b486c64 | ||
|
|
21084c5817 | ||
|
|
82fd3166ef | ||
|
|
f833fa3624 | ||
|
|
e6f9870f2e | ||
|
|
6e319a6881 | ||
|
|
64edf12c31 | ||
|
|
e1795b8216 | ||
|
|
057d0848ef | ||
|
|
2dc673614f | ||
|
|
24e08d57ef | ||
|
|
bd540b5cc4 | ||
|
|
6d192429a6 | ||
|
|
314a24ab8f | ||
|
|
ff962d8d88 | ||
|
|
5e5182e236 | ||
|
|
04dcd230cd | ||
|
|
c8b46109fe | ||
|
|
c00caa4bcf | ||
|
|
e382dcf823 | ||
|
|
27e6c3a95d | ||
|
|
aaf650ee23 | ||
|
|
4c003d6e20 | ||
|
|
8264d7bf5a | ||
|
|
37b7053e14 | ||
|
|
354e626965 | ||
|
|
90371e1781 | ||
|
|
e128bfaf5f | ||
|
|
62c420e26f | ||
|
|
97a5582c34 | ||
|
|
78b84289cb | ||
|
|
9e22409d66 | ||
|
|
e7c075a521 | ||
|
|
555e113706 | ||
|
|
420e6cae2f | ||
|
|
920f931a21 | ||
|
|
e874318832 | ||
|
|
cb4b96a70c | ||
|
|
03ea4c2690 | ||
|
|
e70e613f73 | ||
|
|
854f6dcaec | ||
|
|
ea5ba9d193 | ||
|
|
da14957fce | ||
|
|
629f575dde | ||
|
|
8883d7db53 | ||
|
|
a6063e1550 | ||
|
|
e311847fa8 | ||
|
|
d673bf741a | ||
|
|
110e093e7b | ||
|
|
93b6e0ee51 | ||
|
|
450f120510 | ||
|
|
2355d56801 | ||
|
|
d3dae2264d | ||
|
|
58313d9ae7 | ||
|
|
9e7dd4be74 | ||
|
|
6550bdc10c | ||
|
|
0b9f3be6b8 | ||
|
|
a2d8d9bac9 | ||
|
|
b7096e01fb | ||
|
|
bffb92bfbc | ||
|
|
2ecce27653 | ||
|
|
1089551869 | ||
|
|
58af7f9466 | ||
|
|
b89609fd16 | ||
|
|
dcfc3a4dad | ||
|
|
7cf6d3ff79 | ||
|
|
f4f164ac15 | ||
|
|
cd9d041fe5 | ||
|
|
0fb8a84382 | ||
|
|
a408da8317 | ||
|
|
976ff04cce | ||
|
|
3c91038089 | ||
|
|
d4b441932d | ||
|
|
14ddb915bf | ||
|
|
d6cbb48609 | ||
|
|
3789b00479 | ||
|
|
f94e81f48b | ||
|
|
e10c4ee4cd | ||
|
|
81dee568cb | ||
|
|
7929f1a4ac | ||
|
|
0a28c72bad | ||
|
|
b9861a5308 | ||
|
|
af3a2bb5f5 | ||
|
|
2f174837bd | ||
|
|
b30eaf653a | ||
|
|
d9c9b22886 | ||
|
|
ff71b0beb7 | ||
|
|
57cc8b69e9 | ||
|
|
7ce0c655d0 | ||
|
|
1e755f9e8d | ||
|
|
f9bedb0fd9 | ||
|
|
a32bc72314 | ||
|
|
227092b669 | ||
|
|
39556a71cc | ||
|
|
1fb8c1adac | ||
|
|
37e1780d76 | ||
|
|
0df2199c42 | ||
|
|
200800312a | ||
|
|
b7a90ce768 | ||
|
|
f359ed0983 | ||
|
|
6456285753 | ||
|
|
833944e228 | ||
|
|
db0e726954 | ||
|
|
08612cc3bf | ||
|
|
7415e24fc3 | ||
|
|
ecb054af56 | ||
|
|
39f70b0c83 | ||
|
|
7cb4d4a903 | ||
|
|
8feaced92e | ||
|
|
97e4cceb94 | ||
|
|
2fa4fd23af | ||
|
|
976ea7cd3c | ||
|
|
d5ab83aa34 | ||
|
|
cbae8b5c14 | ||
|
|
854080f7af | ||
|
|
fbb3891e79 | ||
|
|
4d8ee65ca7 | ||
|
|
6093acc813 | ||
|
|
785a40ff9d | ||
|
|
2bc22c5450 | ||
|
|
cdc658695f | ||
|
|
dd960f9306 | ||
|
|
6e1c9d44a4 | ||
|
|
26bcb26bb7 | ||
|
|
f04ddceacf | ||
|
|
3e01b19d6f | ||
|
|
9f1e521857 | ||
|
|
d9226888b2 | ||
|
|
210d7738b9 | ||
|
|
c19ab2b24f | ||
|
|
02dc198a9f | ||
|
|
227cf41612 | ||
|
|
66f373fb57 | ||
|
|
8f3ed733b9 | ||
|
|
9f71cd2437 | ||
|
|
6ec708c771 | ||
|
|
af3febd79f | ||
|
|
60e0d4c530 | ||
|
|
4aeae53a61 | ||
|
|
d5c4eca739 | ||
|
|
1e4ef7b313 | ||
|
|
fd18877dae | ||
|
|
c754ecd5d1 | ||
|
|
e688cc31f0 | ||
|
|
b803e42189 | ||
|
|
04769c54e4 | ||
|
|
8f18aebf90 | ||
|
|
8e5535157f | ||
|
|
8f43a346e9 | ||
|
|
f2cb553c9a | ||
|
|
4c17277539 | ||
|
|
2cd914d366 | ||
|
|
39084192ff | ||
|
|
23dfdad454 | ||
|
|
4de0fd8cbd | ||
|
|
e3a5663a05 | ||
|
|
246bc850a1 | ||
|
|
319232568a | ||
|
|
f71d97dd48 | ||
|
|
7a932cdf00 | ||
|
|
4e76768bc9 | ||
|
|
cb9ad6f64d | ||
|
|
16df151977 | ||
|
|
f67c2147a7 | ||
|
|
738c8ffff0 | ||
|
|
f107ff8cf0 | ||
|
|
2c13a2706c | ||
|
|
46c1762fb0 | ||
|
|
4e02f7ddb5 | ||
|
|
edcbbbce25 | ||
|
|
cdae98d36b | ||
|
|
5292736779 | ||
|
|
e16d58af0c | ||
|
|
f80bc0a523 | ||
|
|
a5ee640935 | ||
|
|
4c325724ec | ||
|
|
3475aaf384 | ||
|
|
bcc5282aba | ||
|
|
2cca4fa47f | ||
|
|
e8d7dfa386 | ||
|
|
4f81246fd4 | ||
|
|
4fe62b8063 | ||
|
|
b3967fd597 | ||
|
|
1f05881283 | ||
|
|
a9afb3cbff | ||
|
|
c562b3b6a3 | ||
|
|
b0cbf711dc | ||
|
|
7e02cfdc9f | ||
|
|
34fdbaa26b | ||
|
|
818d391284 | ||
|
|
08c32a7a12 | ||
|
|
56104bd047 | ||
|
|
2ef5cd7d4c | ||
|
|
74b3aae5c6 | ||
|
|
e9b3b5090c | ||
|
|
9bac6f4ce2 | ||
|
|
39c46ef6be | ||
|
|
78d83bb3ce | ||
|
|
d57ccf7ec9 | ||
|
|
ada2e19829 | ||
|
|
a7c7a5e18b | ||
|
|
180de0c9a9 | ||
|
|
8f0d5c73b3 | ||
|
|
3b00e8229c | ||
|
|
e97726cde3 | ||
|
|
d38e8b8f6c | ||
|
|
0014e2ac14 | ||
|
|
370615e5e4 | ||
|
|
f93c743d03 | ||
|
|
6add645597 | ||
|
|
bdda3a6698 | ||
|
|
126aacb2e3 | ||
|
|
1afc8e40df | ||
|
|
9543e5d6ac | ||
|
|
5e89b8c6d1 | ||
|
|
fd3f8fa5fc | ||
|
|
86bdbb82b1 | ||
|
|
898317c16c | ||
|
|
0704404344 | ||
|
|
a74548d3cd | ||
|
|
6ff02677d2 | ||
|
|
4db4ca08b2 | ||
|
|
7bb7c30842 | ||
|
|
35ebb10378 | ||
|
|
b77451bb3a | ||
|
|
cf00c33f90 | ||
|
|
61adf58f4f | ||
|
|
452df39a52 | ||
|
|
49a08ba7db | ||
|
|
7082e63b11 | ||
|
|
d7f00a996f | ||
|
|
cf033504c2 | ||
|
|
e866a4ba04 | ||
|
|
90f3c5e2d9 | ||
|
|
fb8ed0b46b | ||
|
|
12640f7092 | ||
|
|
5f9cc585b1 | ||
|
|
262771a69c | ||
|
|
a1ffe15142 | ||
|
|
30bc761391 | ||
|
|
2a0e087461 | ||
|
|
828b81e5ef | ||
|
|
fe3f835b3e | ||
|
|
6dd76afad5 | ||
|
|
20041d65bf | ||
|
|
028d2c319f | ||
|
|
9e39937072 | ||
|
|
07a3c1848c | ||
|
|
dde0c70a81 | ||
|
|
76d6e61941 | ||
|
|
bca50310f6 | ||
|
|
632686cfa5 | ||
|
|
1262b72f5c | ||
|
|
e985f7c105 | ||
|
|
596487b9ad | ||
|
|
a7c0440e9b | ||
|
|
03ffb50dcf | ||
|
|
e201f57861 | ||
|
|
fea62a77bc | ||
|
|
dfad535dea | ||
|
|
fa14865163 | ||
|
|
ef35028ecb | ||
|
|
fb97e15e4b | ||
|
|
da4f013a5d | ||
|
|
fd2c26188f | ||
|
|
89cf0154f4 | ||
|
|
cb1297ec74 | ||
|
|
37904a0f80 | ||
|
|
6c18627b0f | ||
|
|
d5aa8d373b | ||
|
|
7bf31dad35 | ||
|
|
29d390d54d | ||
|
|
b69f0b2cd0 | ||
|
|
0308fb45be | ||
|
|
0325370fed | ||
|
|
1e4bd0388f | ||
|
|
d1b06f0be3 | ||
|
|
3e40b35ef1 | ||
|
|
70873906b7 | ||
|
|
f93a8a93b4 | ||
|
|
4121d3712d | ||
|
|
4546dfdf17 | ||
|
|
4011294da0 | ||
|
|
48f6f83f05 | ||
|
|
51f5808430 | ||
|
|
695049bfa3 | ||
|
|
40f98f0f38 | ||
|
|
c26c79c34c | ||
|
|
2c96f6125f | ||
|
|
5047fd9fce | ||
|
|
50e5ea4e54 | ||
|
|
ce45c9b267 | ||
|
|
1881f4f7cd | ||
|
|
30762c211e | ||
|
|
5090f55eba | ||
|
|
1f1e8c9f7d | ||
|
|
e44ca4185a | ||
|
|
8fd2e48c1b | ||
|
|
69ccb185e8 | ||
|
|
a88e833831 | ||
|
|
64f48df62d | ||
|
|
0f5490075b | ||
|
|
d5f2bbf093 | ||
|
|
7dd97f2f74 | ||
|
|
8e464c53a8 | ||
|
|
7689a51f53 | ||
|
|
c8a40727d1 | ||
|
|
4ef912d734 | ||
|
|
49a6d68200 | ||
|
|
6cfe229332 | ||
|
|
1079d71699 | ||
|
|
e104427767 | ||
|
|
bfd479a50b | ||
|
|
fb63bf4425 | ||
|
|
3a17011129 | ||
|
|
c339c6b54f | ||
|
|
7f71d6d9fd | ||
|
|
784e2bbb1c | ||
|
|
959377f54c | ||
|
|
6bc83e925c | ||
|
|
4ede773f5a | ||
|
|
d5ad719757 | ||
|
|
1ca9b9fa93 | ||
|
|
15024fb5a1 | ||
|
|
fa4bdef17c | ||
|
|
e2b519ef3b | ||
|
|
09c307d679 | ||
|
|
880c8e804c | ||
|
|
5f0764b65c | ||
|
|
63e6014b27 | ||
|
|
83fcd9ad16 | ||
|
|
f9792ed7f3 | ||
|
|
d6ab470c58 | ||
|
|
666a5a8777 | ||
|
|
21f1e64559 | ||
|
|
752bac099b | ||
|
|
a5de79beb6 | ||
|
|
483c01b681 | ||
|
|
992b8874fc | ||
|
|
2a55efb322 | ||
|
|
23d58a3cc0 | ||
|
|
70e345b2ce | ||
|
|
650a701317 | ||
|
|
679339d00c | ||
|
|
fd5730b04a | ||
|
|
b7f08cd0f7 | ||
|
|
8762f7ab3d | ||
|
|
a9b7b175ff | ||
|
|
52b93dd84e | ||
|
|
6a09a44ef7 | ||
|
|
32a627eda9 | ||
|
|
67bafa6302 | ||
|
|
6017eefb32 | ||
|
|
ae197fc85f | ||
|
|
22aba6dd8a | ||
|
|
88bbdfc7fc | ||
|
|
d0c9b7c405 | ||
|
|
e7698a4610 | ||
|
|
ab05b7ae70 | ||
|
|
327fb1f916 | ||
|
|
bb7f5abc6c | ||
|
|
393d6b97e6 | ||
|
|
3b8d63dfb6 | ||
|
|
6763196d78 | ||
|
|
e1da58da02 | ||
|
|
91cec515d4 | ||
|
|
cc585a014f | ||
|
|
e641cccb42 | ||
|
|
cc73d4104b | ||
|
|
250552cb3d | ||
|
|
1d653973e9 | ||
|
|
7bf9ba5502 | ||
|
|
14c9773890 | ||
|
|
39fddb1214 | ||
|
|
fe0923ba6c | ||
|
|
dfaeda7cd5 | ||
|
|
9b7fee673e | ||
|
|
925269d17b | ||
|
|
266fe3a3f7 | ||
|
|
66e0c87894 | ||
|
|
55433f468a | ||
|
|
956cdc77fa | ||
|
|
83a0b03523 | ||
|
|
25b9e290a5 | ||
|
|
ab860981d8 | ||
|
|
a0cae78ba3 | ||
|
|
488f40a20f | ||
|
|
05b018a837 | ||
|
|
fc37ffdfcf | ||
|
|
8c65f3c748 | ||
|
|
354106be7b | ||
|
|
9e4dfd8058 | ||
|
|
faf5f9e5a4 | ||
|
|
e4687e0f03 | ||
|
|
c5b17851e0 | ||
|
|
b238abac52 | ||
|
|
9012ff4db2 | ||
|
|
f2595af362 | ||
|
|
39fd1d6be1 | ||
|
|
f0ede64ded | ||
|
|
0a4185a919 | ||
|
|
056163ee57 | ||
|
|
797c5bbc13 | ||
|
|
97023b9a3c | ||
|
|
48a2186cf3 | ||
|
|
25cc6ad6ae | ||
|
|
b8238c2228 | ||
|
|
45c8476e67 | ||
|
|
21d4ce8d4b | ||
|
|
efb5fed462 | ||
|
|
167fea3f1e | ||
|
|
5107c6bbb9 | ||
|
|
e428130e4a | ||
|
|
5978031f7a | ||
|
|
0e94c7b5fc | ||
|
|
acf4df9f87 | ||
|
|
6b0d0d4dc8 | ||
|
|
1bed3c6056 | ||
|
|
d41963d5fa | ||
|
|
9e9142aa8e | ||
|
|
bdc4c38d04 | ||
|
|
967338193e | ||
|
|
d820239a7c | ||
|
|
2e457753d0 | ||
|
|
b33f4cca6b | ||
|
|
d95e3b5b54 | ||
|
|
198a0ecad6 | ||
|
|
3e19da1258 | ||
|
|
b083d3245d | ||
|
|
b9d7dd1514 | ||
|
|
f3faa47814 | ||
|
|
2d4e16d5e1 | ||
|
|
fadfea2046 | ||
|
|
6b19b78f87 | ||
|
|
f734bdb314 | ||
|
|
a17f752705 | ||
|
|
05321c9dce | ||
|
|
20cf4cce92 | ||
|
|
55fee0471f | ||
|
|
e2f760aef4 | ||
|
|
8b27f2c47b | ||
|
|
8985b7b01b | ||
|
|
1f40d72081 | ||
|
|
fdd7f8e5f9 | ||
|
|
2c4184b81e | ||
|
|
ccb700d00a | ||
|
|
78f5ff17e6 | ||
|
|
4c495ce1b0 | ||
|
|
c47f81a3e0 | ||
|
|
ffeb45eda3 | ||
|
|
0d91006e0b | ||
|
|
a38ddc395b | ||
|
|
7b05245286 | ||
|
|
03eb921ca6 | ||
|
|
47dc5ad220 | ||
|
|
b73fa47089 | ||
|
|
775a27eea0 | ||
|
|
b8b792e844 | ||
|
|
b1419e856c | ||
|
|
760e0a0816 | ||
|
|
ef35702c4b | ||
|
|
6d439f4f63 | ||
|
|
dedab125e1 | ||
|
|
6743636996 | ||
|
|
4bcfe72485 | ||
|
|
2c70059c2d | ||
|
|
5ca48de07f | ||
|
|
38f436aa6d | ||
|
|
ba8f83d09d | ||
|
|
fc429408a3 | ||
|
|
e44d70d6d3 | ||
|
|
fe96f6d783 | ||
|
|
c072c7d008 | ||
|
|
d938c2595e | ||
|
|
604217a957 | ||
|
|
207201d907 | ||
|
|
3ad7a2cfaa | ||
|
|
6c69e16f31 | ||
|
|
a771b2c6c1 | ||
|
|
99fe114502 | ||
|
|
ca6e9e5e34 | ||
|
|
ae1e030824 | ||
|
|
c65d6f7ff8 | ||
|
|
0e332c0c12 | ||
|
|
e70a6129cf | ||
|
|
809173c3f5 | ||
|
|
8d12f3fe6b | ||
|
|
6ed833368a | ||
|
|
04d3817863 | ||
|
|
9250376ec2 | ||
|
|
d8bafa9e1b | ||
|
|
1ce82372fd | ||
|
|
6d14f314d7 | ||
|
|
aaee6d2cbb | ||
|
|
155718aa69 | ||
|
|
2b4c0f5394 | ||
|
|
6c14dd4ba0 | ||
|
|
c25b47e3a5 | ||
|
|
9f20c027bf | ||
|
|
58341a9396 | ||
|
|
bdbf51b012 | ||
|
|
32f301d85e | ||
|
|
24008e8741 | ||
|
|
84afbf6b45 | ||
|
|
1ba917a3e2 | ||
|
|
d27a5b7f49 | ||
|
|
84f6694066 | ||
|
|
712b2db3ef | ||
|
|
b106a61352 | ||
|
|
874000624d | ||
|
|
6664eec8ce | ||
|
|
787c71a9de | ||
|
|
c4494d8c02 | ||
|
|
0b3aea92d1 | ||
|
|
58292d59bc | ||
|
|
d33e7f8ba9 | ||
|
|
22fb0f7bbf | ||
|
|
60264d65db | ||
|
|
bcdc8d46f5 | ||
|
|
0719780c91 | ||
|
|
fa357dd139 | ||
|
|
6c84035c7e | ||
|
|
345ff6f88d | ||
|
|
578087ec96 | ||
|
|
ad305fd019 | ||
|
|
a2dc4b1834 | ||
|
|
7a7a144690 | ||
|
|
0bd776dde5 | ||
|
|
25c6d019fe | ||
|
|
17fa807279 | ||
|
|
5c53907895 | ||
|
|
fb74e0615e | ||
|
|
2e29a681e7 | ||
|
|
37a62de458 | ||
|
|
e2809b582f | ||
|
|
a1d6087814 | ||
|
|
0b55de62dc | ||
|
|
86f2d0ada3 | ||
|
|
0866d23481 | ||
|
|
580481d4f7 | ||
|
|
10aaeac836 | ||
|
|
abdf449aa5 | ||
|
|
c8fde2d4a4 | ||
|
|
8e4d545994 | ||
|
|
df415d1ce6 | ||
|
|
4be8a563c7 | ||
|
|
a0432c7aa1 | ||
|
|
52b935de90 | ||
|
|
e39d56781f | ||
|
|
818186bddc | ||
|
|
5bf00cf247 | ||
|
|
e738a0d733 | ||
|
|
e9f2f9e5ed | ||
|
|
914981831a | ||
|
|
9c6fde9499 | ||
|
|
1b524bd5d4 | ||
|
|
4850dde27e | ||
|
|
f01679c4f6 | ||
|
|
a90e9e75c1 | ||
|
|
569cc10153 | ||
|
|
a329e75748 | ||
|
|
e01c082bb8 | ||
|
|
ec815f50bf | ||
|
|
179a62c530 | ||
|
|
1bd2a4b426 | ||
|
|
5666c09815 | ||
|
|
50789b2558 | ||
|
|
02c2f55b06 | ||
|
|
290073dd60 | ||
|
|
ec9eed6722 | ||
|
|
a662040043 | ||
|
|
15a5cc8b17 | ||
|
|
83c1a296da | ||
|
|
9137eb474c | ||
|
|
82fecfae1b | ||
|
|
7cd4666b8e | ||
|
|
0c351aeac0 | ||
|
|
434c6e17b3 | ||
|
|
a9fdf91fe5 | ||
|
|
33080833ef | ||
|
|
aef512e49e | ||
|
|
f7340e83cc | ||
|
|
e970fdff17 | ||
|
|
2e1afa0b97 | ||
|
|
095d3191b8 | ||
|
|
9c5fdb03b7 | ||
|
|
04f7ac880f | ||
|
|
7f44946746 | ||
|
|
9482187c20 | ||
|
|
1aa8d094dc | ||
|
|
d9ec0ac3ad | ||
|
|
78e92234d6 | ||
|
|
6a698749e4 | ||
|
|
bc61ea35b5 | ||
|
|
9021d8392d | ||
|
|
94d5d94547 | ||
|
|
688ba62db7 | ||
|
|
09ad60f98d | ||
|
|
2f187a853e | ||
|
|
c65b71d51d | ||
|
|
c3569d1842 | ||
|
|
d3c58081ec | ||
|
|
653fc5851d | ||
|
|
fc1d73ba60 | ||
|
|
d9fbd26b85 | ||
|
|
2bd05827f9 | ||
|
|
bbdb113f4e | ||
|
|
5ff3f2dbbb | ||
|
|
bd496066dc | ||
|
|
336e78eca0 | ||
|
|
4109dca3f7 | ||
|
|
22ba2459c9 | ||
|
|
f4985395a9 | ||
|
|
68edb184cd | ||
|
|
6c0575233f | ||
|
|
026b632f8f | ||
|
|
405adee71d | ||
|
|
11fbbc6df6 | ||
|
|
52106e8280 | ||
|
|
0f44c9db50 | ||
|
|
21b809794a | ||
|
|
6f66376bb8 | ||
|
|
6678f6bd72 | ||
|
|
5fd82c7f15 | ||
|
|
8f1b9fc670 | ||
|
|
0e408053e2 | ||
|
|
1a6ca503a5 | ||
|
|
ab2a618335 | ||
|
|
19175badee | ||
|
|
6c9152a95c | ||
|
|
3e366d8de0 | ||
|
|
ebf2ff3ceb | ||
|
|
586660d6e0 | ||
|
|
c481d51c59 | ||
|
|
7d89d90851 | ||
|
|
b3486c1696 | ||
|
|
1e66e01849 | ||
|
|
c4df3b7074 | ||
|
|
03acf625c5 | ||
|
|
e3f2eb4c1d | ||
|
|
f26702734e | ||
|
|
decc99da48 | ||
|
|
c50e441323 | ||
|
|
da2ccdfd2e | ||
|
|
1918601b9f | ||
|
|
5c9f3bee30 | ||
|
|
2efd082525 | ||
|
|
5336352ce9 | ||
|
|
f872586073 | ||
|
|
d36cf2b116 | ||
|
|
0c940535bf | ||
|
|
f11cc545f3 | ||
|
|
b6cfb67a28 | ||
|
|
af6c2b98e9 | ||
|
|
6eb8c0eb0c | ||
|
|
538786ba71 | ||
|
|
1ebaa6d03d | ||
|
|
b497c2186e | ||
|
|
4b9f0bc288 | ||
|
|
2db6ae3a09 | ||
|
|
8eccfd7faf | ||
|
|
6fb7ebc6c7 | ||
|
|
f9227b0797 | ||
|
|
8c8cd15280 | ||
|
|
3a7e83898f | ||
|
|
21c7cb51d6 | ||
|
|
91964ed256 | ||
|
|
149ab3c7b5 | ||
|
|
702cde7546 | ||
|
|
87cacacd72 | ||
|
|
d118edac20 | ||
|
|
add6c6aa7c | ||
|
|
4e5f4cc0de | ||
|
|
89d333f3bb | ||
|
|
ab362f96c3 | ||
|
|
90082b46ff | ||
|
|
4edc791633 | ||
|
|
30a2286aaa | ||
|
|
e289f8f8ab | ||
|
|
9823f9e795 | ||
|
|
7364f2ec2a | ||
|
|
8261c0c3a2 | ||
|
|
44d4800b2c | ||
|
|
0b6c409dc6 | ||
|
|
a44d169416 | ||
|
|
b4ee485906 | ||
|
|
16e266c65f | ||
|
|
eda21d5192 | ||
|
|
115f9f7f5e | ||
|
|
b0bf039a61 | ||
|
|
4ecf940674 | ||
|
|
0f54c65d5d | ||
|
|
415b4ceed1 | ||
|
|
e5e0c4bf9d | ||
|
|
03e56fece5 | ||
|
|
32300906c9 | ||
|
|
643846eb04 | ||
|
|
825c3adf62 | ||
|
|
0e51b12d61 | ||
|
|
82434a248c | ||
|
|
d29785e66d | ||
|
|
09f6a37292 | ||
|
|
ef52c2f73a | ||
|
|
fc22dd01c9 | ||
|
|
eab4275cd7 | ||
|
|
c38809a71a | ||
|
|
62a9510d61 | ||
|
|
de2473d076 | ||
|
|
d14f27ac7d | ||
|
|
3ffef50dfc | ||
|
|
b4dd0c6d94 | ||
|
|
fc8622689d | ||
|
|
6b5cef218f | ||
|
|
4acc2f5e15 | ||
|
|
9cabd16bc9 | ||
|
|
3dc73e67ab | ||
|
|
baafadac69 | ||
|
|
6cee22585e | ||
|
|
b77450fc3e | ||
|
|
dae6ee2c47 | ||
|
|
2187f66149 | ||
|
|
27ff99a9a5 | ||
|
|
952f181377 | ||
|
|
9219bfba0e | ||
|
|
17d93421e7 | ||
|
|
052802ff8d | ||
|
|
8ef4098a1f | ||
|
|
655bc8b08e | ||
|
|
7ddef39918 | ||
|
|
b4588f6425 | ||
|
|
4b1e8f6e8b | ||
|
|
2bbc3b9cbc | ||
|
|
9b3c6dec62 | ||
|
|
1559b5dfdd | ||
|
|
9ae284e885 | ||
|
|
2529753684 | ||
|
|
a07d36d769 | ||
|
|
01d4534726 | ||
|
|
77f88b81c3 | ||
|
|
e4547b0045 | ||
|
|
16073ababa | ||
|
|
7c2f1eb03e | ||
|
|
1a7a504e17 | ||
|
|
31490003d2 | ||
|
|
93149a34b7 | ||
|
|
3ccbcf9239 | ||
|
|
4a2b492e92 | ||
|
|
93edbc41d0 | ||
|
|
1e50f8584c | ||
|
|
2a3cd0dce4 | ||
|
|
9217262b39 | ||
|
|
0549472372 | ||
|
|
284ae8dcc6 | ||
|
|
3aa92c082a | ||
|
|
9be8412f23 | ||
|
|
abf435e843 | ||
|
|
b1ee7b63d2 | ||
|
|
dd9e53d450 | ||
|
|
a7f6cea57f | ||
|
|
64a3b89cf7 | ||
|
|
2a87c9f15f | ||
|
|
8a134630fc | ||
|
|
50a1c35429 | ||
|
|
db1692fbbe | ||
|
|
42d1fa8523 | ||
|
|
54c672b097 | ||
|
|
569a869e84 | ||
|
|
9e499ce782 | ||
|
|
064f364f65 | ||
|
|
e19b847553 | ||
|
|
5ca85a00df | ||
|
|
3f23ab6348 | ||
|
|
f62651ff3f | ||
|
|
9af173032c | ||
|
|
6683662bc0 | ||
|
|
191d45a755 | ||
|
|
e01fdec00e | ||
|
|
c4e612608c | ||
|
|
c2e7be60b0 | ||
|
|
e585ede4de | ||
|
|
2cf27de3d5 | ||
|
|
cafe910132 | ||
|
|
b5f7e0a463 | ||
|
|
66c4b54de2 | ||
|
|
83d4be7a33 | ||
|
|
0192c1e252 | ||
|
|
3ea3bcc5cf | ||
|
|
a2248ea22e | ||
|
|
a36d99226c | ||
|
|
8fc12b9d19 | ||
|
|
347aa095e5 | ||
|
|
f78edf0c11 | ||
|
|
a2fc295a20 | ||
|
|
516e39b211 | ||
|
|
4689a59e3c | ||
|
|
167c4f699d | ||
|
|
0438df97ba | ||
|
|
fa4c9a08b3 | ||
|
|
833a8cbb9f | ||
|
|
84f045c234 | ||
|
|
4118cbbe1d | ||
|
|
430b553264 | ||
|
|
a78689a865 | ||
|
|
f7129de8b8 | ||
|
|
ee026cd40d | ||
|
|
1dcd719d96 | ||
|
|
47fe55067e | ||
|
|
bf20ae0c09 | ||
|
|
d7c7ded784 | ||
|
|
b38712bc6d | ||
|
|
e8d16d6823 | ||
|
|
1a30d00194 | ||
|
|
39ababef89 | ||
|
|
a151a2efcb | ||
|
|
e9b64adae9 | ||
|
|
8dbcdc28ed | ||
|
|
d617c3fa2f | ||
|
|
21a014790f | ||
|
|
344ef3bf8b | ||
|
|
4fd3df9b87 | ||
|
|
e0a9341fc0 | ||
|
|
cd0df40a4f | ||
|
|
7a7e1578d3 | ||
|
|
c8fbb5153a | ||
|
|
07e8603345 | ||
|
|
62df429ffa | ||
|
|
b1f1f7aa05 | ||
|
|
d173dd772d | ||
|
|
bceb66f3b0 | ||
|
|
ae9fc68b37 | ||
|
|
18352d834e | ||
|
|
6a05e11239 | ||
|
|
fc9689b000 | ||
|
|
3cc8b2abf7 | ||
|
|
7e7365eac2 | ||
|
|
129483b21c | ||
|
|
03992b3232 | ||
|
|
9dc059fa3a | ||
|
|
4a3f052b3a | ||
|
|
7efd62233e | ||
|
|
11eeaeb4b9 | ||
|
|
8adfca5fb7 | ||
|
|
fac52a6bc6 | ||
|
|
fff7667c68 | ||
|
|
27e362c6ea | ||
|
|
b75b41250b | ||
|
|
24cd8e3eeb | ||
|
|
69094139bf | ||
|
|
70f617ddd6 | ||
|
|
2660e5ec8c | ||
|
|
12d959f780 | ||
|
|
57751a7780 | ||
|
|
8cf246bb9e | ||
|
|
2fc7c62a36 | ||
|
|
53c2b26dde | ||
|
|
648fac7e8a | ||
|
|
eb7eb37e65 | ||
|
|
0bd5d4420e | ||
|
|
799ee19334 | ||
|
|
10aececc6a | ||
|
|
7f3ca0b76a | ||
|
|
ce1353bdc5 | ||
|
|
cba90e20e9 | ||
|
|
1eadc64dc0 | ||
|
|
3ae376c584 | ||
|
|
0f555693f7 | ||
|
|
ec8d42a477 | ||
|
|
abb77fc5b1 | ||
|
|
aa9e219fe3 | ||
|
|
374b02845a | ||
|
|
265255120b | ||
|
|
a558c9fd04 | ||
|
|
0278873fb5 | ||
|
|
f01588f927 | ||
|
|
e75073b495 | ||
|
|
eb57d1f405 | ||
|
|
0c04bf56df | ||
|
|
65a7e2b257 | ||
|
|
546e08a5cf | ||
|
|
3d312fd405 | ||
|
|
5a3851fd66 | ||
|
|
f2446e4a66 | ||
|
|
540178d079 | ||
|
|
ed65f75b68 | ||
|
|
839fde4d7f | ||
|
|
d656e3d9e9 | ||
|
|
9c358e4a14 | ||
|
|
c0fda8d37d | ||
|
|
a81de65912 | ||
|
|
6aaf1b92f5 | ||
|
|
76e15ee1ba | ||
|
|
837b071525 | ||
|
|
454e761f98 | ||
|
|
9ea973e9e9 | ||
|
|
2f79caa6b9 | ||
|
|
04d7deeae3 | ||
|
|
521305f8e9 | ||
|
|
b30b61340f | ||
|
|
74ee69daf1 | ||
|
|
7896f7676a | ||
|
|
c5cbea00ab | ||
|
|
6fdc56a610 | ||
|
|
0ceaa5b753 | ||
|
|
0c2d564c33 | ||
|
|
e6b21bdd57 | ||
|
|
93e3ec36ed | ||
|
|
38790a27ed | ||
|
|
bd93ec5cc0 | ||
|
|
766796ae1e | ||
|
|
704e332bee | ||
|
|
57bcbdf45c | ||
|
|
dbec110bac | ||
|
|
8245433d7f | ||
|
|
8972132eb0 | ||
|
|
0eb476c3fa | ||
|
|
fc915d0469 | ||
|
|
860f985e87 | ||
|
|
0856f68061 | ||
|
|
9b8094329d | ||
|
|
c77ade5b2f | ||
|
|
4841d31179 | ||
|
|
3bd8ae4843 | ||
|
|
f77d383a9f | ||
|
|
51ebe2407d | ||
|
|
a7e27d1a64 | ||
|
|
7e5bdac2a0 | ||
|
|
027054ae02 | ||
|
|
2d865cc9e6 | ||
|
|
f3328513f3 | ||
|
|
ad0c3ebf07 | ||
|
|
aae650fe3a | ||
|
|
b52aba4ef5 | ||
|
|
36e2dae6b0 | ||
|
|
12656646ae | ||
|
|
34352afd53 | ||
|
|
2b8d91fb1b | ||
|
|
e99e9b6181 | ||
|
|
0b709a4393 | ||
|
|
34f2229479 | ||
|
|
69de6c0467 | ||
|
|
683257b697 | ||
|
|
a00d880a3f | ||
|
|
63161b357b | ||
|
|
7003d21f09 | ||
|
|
6432445add | ||
|
|
7a33af387e | ||
|
|
27fff76ec3 | ||
|
|
a3fee97bcb | ||
|
|
d1313cd782 | ||
|
|
d742108967 | ||
|
|
338eb9c594 | ||
|
|
2c4c435072 | ||
|
|
5d1ef4d3d4 | ||
|
|
dfc17a067d | ||
|
|
fa1f077aaf | ||
|
|
ca1c5c0b40 | ||
|
|
b2d53d8d18 | ||
|
|
0f2126e6f0 | ||
|
|
a615eda205 | ||
|
|
abf88fe509 | ||
|
|
a7c11a994b | ||
|
|
1d80969b7f | ||
|
|
47eb5124fa | ||
|
|
c2c60969af | ||
|
|
9501c2c6b1 | ||
|
|
4212ea7327 | ||
|
|
8ae39a95db | ||
|
|
c6541ea128 | ||
|
|
9e353e09b5 | ||
|
|
bc10845cb5 | ||
|
|
f810264c65 | ||
|
|
5e6b348685 | ||
|
|
8ae9b75150 | ||
|
|
35efc897df | ||
|
|
bd1de98c84 | ||
|
|
1bebb2f5c0 | ||
|
|
d7e4c6ed00 | ||
|
|
e9240d0dd7 | ||
|
|
bcb24c1a58 | ||
|
|
bef8203da2 | ||
|
|
3b7d83a1a6 | ||
|
|
a55ed27679 | ||
|
|
bc4257dc1e | ||
|
|
73ef89e03a | ||
|
|
828027f236 | ||
|
|
f04ef7c20d | ||
|
|
4f2699f86d | ||
|
|
9823768e95 | ||
|
|
eb213be9d9 | ||
|
|
7f89b8aae8 | ||
|
|
f97fc0dd3d | ||
|
|
1bd85cbc09 | ||
|
|
45c15fc8c6 | ||
|
|
da6311fb1e | ||
|
|
6bd3136fe0 | ||
|
|
f227d5adb1 | ||
|
|
f81b466c9a | ||
|
|
72c54be164 | ||
|
|
0d5c2a98c0 | ||
|
|
a41b5ea4a9 | ||
|
|
c75214acb8 | ||
|
|
de7bc83076 | ||
|
|
265abea0f6 | ||
|
|
4653ac32e0 | ||
|
|
2caf1e112e | ||
|
|
d2e56c568f | ||
|
|
507d8994e6 | ||
|
|
af339a119f | ||
|
|
8db4debcc4 | ||
|
|
e37d538ca2 | ||
|
|
c63c514feb | ||
|
|
c7a9ac3bf7 | ||
|
|
3374fd1818 | ||
|
|
48f0775a1a | ||
|
|
d8d7fc4858 | ||
|
|
18c70bf3f7 | ||
|
|
f2e4bf208b | ||
|
|
7351b8fee6 | ||
|
|
11cb40af88 | ||
|
|
5ae18dcd00 | ||
|
|
949ab477a8 | ||
|
|
d39bec8163 | ||
|
|
2a036894b6 | ||
|
|
d46c5b10f1 | ||
|
|
3291a921ad | ||
|
|
563df6ca3a | ||
|
|
a30cbcc2ce | ||
|
|
163ab75379 | ||
|
|
e2e74a6a5b | ||
|
|
846cfc005d | ||
|
|
80487bc4b1 | ||
|
|
75116ff6b9 | ||
|
|
a29fe9ff66 | ||
|
|
eee93fc581 | ||
|
|
3bac20a8d6 | ||
|
|
1403578911 | ||
|
|
aba155de7f | ||
|
|
2be14cab3e | ||
|
|
062d286c23 | ||
|
|
ef8688b1a4 | ||
|
|
6682653ddd | ||
|
|
b4db69d8ba | ||
|
|
82f15f9b0e | ||
|
|
7ec92d8c06 | ||
|
|
1f1abcdc57 | ||
|
|
87dcc6933c | ||
|
|
501d0f77af | ||
|
|
8252a2fa8f | ||
|
|
3da29eae45 | ||
|
|
f08075c22b | ||
|
|
f864eb39df | ||
|
|
0706ba502c | ||
|
|
68563f34af | ||
|
|
c663105fcb | ||
|
|
a250cbaf12 | ||
|
|
1699240936 | ||
|
|
019b907985 | ||
|
|
26cf7c2e3f | ||
|
|
dbfe623be9 | ||
|
|
14e4bc2435 | ||
|
|
faf530a49a | ||
|
|
86c21f9ef4 | ||
|
|
a0fba5d1f1 | ||
|
|
369f3fc169 | ||
|
|
b1ae3f9032 | ||
|
|
59c3193e02 | ||
|
|
55e935431e | ||
|
|
d6abb27db6 | ||
|
|
5c6db98b4a | ||
|
|
a65b11aff2 | ||
|
|
5e27fef0e4 | ||
|
|
ca0496ce0a | ||
|
|
cd08eeb774 | ||
|
|
b195431357 | ||
|
|
bfaf56fee3 | ||
|
|
443fa1c832 | ||
|
|
fd008654be | ||
|
|
74bfef5f0b | ||
|
|
40b3be22dc | ||
|
|
92d3eaa706 | ||
|
|
416093ab69 | ||
|
|
62b19706cd | ||
|
|
2d74998483 | ||
|
|
24613a498b | ||
|
|
9bd7bc0db9 | ||
|
|
3848b32e1c | ||
|
|
fd13ba0a6a | ||
|
|
1bce889532 | ||
|
|
3811ac1323 | ||
|
|
af9008c81c | ||
|
|
c5028b3ce8 | ||
|
|
76c321d6b1 | ||
|
|
cb1e36d916 | ||
|
|
e71943f83b | ||
|
|
2640fc85cf | ||
|
|
4840dd25a3 | ||
|
|
91435136d5 | ||
|
|
cbdb6ac0bf | ||
|
|
d91236deda | ||
|
|
8c58df706a | ||
|
|
88a4d1a0dd | ||
|
|
1f367618ed | ||
|
|
0e804e27dd | ||
|
|
ab2a49c8d2 | ||
|
|
f3210fba96 | ||
|
|
a438619177 | ||
|
|
61653d122f | ||
|
|
959e1304d1 | ||
|
|
e374e51663 | ||
|
|
4f15b1c582 | ||
|
|
ece0f9189f | ||
|
|
f0087ab80a | ||
|
|
5360313271 | ||
|
|
ca65dfc4fa | ||
|
|
41f0b472c0 | ||
|
|
ee55b85945 | ||
|
|
a555e936c4 | ||
|
|
d2f64a1163 | ||
|
|
b04f4e0f55 | ||
|
|
c814fc4edd | ||
|
|
3329e6bc4a | ||
|
|
c739e049c3 | ||
|
|
37fbb52d19 | ||
|
|
5acb5ad9b7 | ||
|
|
d466689c94 | ||
|
|
747f27d26f | ||
|
|
f5f92cb67b | ||
|
|
39e4b7e03f | ||
|
|
10b7af08be | ||
|
|
5496a82043 | ||
|
|
0847505add | ||
|
|
122c996714 | ||
|
|
105b80101e | ||
|
|
ec03170e6e | ||
|
|
e4d84dad0a | ||
|
|
9c1b55b9fb | ||
|
|
52c0929e3b | ||
|
|
793ff1c163 | ||
|
|
6f289e6dfa | ||
|
|
1bcd402e1c | ||
|
|
3109fa583b | ||
|
|
4360527b43 | ||
|
|
e0aa11f4d7 | ||
|
|
a14aadd914 | ||
|
|
8f8b7d9ecd | ||
|
|
b32125d0ad | ||
|
|
c94835432e | ||
|
|
5f4454737d | ||
|
|
dc4ca47bc7 | ||
|
|
5df70eabd8 | ||
|
|
2d64647543 | ||
|
|
193278fb53 | ||
|
|
654f3dff06 | ||
|
|
1b9ec31d84 | ||
|
|
efe1b3e0eb | ||
|
|
d056793309 | ||
|
|
79f96c8f91 | ||
|
|
2605cd04d4 | ||
|
|
6365071bb4 | ||
|
|
389131f2ab | ||
|
|
cf630e4f2c | ||
|
|
7203627483 | ||
|
|
66d010a458 | ||
|
|
577ea41143 | ||
|
|
08b0d34a4e | ||
|
|
6aa9bd3f7b | ||
|
|
8ee59101db | ||
|
|
bf21938bcc | ||
|
|
1a2cd8e057 | ||
|
|
a492573970 | ||
|
|
74e61b08b8 | ||
|
|
dad3deccd3 | ||
|
|
600d95aa41 | ||
|
|
dc03a32e9c | ||
|
|
c136fe226c | ||
|
|
e808c027a9 | ||
|
|
7ac647f3b6 | ||
|
|
77d0516674 | ||
|
|
3b909d2fd2 | ||
|
|
3d4307a848 | ||
|
|
4a8da53d85 | ||
|
|
bec207568a | ||
|
|
075529ddc9 | ||
|
|
cc65efa86c | ||
|
|
f34486a15a | ||
|
|
470cfa6c4e | ||
|
|
a6446ce43e | ||
|
|
77ae0f490d | ||
|
|
72cbfd4d96 | ||
|
|
41a4ff828c | ||
|
|
da8b9d58ac | ||
|
|
ffa76c3a19 | ||
|
|
9463fbbe2b | ||
|
|
9bd06cefdd | ||
|
|
a09d2a581f | ||
|
|
4c0349a6a8 | ||
|
|
9d1b235472 | ||
|
|
20b40061e2 | ||
|
|
98b32c6276 | ||
|
|
8f8e157abe | ||
|
|
fa9fc18e22 | ||
|
|
ecc8d9430c | ||
|
|
078559182a | ||
|
|
45819e68d0 | ||
|
|
8f41dbe27d | ||
|
|
bb627442d4 | ||
|
|
5d00457d79 | ||
|
|
18333fbc7c | ||
|
|
a0e383f4d9 | ||
|
|
6fb7baf789 | ||
|
|
ca644f8be0 | ||
|
|
ea1e381003 | ||
|
|
4fd1d4b426 | ||
|
|
6a776d10b7 | ||
|
|
ce3ab44609 | ||
|
|
6705a6be08 | ||
|
|
06ee9eb744 | ||
|
|
4acccc0b8d | ||
|
|
135bf18d65 | ||
|
|
945400ab4e | ||
|
|
6cc148d2ad | ||
|
|
77f3940e22 | ||
|
|
7542cdffb5 | ||
|
|
db4ac8d1d8 | ||
|
|
18e576cb53 | ||
|
|
f67a352937 | ||
|
|
f4e7b1c61c | ||
|
|
295787c948 | ||
|
|
447f9963fb | ||
|
|
e6206c0ed6 | ||
|
|
2dd0a61a6e | ||
|
|
3f1df1684a | ||
|
|
7373933a18 | ||
|
|
8c49b84faa | ||
|
|
ec7d3e73d7 | ||
|
|
58a183544a | ||
|
|
d0db337af8 | ||
|
|
01f68601d3 | ||
|
|
de527d3fdf | ||
|
|
c773815c70 | ||
|
|
8d29f97f46 | ||
|
|
c14762a495 | ||
|
|
c1494ba1ef | ||
|
|
7720f6af24 | ||
|
|
3f8088b12d | ||
|
|
1936eaa425 | ||
|
|
f66c8b6f2f | ||
|
|
a9c4e6daa8 | ||
|
|
94c511d0e0 | ||
|
|
853add7e86 | ||
|
|
4de327e0e3 | ||
|
|
88f0ccfd7e | ||
|
|
040c6bcd8c | ||
|
|
13c8d81f15 | ||
|
|
5c0ddd3a81 | ||
|
|
12f3a321b7 | ||
|
|
618e7606ef | ||
|
|
a9ad805ba9 | ||
|
|
3f83e20387 | ||
|
|
186508e75c | ||
|
|
62efc6b07e | ||
|
|
22ea449850 | ||
|
|
ff4c76ba00 | ||
|
|
1a471b73cd | ||
|
|
7901c750b6 | ||
|
|
a0512254ca | ||
|
|
fe96664afb | ||
|
|
d4222519eb | ||
|
|
88f0b04015 | ||
|
|
cfc6180233 | ||
|
|
311f69b7cf | ||
|
|
fc193568b9 | ||
|
|
afe77bbc4f | ||
|
|
50ef7b31eb | ||
|
|
39f8ae515b | ||
|
|
c72a35e92e | ||
|
|
7e65df3f39 | ||
|
|
4d629960bb | ||
|
|
9c4617eefa | ||
|
|
b952d0d2e0 | ||
|
|
55bcb99e91 | ||
|
|
6dcee70eab | ||
|
|
8fdccfa05a | ||
|
|
4f002d66be | ||
|
|
93be3f54e3 | ||
|
|
309a6af359 | ||
|
|
585ba1a1fd | ||
|
|
c707cec362 | ||
|
|
edcd103958 | ||
|
|
8897e47691 | ||
|
|
377d0af228 | ||
|
|
99035103e0 | ||
|
|
525571c32e | ||
|
|
80682b41cb | ||
|
|
a37b486227 | ||
|
|
0ca003d858 | ||
|
|
f130aa7972 | ||
|
|
5afab461ee | ||
|
|
2098e192da | ||
|
|
cc7476656f | ||
|
|
fa265fdf25 | ||
|
|
08db74b8ee | ||
|
|
aa1a65c59c | ||
|
|
ccd0eb800b | ||
|
|
360ce60b83 | ||
|
|
172d256e15 | ||
|
|
2c187b66b7 | ||
|
|
9a94ce31d8 | ||
|
|
c7f4bd265d | ||
|
|
de4839b050 | ||
|
|
50842af1e5 | ||
|
|
833a37e9a6 | ||
|
|
bf03dd8739 | ||
|
|
5814c5a365 | ||
|
|
b3d0cf9a22 | ||
|
|
0e069c2679 | ||
|
|
da9fd926c8 | ||
|
|
c09a0e7afa | ||
|
|
8923e79b29 | ||
|
|
93094c7223 | ||
|
|
9f92488443 | ||
|
|
3cbe5a84e4 | ||
|
|
1d735caf40 | ||
|
|
e446d723ee | ||
|
|
e90eb0fd61 | ||
|
|
79727bd075 | ||
|
|
2cf350b783 | ||
|
|
6b22abd526 | ||
|
|
869682a87d | ||
|
|
4e2d48b8bd | ||
|
|
7022eda1a3 | ||
|
|
af7c5e0291 | ||
|
|
c1abe0e85f | ||
|
|
060ba0721f | ||
|
|
8b3a915b2f | ||
|
|
7875cb67ec | ||
|
|
f54e334e0d | ||
|
|
e38008879a | ||
|
|
ac36f7bfc7 | ||
|
|
95f2621a3f | ||
|
|
6d5f93038b | ||
|
|
d8f1d34345 | ||
|
|
b6fd54f599 | ||
|
|
3ea9d6a70f | ||
|
|
b181733d59 | ||
|
|
d5a88d43a7 | ||
|
|
11920b8fe5 | ||
|
|
f4d319cee4 | ||
|
|
4463f75756 | ||
|
|
60ae12dfd5 | ||
|
|
11101286a3 | ||
|
|
6b921b5eda | ||
|
|
25ce1d6be0 | ||
|
|
f76d45cd9e | ||
|
|
4e43e4be78 | ||
|
|
ece9e85b41 | ||
|
|
d48eb99669 | ||
|
|
3e612e97de | ||
|
|
098bcb8b8d | ||
|
|
5039f25585 | ||
|
|
cb8cb5f7a3 | ||
|
|
255c6a8f3a | ||
|
|
b101fec16b | ||
|
|
454b78c1ac | ||
|
|
39c0a1c4f8 | ||
|
|
991e816ea2 | ||
|
|
cbcdcad43c | ||
|
|
b155508b12 | ||
|
|
7f5c50dfeb | ||
|
|
4245a6c4f0 | ||
|
|
5bfefd6a12 | ||
|
|
02bda90a29 | ||
|
|
2211efc800 | ||
|
|
7fda0c7884 | ||
|
|
aea21a9694 | ||
|
|
03abb32cca | ||
|
|
dd2078a6f5 | ||
|
|
295702867a | ||
|
|
688cd52be2 | ||
|
|
f26ae288d0 | ||
|
|
4d41584547 | ||
|
|
058e679931 | ||
|
|
a6541b60fc | ||
|
|
001b795ea0 | ||
|
|
b7f04b0663 | ||
|
|
80c35e7623 | ||
|
|
70625f85a3 | ||
|
|
7320f9d3f8 | ||
|
|
92db8eedd7 | ||
|
|
7bc7cd4671 | ||
|
|
aff6dc24b1 | ||
|
|
7d2e2344d1 | ||
|
|
ccd2dcf1f7 | ||
|
|
8958f58eca | ||
|
|
54f1098dcf | ||
|
|
169f7210c1 | ||
|
|
433526b732 | ||
|
|
71cb72b3a4 | ||
|
|
2edb59736c | ||
|
|
582570639f | ||
|
|
8f104918a8 | ||
|
|
cd589e9891 | ||
|
|
885f120aa4 | ||
|
|
bbb526e6f8 | ||
|
|
77eebbdcaa | ||
|
|
9bd13c5897 | ||
|
|
5074d696cf | ||
|
|
b4401cd409 | ||
|
|
a216197e34 | ||
|
|
43fba808a2 | ||
|
|
252d2fead1 | ||
|
|
43f6ff6d28 | ||
|
|
74c46bae7a | ||
|
|
19e4af4ce7 | ||
|
|
1a78150b60 | ||
|
|
d85b196952 | ||
|
|
89862289e2 | ||
|
|
5f95920d66 | ||
|
|
dce7636798 | ||
|
|
42bc3af109 | ||
|
|
b21d68a8ab | ||
|
|
f3a112fca3 | ||
|
|
5ac4b38bda | ||
|
|
a97e0dbe62 | ||
|
|
16efb96409 | ||
|
|
595a892f71 | ||
|
|
8e25cd2391 | ||
|
|
a1284336ba | ||
|
|
35e0184ca9 | ||
|
|
0ee7209c2b | ||
|
|
9e6ebf38c7 | ||
|
|
0782aad3bf | ||
|
|
840d6f6330 | ||
|
|
5a2833ffb8 | ||
|
|
e31bf9081f | ||
|
|
6342a77037 | ||
|
|
4bb86c0cb5 | ||
|
|
3bba27dd3c | ||
|
|
a7bb335752 | ||
|
|
01b2892775 | ||
|
|
876fe5a590 | ||
|
|
b58894ef33 | ||
|
|
30934f400a | ||
|
|
774ccc4ed2 | ||
|
|
3c35cab55e | ||
|
|
5e2e7a11c3 | ||
|
|
63b235e7ce | ||
|
|
839e045da4 | ||
|
|
144d159695 | ||
|
|
52c8b53122 | ||
|
|
d319473e3c | ||
|
|
ed172dec19 | ||
|
|
ab26c07296 | ||
|
|
9eb01d85a3 | ||
|
|
d44a4f591d | ||
|
|
bacd0e5e4e | ||
|
|
b2fc8f2a06 | ||
|
|
69dadee416 | ||
|
|
436823c2e7 | ||
|
|
314cce75b5 | ||
|
|
a6e195aa06 | ||
|
|
c23278eabd | ||
|
|
14ef3410eb | ||
|
|
87e972c138 | ||
|
|
cd30e18986 | ||
|
|
983130c9ac | ||
|
|
afce057b19 | ||
|
|
17246ba086 | ||
|
|
1b14d304d4 | ||
|
|
978a980d72 | ||
|
|
1281a264f5 | ||
|
|
d3067c83d3 | ||
|
|
cc4706ece0 | ||
|
|
255144bf78 | ||
|
|
90f70aca4d | ||
|
|
8502d9cbe3 | ||
|
|
049d6c7a35 | ||
|
|
c8add70676 | ||
|
|
421965c67d | ||
|
|
8080fb098e | ||
|
|
c7550ba845 | ||
|
|
ba46b25ac2 | ||
|
|
c0a8a91109 | ||
|
|
e5d2563a2e | ||
|
|
18e66de8d4 | ||
|
|
1d10919b91 | ||
|
|
897d492235 | ||
|
|
cc24dd50e5 | ||
|
|
bbed5c5749 | ||
|
|
b08a588c4f | ||
|
|
3e45f0f569 | ||
|
|
f95a285b59 | ||
|
|
39efed59af | ||
|
|
91572681a5 | ||
|
|
e44fe44605 | ||
|
|
d56b4d3368 | ||
|
|
ef2107d9c2 | ||
|
|
c73e90c4e6 | ||
|
|
bce4bd6755 | ||
|
|
cd4589d4d9 | ||
|
|
5f82c7533c | ||
|
|
a41cfedfe7 | ||
|
|
0268b42e20 | ||
|
|
45589e833d | ||
|
|
7e7830a1d7 | ||
|
|
8790733ab5 | ||
|
|
07fb0deb49 | ||
|
|
b290d02688 | ||
|
|
e1a5a2a481 | ||
|
|
f85dd1b09b | ||
|
|
19db097709 | ||
|
|
60264d68b5 | ||
|
|
70a05ec82a | ||
|
|
90e73b83a1 | ||
|
|
d6b0894c6b | ||
|
|
a6b791c4f0 | ||
|
|
e16e48f893 | ||
|
|
5726613dfb | ||
|
|
5ed6a08c22 | ||
|
|
e13f7ca757 | ||
|
|
bea78c50e6 | ||
|
|
6ee25868a8 | ||
|
|
bf476e9e05 | ||
|
|
732d16dcee | ||
|
|
17a2ec13dc | ||
|
|
0f96db5e90 | ||
|
|
bfd76c53d7 | ||
|
|
608372f4dd | ||
|
|
75f68a80f3 | ||
|
|
cedad3b976 | ||
|
|
993c7d53e8 | ||
|
|
3edd5613ec | ||
|
|
b44e9e41f4 | ||
|
|
1e141c9682 | ||
|
|
1cfc3f9d43 | ||
|
|
77806700ab | ||
|
|
6d7d5ae065 | ||
|
|
69fdaa45d4 | ||
|
|
b1dcd83a58 | ||
|
|
aef6b50b11 | ||
|
|
fc96309a73 | ||
|
|
94bdcbabfc | ||
|
|
b00be97030 | ||
|
|
97bd56b263 | ||
|
|
4c302e8f0b | ||
|
|
08e6254232 | ||
|
|
03a2882915 | ||
|
|
a5aa8446af | ||
|
|
fd109ba1f3 | ||
|
|
e626404f00 | ||
|
|
dd5788968b | ||
|
|
60ccf70ca2 | ||
|
|
a5a9142b20 | ||
|
|
f416e59ed7 | ||
|
|
86170b40df | ||
|
|
dcda11f3fc | ||
|
|
b505e358e5 | ||
|
|
b2324f2888 | ||
|
|
95bc1e7660 | ||
|
|
40ce4a1b6b | ||
|
|
52c01e072e | ||
|
|
85a7fb33e3 | ||
|
|
b6396462f2 | ||
|
|
0ebf2234da | ||
|
|
0c0e17c349 | ||
|
|
b512808653 | ||
|
|
1320ce66d4 | ||
|
|
0e977e6818 | ||
|
|
01796cb940 | ||
|
|
ba6454ff5d | ||
|
|
8ccd2fd367 | ||
|
|
67e2fcf9d6 | ||
|
|
76f2e12b97 | ||
|
|
0e8ecdb192 | ||
|
|
1e89772bb9 | ||
|
|
3cbc5bbae7 | ||
|
|
ca9dadb28f | ||
|
|
4b99cf279c | ||
|
|
0b3c12a681 | ||
|
|
fa888bfafa | ||
|
|
24a51a3179 | ||
|
|
54b2936320 | ||
|
|
1aeff329c2 | ||
|
|
eda0b605ea | ||
|
|
d901d01be8 | ||
|
|
5e4d6675de | ||
|
|
bc14028294 | ||
|
|
3ccffdab19 | ||
|
|
17f284a9ac | ||
|
|
04054bfde4 | ||
|
|
5df79d9233 | ||
|
|
54988f5c93 | ||
|
|
34c4418298 | ||
|
|
a933f117fb | ||
|
|
a8023c2598 | ||
|
|
27e9868aa5 | ||
|
|
2e62c517e2 | ||
|
|
4e499c5bac | ||
|
|
d3b4b50a5c | ||
|
|
d80053e8dc | ||
|
|
5cd1abab94 | ||
|
|
a7c37da713 | ||
|
|
ef2d64513b | ||
|
|
e5d30a9f6d | ||
|
|
4e7ea36490 | ||
|
|
121e79410b | ||
|
|
d16d8b70e3 | ||
|
|
751cde420b | ||
|
|
78c1460e29 | ||
|
|
6834e38505 | ||
|
|
2883ea0abb | ||
|
|
e56a2b1f41 | ||
|
|
c056780c64 | ||
|
|
ae1452c487 | ||
|
|
4328597114 | ||
|
|
6968b69cee | ||
|
|
650072eb99 | ||
|
|
9724f44105 | ||
|
|
e364804615 | ||
|
|
c45f675119 | ||
|
|
78cd591798 | ||
|
|
ea37eb95f9 | ||
|
|
d3fe982e27 | ||
|
|
b881189651 | ||
|
|
63213bc74d | ||
|
|
d851d9baa8 | ||
|
|
799ce6ad0b | ||
|
|
01e59672bd | ||
|
|
5fcc6578c5 | ||
|
|
2949683c23 | ||
|
|
e9cc9fdd8a | ||
|
|
4b3d11cff0 | ||
|
|
583eedc240 | ||
|
|
d234a12942 | ||
|
|
8d9f6a21b2 | ||
|
|
a15f391c96 | ||
|
|
668fc8c352 | ||
|
|
81bdcb9477 | ||
|
|
66719e4c78 | ||
|
|
c9b65353e7 | ||
|
|
33c18102de | ||
|
|
401231eab5 | ||
|
|
f41bf592b9 | ||
|
|
9c4aca255a | ||
|
|
965a6187ac | ||
|
|
c2dc4baa43 | ||
|
|
5b18765e31 | ||
|
|
9fc0e2d4d8 | ||
|
|
45c15e370f | ||
|
|
a1f7445f03 | ||
|
|
8489052358 | ||
|
|
cf2952f338 | ||
|
|
1277e368c1 | ||
|
|
c8c57059eb | ||
|
|
70941212e4 | ||
|
|
f816f33099 | ||
|
|
cbbc5be4ce | ||
|
|
dd7e231224 | ||
|
|
730bd35abd | ||
|
|
5eefa36acb | ||
|
|
922cf6e1bf | ||
|
|
0bb45be584 | ||
|
|
2dc1a0a9cd | ||
|
|
57907bcda1 | ||
|
|
994bdca948 | ||
|
|
e1d45645ae | ||
|
|
2c0c2e7663 | ||
|
|
a470a30311 | ||
|
|
613dd111f0 | ||
|
|
4ce962db74 | ||
|
|
11c1bc3aa9 | ||
|
|
009eeebb70 | ||
|
|
b9be0f7834 | ||
|
|
011be8f2ca | ||
|
|
f1133af05c | ||
|
|
bbfaf555b2 | ||
|
|
16be886ba0 | ||
|
|
e5cef441e2 | ||
|
|
de60cf8e56 | ||
|
|
c539866f7c | ||
|
|
fc44da3161 | ||
|
|
e598fe95b4 | ||
|
|
abefbddda2 | ||
|
|
59265a4d56 | ||
|
|
44b19c4b58 | ||
|
|
4c54b4637e | ||
|
|
bc211827ad | ||
|
|
99bbed24ef | ||
|
|
f3f7fdf381 | ||
|
|
9cebc85193 | ||
|
|
1b314a5782 | ||
|
|
c0f0a48540 | ||
|
|
1789821aa5 | ||
|
|
222c10e9ee | ||
|
|
1935d5504c | ||
|
|
dc6e1372dc | ||
|
|
7a8a6ff8f4 | ||
|
|
cd64b26545 | ||
|
|
5bbcf0d876 | ||
|
|
cd7c6139c1 | ||
|
|
aa26cad9e7 | ||
|
|
bf60feaa7e | ||
|
|
c9bde250b1 | ||
|
|
eaa4825e6b | ||
|
|
e69e683f70 | ||
|
|
b0bd42a57e | ||
|
|
1049a4d868 | ||
|
|
32c977f7f1 | ||
|
|
7966b81e96 | ||
|
|
7a817f4989 | ||
|
|
bda3b8b649 | ||
|
|
3b710b3c7c | ||
|
|
855c7fe8ea | ||
|
|
e3200d87ba | ||
|
|
d7b2a952da | ||
|
|
291f7f2b85 | ||
|
|
4c236b16e4 | ||
|
|
618d1975b2 | ||
|
|
0cac00d5f8 | ||
|
|
04e7ecb3e5 | ||
|
|
21773c3789 | ||
|
|
54861833b5 | ||
|
|
d54f7948f1 | ||
|
|
64aaf0640a | ||
|
|
6eeefbd800 | ||
|
|
abed1ae879 | ||
|
|
819764372e | ||
|
|
f1ce7f628e | ||
|
|
36955cd4e3 | ||
|
|
c8756022cf | ||
|
|
c8351ff054 | ||
|
|
69fd7d15dd | ||
|
|
805c07d578 | ||
|
|
4848a72a2b | ||
|
|
44436fe1a3 | ||
|
|
aec47db740 | ||
|
|
f1cfb14085 | ||
|
|
3dbb0a21f8 | ||
|
|
6206749875 | ||
|
|
de2507cc5e | ||
|
|
6c043d19c1 | ||
|
|
a9f16dd3b3 | ||
|
|
687cdcaadf | ||
|
|
0472e0305c | ||
|
|
41909f0de7 | ||
|
|
ecb9580a13 | ||
|
|
ff7667108f | ||
|
|
2df6c5e334 | ||
|
|
5d865a36d9 | ||
|
|
deb84cc804 | ||
|
|
5ae17d009b | ||
|
|
651e112e3d | ||
|
|
ae5799fc6a | ||
|
|
078db3105c | ||
|
|
ed03a32bc3 | ||
|
|
0dcdaaf641 | ||
|
|
ac692d50e6 | ||
|
|
1c862be18a | ||
|
|
43593d849d | ||
|
|
8950ab44be | ||
|
|
c4d08aefb9 | ||
|
|
517bdb88d0 | ||
|
|
56e4e83bb4 | ||
|
|
36d7a8c978 | ||
|
|
68e4de434a | ||
|
|
195842b81a | ||
|
|
2e26d6293b | ||
|
|
2e93ac0eea | ||
|
|
62395a429a | ||
|
|
eff9434930 | ||
|
|
bea7a3c626 | ||
|
|
3091f28f06 | ||
|
|
cfd86ada8a | ||
|
|
bc731e0125 | ||
|
|
cb0c145eba | ||
|
|
ddd038f14d | ||
|
|
edb50d8445 | ||
|
|
1c93114a99 | ||
|
|
595e04def1 | ||
|
|
f0f290b0ae | ||
|
|
7fd7ac5ce0 | ||
|
|
71d66d7a1e | ||
|
|
26df847b24 | ||
|
|
a41216a9dd | ||
|
|
3eeebdd948 | ||
|
|
76d103cb41 | ||
|
|
ddffc6317a | ||
|
|
fca97c9777 | ||
|
|
c36ec21420 | ||
|
|
33b3ea6e43 | ||
|
|
01a226cdda | ||
|
|
635a3c5c8e | ||
|
|
b6ad300eda | ||
|
|
b263cab511 | ||
|
|
45a6d02f05 | ||
|
|
b0862471ab | ||
|
|
e96d492fb1 | ||
|
|
7c49b0f29c | ||
|
|
afb59a0778 | ||
|
|
ba030eac1d | ||
|
|
96324b3502 | ||
|
|
5bfccd800e | ||
|
|
b486c621eb | ||
|
|
e4370652e9 | ||
|
|
8644dcc216 | ||
|
|
22f42cb704 | ||
|
|
16a1d884f1 | ||
|
|
a4ef53c55c | ||
|
|
d4b25eddab | ||
|
|
70d7ccf8f2 | ||
|
|
624497bd3b | ||
|
|
c66b903a13 | ||
|
|
7a933cacb7 | ||
|
|
87c6874a54 | ||
|
|
80a8b95b8c | ||
|
|
6ae60371f0 | ||
|
|
77a726dd79 | ||
|
|
71a56e9eca | ||
|
|
fba7bcee83 | ||
|
|
35f5e5e402 | ||
|
|
c587da5dd9 | ||
|
|
e92087bd80 | ||
|
|
bbcfa710b3 | ||
|
|
fdae35481c | ||
|
|
3f202d5e78 | ||
|
|
ea741a9e5e | ||
|
|
e1f82d1469 | ||
|
|
adbd6e1c7d | ||
|
|
0a484630bf | ||
|
|
33674b5575 | ||
|
|
69d452fa5b | ||
|
|
b73b7fa43d | ||
|
|
a25d537162 | ||
|
|
8c2cef6621 | ||
|
|
79f214079d | ||
|
|
42f10ea1cf | ||
|
|
abc80fe223 | ||
|
|
6715b462fd | ||
|
|
dab064f5a8 | ||
|
|
d2cc22c698 | ||
|
|
6fac2386c7 | ||
|
|
5a8b2658fa | ||
|
|
b367c3d125 | ||
|
|
0a5e08538d | ||
|
|
707c3203dd | ||
|
|
a929716456 | ||
|
|
82a81e5865 | ||
|
|
0e2c98ba64 | ||
|
|
d86ed40f83 | ||
|
|
78a620c1ec | ||
|
|
a8dd079d4c | ||
|
|
13c53b650d | ||
|
|
372c73fb33 | ||
|
|
3d02e7d609 | ||
|
|
80955f3a1d | ||
|
|
572541132b | ||
|
|
87b25cf870 | ||
|
|
48eeab6466 | ||
|
|
ca4a97c1ad | ||
|
|
e61b266938 | ||
|
|
6d62d97a2a | ||
|
|
194b71d94b | ||
|
|
54c423a590 | ||
|
|
28cb9dd823 | ||
|
|
fc32db80e5 | ||
|
|
61c3d423aa | ||
|
|
a0cd65da86 | ||
|
|
1e3234e5bc | ||
|
|
c0838772f3 | ||
|
|
cdb8df637f | ||
|
|
c055be1d64 | ||
|
|
59655a8d96 | ||
|
|
d3e86622b7 | ||
|
|
a107723456 | ||
|
|
2cd7b1e571 | ||
|
|
93f6c26e36 | ||
|
|
dc17ec6e2e | ||
|
|
2e418653ad | ||
|
|
6b59bf7877 | ||
|
|
3f8c4c6f9a | ||
|
|
7dfc1a80b2 | ||
|
|
3fb6b889e8 | ||
|
|
803e3f6ef9 | ||
|
|
c08165c66a | ||
|
|
62a8c7ae04 | ||
|
|
3d322b791f | ||
|
|
24e2fb2e35 | ||
|
|
6593b9cda7 | ||
|
|
883db39d5f | ||
|
|
05059e4c80 | ||
|
|
ca6bb9b9b6 | ||
|
|
97f1ae34c2 | ||
|
|
b3109037ff | ||
|
|
2f1eef53ff | ||
|
|
85369a117c | ||
|
|
09ce160dfb | ||
|
|
b312b1d481 | ||
|
|
2ca75159e3 | ||
|
|
9f8c93ebb0 | ||
|
|
0fb67241c5 | ||
|
|
33c900b73f | ||
|
|
ddfbd1b9f8 | ||
|
|
05ce744f8c | ||
|
|
f6e9565d1f | ||
|
|
16cbf5dc2a | ||
|
|
e2724c1108 | ||
|
|
c6e7d21364 | ||
|
|
621355884e | ||
|
|
249226c1e4 | ||
|
|
3c48c4a2d6 | ||
|
|
1f3db55ee8 | ||
|
|
69f43edfee | ||
|
|
7285f7dad3 | ||
|
|
e153ae23a3 | ||
|
|
2c07e8a4d9 | ||
|
|
c31bccc1da | ||
|
|
f4bc24aedf | ||
|
|
3be38790a0 | ||
|
|
1b618fd5d7 | ||
|
|
d7b6d1e49a | ||
|
|
5db1a93257 | ||
|
|
f871688a3f | ||
|
|
efe9278693 | ||
|
|
055bbc10d4 | ||
|
|
4bcb39dc14 | ||
|
|
2176e1179a | ||
|
|
bcd00b39b7 | ||
|
|
714f0fc676 | ||
|
|
0e369c045f | ||
|
|
172b22ab6a | ||
|
|
9dcbe072aa | ||
|
|
1df351e1ba | ||
|
|
b444cbf7ed | ||
|
|
368f57a8b8 | ||
|
|
b3846a7d17 | ||
|
|
2e7ce18fd2 | ||
|
|
a72b438fbc | ||
|
|
e8a1386718 | ||
|
|
9db62c7941 | ||
|
|
529955683f | ||
|
|
17ca1f0da4 | ||
|
|
3c5b7f0be4 | ||
|
|
7f55d8584e | ||
|
|
8c0a4e5379 | ||
|
|
3bab48a154 | ||
|
|
1db5af1367 | ||
|
|
57d7ad2df2 | ||
|
|
2b5ca2dbfb | ||
|
|
6e2d325994 | ||
|
|
34fe38627b | ||
|
|
3a0db45b3b | ||
|
|
f36468240b | ||
|
|
558480503b | ||
|
|
d6b0ba602a | ||
|
|
986f554982 | ||
|
|
88d6d8fa30 | ||
|
|
6d395049cd | ||
|
|
dd6e17b12f | ||
|
|
8a12be0a4c | ||
|
|
f56e76cd7b | ||
|
|
97ccaba45f | ||
|
|
a660619ea8 | ||
|
|
d8f02dc79f | ||
|
|
092afbce62 | ||
|
|
b0405ac8ee | ||
|
|
380e92596e | ||
|
|
2d400ed167 | ||
|
|
2a8cc4816e | ||
|
|
cef5e6535e | ||
|
|
f4658cfff3 | ||
|
|
b71b7d1f26 | ||
|
|
eb5976d56b | ||
|
|
d830a7ebcf | ||
|
|
2520ec6e08 | ||
|
|
4e64519a26 | ||
|
|
aa28507c0a | ||
|
|
076f242929 | ||
|
|
33d2ce66c2 | ||
|
|
5b49624e16 | ||
|
|
2f29d116d7 | ||
|
|
f06545f5ae | ||
|
|
af71138605 | ||
|
|
d24eec9cf3 | ||
|
|
baad116966 | ||
|
|
1436092a35 | ||
|
|
26d9f46da4 | ||
|
|
4e761b49f3 | ||
|
|
9ed77593b3 | ||
|
|
19ee68bcd9 | ||
|
|
ff62f60583 | ||
|
|
7e46fb9b07 | ||
|
|
bf9afa0327 | ||
|
|
7fefba77a8 | ||
|
|
e1cf065c87 | ||
|
|
d35ec18202 | ||
|
|
f80f3794e8 | ||
|
|
5b520eb5ae | ||
|
|
fb946e3d57 | ||
|
|
cf4aa4fe9c | ||
|
|
d65e7ab830 | ||
|
|
8cc3f258cc | ||
|
|
d505724e90 | ||
|
|
9bedb9fa08 | ||
|
|
831fe730f7 | ||
|
|
eaa7bd6974 | ||
|
|
43edec2c81 | ||
|
|
66cf84b007 | ||
|
|
4a464373cc | ||
|
|
e02f55a282 | ||
|
|
8200974fe1 | ||
|
|
bb6f5150e8 | ||
|
|
cadc8c5b61 | ||
|
|
3de18d7c59 | ||
|
|
9f1631719c | ||
|
|
3fe2246468 | ||
|
|
ebec2ac813 | ||
|
|
7f940a552b | ||
|
|
fde1dcc447 | ||
|
|
5b445c2433 | ||
|
|
d9a6b1e8fb | ||
|
|
079cb77c05 | ||
|
|
d3750754cc | ||
|
|
7acbdccef5 | ||
|
|
26a4b4d946 | ||
|
|
75aa8760d9 | ||
|
|
c6a1fc44a7 | ||
|
|
13ac319c0a | ||
|
|
75b80ffeb1 | ||
|
|
39be472f6c | ||
|
|
baec508552 | ||
|
|
67656d6398 | ||
|
|
3977a164d2 | ||
|
|
e3b42e8da7 | ||
|
|
c484bb81e9 | ||
|
|
6e4fa9743e | ||
|
|
038dfdd668 | ||
|
|
4dd75ca083 | ||
|
|
bed480925a | ||
|
|
d54bff62c1 | ||
|
|
a1a76ffa16 | ||
|
|
fb087a023f | ||
|
|
e4671145fc | ||
|
|
bd367d9a3d | ||
|
|
f28ae236e7 | ||
|
|
4943022ff3 | ||
|
|
b06ca33742 | ||
|
|
8b44ecb6cc | ||
|
|
3d13f9522e | ||
|
|
1d1728a800 | ||
|
|
5775a1f7ef | ||
|
|
08077026d8 | ||
|
|
e25dd96b55 | ||
|
|
28bfcbb60e | ||
|
|
3ce10dfa25 | ||
|
|
f9f3adcf9e | ||
|
|
5d575a3eed | ||
|
|
4b832764d6 | ||
|
|
6fa303509f | ||
|
|
a796f15cf3 | ||
|
|
c21b7bb13b | ||
|
|
6b9a75f786 | ||
|
|
5fde2d3a20 | ||
|
|
b650823842 | ||
|
|
62c52643b4 | ||
|
|
760b60b249 | ||
|
|
c13a46fd3b | ||
|
|
82ed4a136a | ||
|
|
277f3e4e4d | ||
|
|
281d8486df | ||
|
|
16053a3137 | ||
|
|
d7dd3b2f0b | ||
|
|
0575442201 | ||
|
|
662810525f | ||
|
|
f9bf3f855d | ||
|
|
297f41c8aa | ||
|
|
265f059ac0 | ||
|
|
bd54b53c92 | ||
|
|
91e21ae5a9 | ||
|
|
b9f58e0b55 | ||
|
|
8a7dd42c1b | ||
|
|
b6b7de3be5 | ||
|
|
fcdfa2ae2b | ||
|
|
8317058170 | ||
|
|
23d209f09c | ||
|
|
a006fe358b | ||
|
|
688abc724c | ||
|
|
8bc3710e23 | ||
|
|
b6a9d905f1 | ||
|
|
c59e5fb7d8 | ||
|
|
d978e801c4 | ||
|
|
65fdbaf27f | ||
|
|
76d0942616 | ||
|
|
499b09f206 | ||
|
|
dba4156637 | ||
|
|
35884fc31a | ||
|
|
39dc076ad4 | ||
|
|
d44df645cf | ||
|
|
d598bfd51e | ||
|
|
dc35dc3024 | ||
|
|
9f1a2870bf | ||
|
|
550d4c92d8 | ||
|
|
d88f845af2 | ||
|
|
79875195b3 | ||
|
|
006a555dd3 | ||
|
|
de45fcad37 | ||
|
|
683dd89bec | ||
|
|
dd1cad5784 | ||
|
|
a6b229f4cd | ||
|
|
0d7fbba134 | ||
|
|
22026ded57 | ||
|
|
092acab8ea | ||
|
|
0f010def5d | ||
|
|
47a347b6ba | ||
|
|
07f831878f | ||
|
|
3f6fabd9c4 | ||
|
|
219b90543e | ||
|
|
2ec3402660 | ||
|
|
e7f65664a3 | ||
|
|
af67027b12 | ||
|
|
8ac4cb4a89 | ||
|
|
28714d37f0 | ||
|
|
91a1c3dedd | ||
|
|
d38d3920d7 | ||
|
|
28f7a01797 | ||
|
|
ee6899c68a | ||
|
|
670394bf7d | ||
|
|
ed6227ef5d | ||
|
|
a302293fa6 | ||
|
|
9a24bbf127 | ||
|
|
245e3e06d0 | ||
|
|
0072993a1b | ||
|
|
375670adf0 | ||
|
|
4af3cc0e07 | ||
|
|
c3c1934d7b | ||
|
|
4bda2cf107 | ||
|
|
f448a35957 | ||
|
|
d27d17e51b | ||
|
|
0da8a2bd99 | ||
|
|
1129e6b426 | ||
|
|
c8c55c1297 | ||
|
|
c50767e18b | ||
|
|
dc71939e9f | ||
|
|
ea35d8b77c | ||
|
|
2562a35e14 | ||
|
|
ef57b2acdb | ||
|
|
572cbf60c8 | ||
|
|
040173afb8 | ||
|
|
2343ef99ed | ||
|
|
cdb25404be | ||
|
|
4c80029a13 | ||
|
|
16e5a7f740 | ||
|
|
477f1fc038 | ||
|
|
421188c466 | ||
|
|
f9c1f080c8 | ||
|
|
fb173a143c | ||
|
|
3a0cbe67cc | ||
|
|
786a223b71 | ||
|
|
5b76379846 | ||
|
|
ec835af308 | ||
|
|
a8471387bf | ||
|
|
2c70016dc6 | ||
|
|
5f82c18d5a | ||
|
|
f92b645aa4 | ||
|
|
afca61eac6 | ||
|
|
9d7063f37d | ||
|
|
1c57e4a82a | ||
|
|
7ceea969d2 | ||
|
|
0ae2fb49d4 | ||
|
|
a4d19cf151 | ||
|
|
c52942ce23 | ||
|
|
f8d80265b1 | ||
|
|
85be563c43 | ||
|
|
689ce14a07 | ||
|
|
99d80bfbbe | ||
|
|
7f7bc0d2c5 | ||
|
|
0bb9b2d828 | ||
|
|
99fafaccca | ||
|
|
3ab57f13a9 | ||
|
|
e2610bbcba | ||
|
|
8bf2f3fe5d | ||
|
|
0a73e391d9 | ||
|
|
3fa1b6aedb | ||
|
|
74b7678477 | ||
|
|
f9c07a5f27 | ||
|
|
e4f8f89e72 | ||
|
|
ecf3b7dfdc | ||
|
|
3ee6450836 | ||
|
|
a51acfa482 | ||
|
|
064e543c77 | ||
|
|
e1c043975f | ||
|
|
2273396124 | ||
|
|
c6ad3a6a0f | ||
|
|
278b2810d9 | ||
|
|
6dd125870f | ||
|
|
f736c66fff | ||
|
|
b2955b35cb | ||
|
|
324e7f4357 | ||
|
|
50b0c18677 | ||
|
|
231aa5763c | ||
|
|
b7269f2eb8 | ||
|
|
518a5681ed | ||
|
|
d8d7fa662b | ||
|
|
c8ff1fd646 | ||
|
|
5634464cf3 | ||
|
|
7b8f72637d | ||
|
|
1b6dbe05d0 | ||
|
|
8eda98a88d | ||
|
|
fc0334c381 | ||
|
|
789e777434 | ||
|
|
2428cf3596 | ||
|
|
119fcacc84 | ||
|
|
7002523620 | ||
|
|
d9a06c36e6 | ||
|
|
cd9da9a470 | ||
|
|
447514e170 | ||
|
|
d73928d5c9 | ||
|
|
0ed5a8c625 | ||
|
|
58eb65290c | ||
|
|
64db78b51f | ||
|
|
0828d9bbdd | ||
|
|
e00ba3a210 | ||
|
|
702edeae3c | ||
|
|
70e4e75437 | ||
|
|
3efbd45f41 | ||
|
|
6dc713059c | ||
|
|
af7a456e00 | ||
|
|
1560892c58 | ||
|
|
ec2757f6d1 | ||
|
|
f426f30d59 | ||
|
|
79be5cd70f | ||
|
|
7013d99242 | ||
|
|
be7852a057 | ||
|
|
b8fb139641 | ||
|
|
2e76104d6a | ||
|
|
3ec09a3b69 | ||
|
|
cf036d356b | ||
|
|
f1f84c8130 | ||
|
|
25a90a72f7 | ||
|
|
d1fa5fa2e7 | ||
|
|
58b35ce4be | ||
|
|
a513b449f7 | ||
|
|
f26ccda097 | ||
|
|
1a61c66898 | ||
|
|
f74a960d05 | ||
|
|
ef58884a38 | ||
|
|
38e1419820 | ||
|
|
355a6c1d9d | ||
|
|
2cc5b3ced8 | ||
|
|
e127f459db | ||
|
|
ef5f52c4e6 | ||
|
|
1e7bf33fe6 | ||
|
|
c2269397f1 | ||
|
|
47c6062092 | ||
|
|
fb67c3aaf1 | ||
|
|
70628e9018 | ||
|
|
5211f2a589 | ||
|
|
a2380a7bdd | ||
|
|
cfbb11c218 | ||
|
|
57d44e62d2 | ||
|
|
ef66260365 | ||
|
|
de5c162f5a | ||
|
|
51c483938f | ||
|
|
2bb57b9800 | ||
|
|
d9fe391c4a | ||
|
|
22db949d90 | ||
|
|
7f8a3a737f | ||
|
|
1b20e45ec1 | ||
|
|
582c3e06a4 | ||
|
|
e53a459e76 | ||
|
|
b5d475e07c | ||
|
|
2acfe1b42a | ||
|
|
6afd962270 | ||
|
|
f7ea78b83d | ||
|
|
fef76b1240 | ||
|
|
534ff7d6e7 | ||
|
|
2a46abead9 | ||
|
|
5f12bfe9f9 | ||
|
|
8b3ddc6a37 | ||
|
|
e3f1e2184f | ||
|
|
7d60ce5f44 | ||
|
|
223938b48c | ||
|
|
7fccf6c4da | ||
|
|
b5388f874e | ||
|
|
1d2112657b | ||
|
|
e6b4d33c4f | ||
|
|
80701bb91b | ||
|
|
164bf6aa88 | ||
|
|
f1f4cac5de | ||
|
|
7ae56bb231 | ||
|
|
4f2719c910 | ||
|
|
4e07aeb94c | ||
|
|
ae2fbfdbbd | ||
|
|
8bbd7332bb | ||
|
|
c093f2ee6b | ||
|
|
c063f3504e | ||
|
|
210bd49f19 | ||
|
|
2a894f60b1 | ||
|
|
14e6d4968e | ||
|
|
305f3a6138 | ||
|
|
e89d073bfa | ||
|
|
6592e1aa06 | ||
|
|
c639254f7a | ||
|
|
fbc08a9828 | ||
|
|
cb46a7f8e6 | ||
|
|
9b0a76288b | ||
|
|
90fe636b29 | ||
|
|
143244c42b | ||
|
|
4f72049505 | ||
|
|
a3f73493e8 | ||
|
|
746befa2a0 | ||
|
|
e0a72b86c1 | ||
|
|
870988f0dd | ||
|
|
0cba9f198c | ||
|
|
e857d77dfb | ||
|
|
f41bb9655b | ||
|
|
0f624be366 | ||
|
|
d30f45b88e | ||
|
|
e615dda22c | ||
|
|
9326ef7826 | ||
|
|
fa8f010e80 | ||
|
|
db48e7849b | ||
|
|
f157f46a07 | ||
|
|
0d224ca25b | ||
|
|
5ff0146193 | ||
|
|
3c20191156 | ||
|
|
710ad448fe | ||
|
|
19848f362d | ||
|
|
aa37109707 | ||
|
|
5a7ad44221 | ||
|
|
c83eaa0077 | ||
|
|
e32713be68 | ||
|
|
f2c0f87cf8 | ||
|
|
ab3b8f228e | ||
|
|
5232522e47 | ||
|
|
53ec3337f3 | ||
|
|
7e40b57d2b | ||
|
|
6e3270ee05 | ||
|
|
abc328645e | ||
|
|
3e9de83cf4 | ||
|
|
f8feeac3cf | ||
|
|
67d8f5887b | ||
|
|
6330f46414 | ||
|
|
80f2b186ed | ||
|
|
530eb61f25 | ||
|
|
13d2dcbf5e | ||
|
|
5db931c094 | ||
|
|
023a49a7f7 | ||
|
|
fb13a83d15 | ||
|
|
921ee89272 | ||
|
|
e1532b1506 | ||
|
|
3bc626a530 | ||
|
|
99e35bf9ac | ||
|
|
d1ad240bd7 | ||
|
|
0314af87e8 | ||
|
|
c3a8965aed | ||
|
|
6c90793254 | ||
|
|
594d1b15f4 | ||
|
|
e9c99c4130 | ||
|
|
ec262f0667 | ||
|
|
1ad1d9f61b | ||
|
|
df86819f20 | ||
|
|
e7ca844488 | ||
|
|
745a4ee2be | ||
|
|
5f8c5d33c2 | ||
|
|
34814d837a | ||
|
|
c56fca0a4f | ||
|
|
e3c1404b02 | ||
|
|
298c72d3f8 | ||
|
|
26883ac9cf | ||
|
|
2d4e62d31d | ||
|
|
302b776f23 | ||
|
|
0e94198907 | ||
|
|
071c8a5f8e | ||
|
|
568eeff70a | ||
|
|
46a18e0c0a | ||
|
|
f97e45c063 | ||
|
|
ec2e2cccb6 | ||
|
|
8e438fcc32 | ||
|
|
f5f1f57a55 | ||
|
|
196c004181 | ||
|
|
b0540c0b83 | ||
|
|
b77be85ddd | ||
|
|
75f7a0d9f2 | ||
|
|
e89bdcca9e | ||
|
|
85f4adff24 | ||
|
|
ace7533277 | ||
|
|
e9518de5ca | ||
|
|
b4caf57be1 | ||
|
|
16b08719e8 | ||
|
|
dde0fcf8e5 | ||
|
|
02dd294ea7 | ||
|
|
1f9c12bf7b | ||
|
|
1675861b8a | ||
|
|
f43dc51077 | ||
|
|
102c46f0d7 | ||
|
|
594cef137a | ||
|
|
777c91fa0d | ||
|
|
9c6c451d2a | ||
|
|
3433b10734 | ||
|
|
ca81051515 | ||
|
|
4919d9956c | ||
|
|
568c1c04dc | ||
|
|
d51d37343f | ||
|
|
20c87fbc26 | ||
|
|
143db2540c | ||
|
|
49ccf73923 | ||
|
|
59f015ab93 | ||
|
|
8fa67ea466 | ||
|
|
8a51117f90 | ||
|
|
ccc903badd | ||
|
|
e3562a4b66 | ||
|
|
4629b601bd | ||
|
|
7955947ef6 | ||
|
|
021961cc03 | ||
|
|
caab1f279a | ||
|
|
3ee53f4e5d | ||
|
|
70b4707e57 | ||
|
|
3922440f25 | ||
|
|
987346a8f0 | ||
|
|
5a029a7123 | ||
|
|
7caeb5d6c1 | ||
|
|
5a9428426a | ||
|
|
819bd5059e | ||
|
|
91a086c02c | ||
|
|
78e80fc33e | ||
|
|
532f8dc191 | ||
|
|
c74e702338 | ||
|
|
4af86d2d07 | ||
|
|
b5164f4386 | ||
|
|
ef73ba225d | ||
|
|
08856ef252 | ||
|
|
3ce1d2067e | ||
|
|
6ce5bae042 | ||
|
|
af5b15add5 | ||
|
|
cc486af467 | ||
|
|
ea8bc06230 | ||
|
|
a04b4b1f89 | ||
|
|
0044f088ea | ||
|
|
f6b160d322 | ||
|
|
aa505807ae | ||
|
|
6f4129caae | ||
|
|
f5c359059c | ||
|
|
e9087816c3 | ||
|
|
d4f6e444b9 | ||
|
|
ae5f259ccf | ||
|
|
327d79f978 | ||
|
|
56cf3e8206 | ||
|
|
b76eccbb70 | ||
|
|
abd91eba30 | ||
|
|
884152ebfe | ||
|
|
26e9729b71 | ||
|
|
44761c9e11 | ||
|
|
c9ba9a560f | ||
|
|
f815fb8af3 | ||
|
|
90cef67f83 | ||
|
|
af076b6858 | ||
|
|
2dcfb70de6 | ||
|
|
521459be69 | ||
|
|
c4305f91cd | ||
|
|
c06c43edb9 | ||
|
|
32734ed1c3 | ||
|
|
d682961954 | ||
|
|
ccbf32b58f | ||
|
|
87753dbeb5 | ||
|
|
6e5935a932 | ||
|
|
2bf3ed16fc | ||
|
|
d73399cdee | ||
|
|
6ee998dfcd | ||
|
|
ffe0b36d29 | ||
|
|
edf1f84344 | ||
|
|
7b81f35383 | ||
|
|
f41533ce62 | ||
|
|
8130e47c76 | ||
|
|
3d5478b9d7 | ||
|
|
5525c23fee | ||
|
|
0744784803 | ||
|
|
9407ccb507 | ||
|
|
6f7e12472f | ||
|
|
1fd5162c0d | ||
|
|
d1bd383cec | ||
|
|
eeb68858d7 | ||
|
|
828512f7ca | ||
|
|
13a4dcef1b | ||
|
|
472475e5fe | ||
|
|
e460344e05 | ||
|
|
9fd59a50f8 | ||
|
|
f72e1c00d9 | ||
|
|
873630fc74 | ||
|
|
3992f0865b | ||
|
|
3a2d08fb41 | ||
|
|
a593c32727 | ||
|
|
0fec6a90bc | ||
|
|
d659ef4670 | ||
|
|
7624fcee50 | ||
|
|
cebedaa9ec | ||
|
|
bab6fccdec | ||
|
|
6d3b07b188 | ||
|
|
3ac26d96d7 | ||
|
|
7cd407b7b4 | ||
|
|
df6b5e761d | ||
|
|
46421b6d58 | ||
|
|
60ec91665d | ||
|
|
27f9ee0fa4 | ||
|
|
e73905d75b | ||
|
|
f4225f63bf | ||
|
|
9add0a6b87 | ||
|
|
c70821bccb | ||
|
|
3d278594ec | ||
|
|
44fb3ab616 | ||
|
|
0474f1ed6f | ||
|
|
79d3541034 | ||
|
|
f8a01ef70a | ||
|
|
fc6255296a | ||
|
|
c8914ebb66 | ||
|
|
d830194d29 | ||
|
|
fcaca0daf7 | ||
|
|
4c6d04455e | ||
|
|
ad74a70721 | ||
|
|
8fb5ead57e | ||
|
|
b345ef0c5e | ||
|
|
f195840d35 | ||
|
|
b2507cd556 | ||
|
|
9a86050609 | ||
|
|
7c9169174d | ||
|
|
1e87c7e74d | ||
|
|
857761845c | ||
|
|
da829f23dd | ||
|
|
6f3fd2a578 | ||
|
|
74e33c0bec | ||
|
|
a4ab5f51c9 | ||
|
|
f799503105 | ||
|
|
4425ed6655 | ||
|
|
2cd4ffcd90 | ||
|
|
f0615f77ec | ||
|
|
ad18f77e25 | ||
|
|
d96aedd521 | ||
|
|
b14247b351 | ||
|
|
4ac4213d12 | ||
|
|
c28beda47c | ||
|
|
c8a1693d47 | ||
|
|
3eb614dea9 | ||
|
|
ce24857a74 | ||
|
|
e0db4e9638 | ||
|
|
2f9850370b | ||
|
|
63787f231e | ||
|
|
243b2f0769 | ||
|
|
5a6f663f06 | ||
|
|
32aab6efb2 | ||
|
|
46dce97c4e | ||
|
|
a14ddcd6af | ||
|
|
5477064f2d | ||
|
|
48383433b5 | ||
|
|
2a2b064cd9 | ||
|
|
f1f1f82340 | ||
|
|
a593b76c3e | ||
|
|
a2dc4693a3 | ||
|
|
8464decd6f | ||
|
|
ca9b313cab | ||
|
|
20f14e79e1 | ||
|
|
2bc3011900 | ||
|
|
1abbcdb208 | ||
|
|
ec2228e6e3 | ||
|
|
f9fea473f5 | ||
|
|
1a7079f6c5 | ||
|
|
b45fce862f | ||
|
|
9e063d260b | ||
|
|
2f50a01256 | ||
|
|
efdaafc33c | ||
|
|
5c1f52fcb7 | ||
|
|
719f894520 | ||
|
|
5503385bc1 | ||
|
|
e364b4bb96 | ||
|
|
a9e6efd24c | ||
|
|
9e77f1f8f3 | ||
|
|
b8a2267805 | ||
|
|
d70dbeb273 | ||
|
|
100d4f0d07 | ||
|
|
8ee7c63a2f | ||
|
|
a6a3995326 | ||
|
|
ff994a8fee | ||
|
|
f62b7918f3 | ||
|
|
f39d2caf78 | ||
|
|
ce88ae3ed1 | ||
|
|
3a32adbce5 | ||
|
|
3651d22147 | ||
|
|
09727467d4 | ||
|
|
a9e610a81c | ||
|
|
0a473b6e34 | ||
|
|
f7736408e2 | ||
|
|
3e2f4ea676 | ||
|
|
8909c17ede | ||
|
|
9d75712bae | ||
|
|
9595a3f227 | ||
|
|
31bfc0d453 | ||
|
|
c4bd135943 | ||
|
|
187979bffb | ||
|
|
05e7411ff3 | ||
|
|
f060d77e5f | ||
|
|
f8de706a15 | ||
|
|
8726cd3c0a | ||
|
|
8e0f56e17f | ||
|
|
9e2ff3e6d8 | ||
|
|
e561bac004 | ||
|
|
22e9e1a811 | ||
|
|
204a2536ed | ||
|
|
2ec306e850 | ||
|
|
db49e8de15 | ||
|
|
94ea25a755 | ||
|
|
470d7d57d8 | ||
|
|
9ffd688f60 | ||
|
|
b5c0321c4e | ||
|
|
e43588a80a | ||
|
|
853a88e2a7 | ||
|
|
14c49fa7ea | ||
|
|
1403d8d4db | ||
|
|
d04239a88a | ||
|
|
984bbb2721 | ||
|
|
4c41ce07b5 | ||
|
|
a3476a3cc1 | ||
|
|
dbabefbb94 | ||
|
|
4011cb228f | ||
|
|
3a07f3da39 | ||
|
|
dad68421a4 | ||
|
|
ca4221267d | ||
|
|
2910de60af | ||
|
|
4ef92eb59a | ||
|
|
28ea0e587f | ||
|
|
f7cfa607f0 | ||
|
|
11af229278 | ||
|
|
3cfbabadf6 | ||
|
|
b24c077b27 | ||
|
|
16f6a52e6c | ||
|
|
bd73ced7e8 | ||
|
|
ad00a0634e | ||
|
|
cd543efb4d | ||
|
|
9c4858b431 | ||
|
|
d54a4cec55 | ||
|
|
74b2f07dfe | ||
|
|
2d479d7413 | ||
|
|
2486f6331c | ||
|
|
6309bc9c3d | ||
|
|
74eba632ac | ||
|
|
38b5ee438f | ||
|
|
5a1769ac73 | ||
|
|
65f98641b0 | ||
|
|
50b063ace4 | ||
|
|
acd8641207 | ||
|
|
d93950e6d9 | ||
|
|
b7f1df3e1d | ||
|
|
3bd6f0d496 | ||
|
|
07dc40fe8a | ||
|
|
975f97e074 | ||
|
|
32d7b01000 | ||
|
|
f879d0bd7b | ||
|
|
19db3151dd | ||
|
|
f528b9a995 | ||
|
|
c1567c22f5 | ||
|
|
37330c031e | ||
|
|
095c32a342 | ||
|
|
651345482f | ||
|
|
0e9dfef283 | ||
|
|
cca7be9271 | ||
|
|
5f1865cbd9 | ||
|
|
fd25f94953 | ||
|
|
6b55c8728a | ||
|
|
c2f141d60e | ||
|
|
c785a8fc4f | ||
|
|
d6c117731e | ||
|
|
cfa42673d8 | ||
|
|
1fdca8cab3 | ||
|
|
8584a43785 | ||
|
|
5ca7d39612 | ||
|
|
598fdc5467 | ||
|
|
8f8fcf2ec2 | ||
|
|
e460d13430 | ||
|
|
a6c3730ac8 | ||
|
|
73e02cd99c | ||
|
|
d47874e01e | ||
|
|
10185ca02d | ||
|
|
eadfc296cd | ||
|
|
2ec73a342a | ||
|
|
019b0280a6 | ||
|
|
52b8d1af07 | ||
|
|
2c53435a11 | ||
|
|
6f08a0fa49 | ||
|
|
8709b16af4 | ||
|
|
9e0396dfcd | ||
|
|
68ff87d4d1 | ||
|
|
c9bf2ee48d | ||
|
|
e1a10f2ddc | ||
|
|
3b723bca16 | ||
|
|
2a735d7eaf | ||
|
|
bd7fcc06e6 | ||
|
|
b71aec463a | ||
|
|
2dcaa07470 | ||
|
|
45c9566298 | ||
|
|
b53ed6a190 | ||
|
|
c52a9553bb | ||
|
|
517750f4c3 | ||
|
|
b527116bac | ||
|
|
56556690ed | ||
|
|
b5092820d7 | ||
|
|
c4554225bd | ||
|
|
cdafee02fc | ||
|
|
2eb346e06e | ||
|
|
ba7f73f559 | ||
|
|
eb15330216 | ||
|
|
b21ead08ae | ||
|
|
ffc95f6db3 | ||
|
|
22a95cc346 | ||
|
|
a76b571959 | ||
|
|
cbec3d440b | ||
|
|
af62c335e4 | ||
|
|
72a28c74c7 | ||
|
|
1083957fc3 | ||
|
|
ecc386ec7b | ||
|
|
565180c928 | ||
|
|
2adcaea31b | ||
|
|
9c95e0ea9e | ||
|
|
915041761e | ||
|
|
623ae9aebe | ||
|
|
8ffec71197 | ||
|
|
f07e7b60d4 | ||
|
|
d284cd4295 | ||
|
|
866ea769f1 | ||
|
|
9b73366c1f | ||
|
|
fcd07c1c13 | ||
|
|
a5b56fdb3d | ||
|
|
54e55e31a2 | ||
|
|
c8d9b69ee2 | ||
|
|
831757c9c0 | ||
|
|
3a266ae030 | ||
|
|
05a737fee3 | ||
|
|
4aad8b4002 | ||
|
|
661efbdcca | ||
|
|
1e27bdb5b1 | ||
|
|
471f7e76ed | ||
|
|
0646913f0d | ||
|
|
83839ade7d | ||
|
|
54de90aa9f | ||
|
|
86f73dab68 | ||
|
|
e10ea033bd | ||
|
|
a52477e1ed | ||
|
|
1da11db3b3 | ||
|
|
47e4040528 | ||
|
|
57d5b5cb04 | ||
|
|
80bd0c4260 | ||
|
|
4675b41dd5 | ||
|
|
cbe6bbc4fc | ||
|
|
0650f3a655 | ||
|
|
3c2023ace1 | ||
|
|
f8ee2a5995 | ||
|
|
28fee2d373 | ||
|
|
88feef0f2a | ||
|
|
d3d16da884 | ||
|
|
4616925c7b | ||
|
|
bcfa30c9c6 | ||
|
|
cf7627007c | ||
|
|
2e56dbaeac | ||
|
|
4487f3e743 | ||
|
|
6098b70408 | ||
|
|
7827abc6f4 | ||
|
|
8090a03bbc | ||
|
|
45b89f8b02 | ||
|
|
cc9d7f3d3c | ||
|
|
5785a3309d | ||
|
|
31897e7892 | ||
|
|
270afd64b2 | ||
|
|
753f7a97d0 | ||
|
|
b3e49fe103 | ||
|
|
708bdf212a | ||
|
|
bf1e3d3fce | ||
|
|
9fc50c25ae | ||
|
|
f23631cef5 | ||
|
|
901d050643 | ||
|
|
b9b354230c | ||
|
|
3c757d6897 | ||
|
|
375bd70248 | ||
|
|
71e0c598d6 | ||
|
|
0e6be16d07 | ||
|
|
eb57b15380 | ||
|
|
5df710fd35 | ||
|
|
f87b01620d | ||
|
|
fe4bdd8f97 | ||
|
|
66d1fec07e | ||
|
|
ce86a5e697 | ||
|
|
10c1803caa | ||
|
|
1607b74126 | ||
|
|
7dffd1a4b7 | ||
|
|
01b118e590 | ||
|
|
b778af156b | ||
|
|
6d806a7096 | ||
|
|
80506e9a3b | ||
|
|
6b7e2da1df | ||
|
|
63f9fbe202 | ||
|
|
2cb0c17808 | ||
|
|
e5a6cb0a97 | ||
|
|
8e98905baf | ||
|
|
63ceaee955 | ||
|
|
74e5a8c669 | ||
|
|
db3ab26af0 | ||
|
|
fd4f405b1f | ||
|
|
3beea19496 | ||
|
|
d9087c6b93 | ||
|
|
a1e02f243c | ||
|
|
8ef61fb6df | ||
|
|
1546dab363 | ||
|
|
158789b40a | ||
|
|
cd98518ccd | ||
|
|
0cf47c7ea0 | ||
|
|
3133a4530e | ||
|
|
5e3bbb946f | ||
|
|
a36f5ffadc | ||
|
|
bd665f12a2 | ||
|
|
c9499897fc | ||
|
|
c4ff643cb1 | ||
|
|
374c0c18d6 | ||
|
|
ac20cd3224 | ||
|
|
bd2eb4419a | ||
|
|
e2d5df156b | ||
|
|
9385153b92 | ||
|
|
84a8e9b40e | ||
|
|
b96d31f3a0 | ||
|
|
81c7325cb3 | ||
|
|
16ab201b31 | ||
|
|
c8698fc4df | ||
|
|
9c153163ae | ||
|
|
68d99200ac | ||
|
|
9ba6975f59 | ||
|
|
ef8cd430fb | ||
|
|
2aa88fd163 | ||
|
|
d47950c551 | ||
|
|
bf863f7be2 | ||
|
|
b82277515f | ||
|
|
ddb7efa593 | ||
|
|
0c5051d37b | ||
|
|
0aaab9c3dd | ||
|
|
62d56dcb6f | ||
|
|
4a5403653e | ||
|
|
a83a090c34 | ||
|
|
3edd916bc9 | ||
|
|
bb0c756d2c | ||
|
|
622c6f9217 | ||
|
|
f13cc06e3d | ||
|
|
208862096a | ||
|
|
49c1f2beda | ||
|
|
89a39690ea | ||
|
|
5e18e648f4 | ||
|
|
ac41b296d3 | ||
|
|
37b19b70df | ||
|
|
afa9bafce3 | ||
|
|
625a3383ec | ||
|
|
5c4aec6eae | ||
|
|
e0ea10ed53 | ||
|
|
82eb7dde49 | ||
|
|
879401a1c4 | ||
|
|
8030440fd5 | ||
|
|
9b5bbdc687 | ||
|
|
85370fe42a | ||
|
|
d186c3b395 | ||
|
|
70bdfb81c9 | ||
|
|
e90bc0f1d1 | ||
|
|
b683cbc8bf | ||
|
|
b69c786969 | ||
|
|
787c7c0b3a | ||
|
|
8a76eff4db | ||
|
|
596af8013e | ||
|
|
ef5b9d1e98 | ||
|
|
3f85bd7684 | ||
|
|
5b42947bbc | ||
|
|
2dde6d6b21 | ||
|
|
21d720b99f | ||
|
|
b04bfde009 | ||
|
|
de955402f0 | ||
|
|
6db0bc8ec6 | ||
|
|
8f8c62c1af | ||
|
|
cb16da1325 | ||
|
|
33f9ff86ee | ||
|
|
d5ebb45c7b | ||
|
|
419da57202 | ||
|
|
da91b53067 | ||
|
|
98e0f77fde | ||
|
|
9b701d6d39 | ||
|
|
3bcba0e7ab | ||
|
|
d385cc4941 | ||
|
|
4a861dfd94 | ||
|
|
1dd8129a5c | ||
|
|
fe7534f471 | ||
|
|
9931f0aad2 | ||
|
|
2b809bbc6b | ||
|
|
dc41172052 | ||
|
|
d9b3d7da37 | ||
|
|
a56dc479e5 | ||
|
|
c148140d75 | ||
|
|
71e7424baf | ||
|
|
b6d9c3ab03 | ||
|
|
bf8f586984 | ||
|
|
d1e5ab04a8 | ||
|
|
abed282db3 | ||
|
|
c4aebdada7 | ||
|
|
c232f42a98 | ||
|
|
09242faf94 | ||
|
|
a0028aeb69 | ||
|
|
06b2467db1 | ||
|
|
727cf3291d | ||
|
|
60d0f5edac | ||
|
|
7b4cd8c86a | ||
|
|
f7890ce933 | ||
|
|
56617462aa | ||
|
|
011d0637e9 | ||
|
|
8ebe4085e4 | ||
|
|
ba53475f43 | ||
|
|
18255634a2 | ||
|
|
9ab32233a2 | ||
|
|
f5c20cf30f | ||
|
|
af2be8b0b9 | ||
|
|
750b3db7bf | ||
|
|
9c1f304325 | ||
|
|
e120d29cf4 | ||
|
|
f888bd83b7 | ||
|
|
c4f64c8288 | ||
|
|
86ac54fb06 | ||
|
|
c33c2f17e0 | ||
|
|
5e0ae8c4b5 | ||
|
|
99426111dd | ||
|
|
5bf6a2ccbe | ||
|
|
7288d4ccc0 | ||
|
|
549d046dc2 | ||
|
|
bf32f67d4e | ||
|
|
1160de97c9 | ||
|
|
95bb44a84e | ||
|
|
8b59af3232 | ||
|
|
5f03c8292c | ||
|
|
48084c86ca | ||
|
|
68445ae577 | ||
|
|
d2f4353c34 | ||
|
|
640e2037c6 | ||
|
|
8949e80b01 | ||
|
|
ed2ba3fc89 | ||
|
|
25fa79a352 | ||
|
|
4f07194770 | ||
|
|
fb8e051ec1 | ||
|
|
d604c27578 | ||
|
|
a0092beff1 | ||
|
|
b13798c951 | ||
|
|
02f3203e03 | ||
|
|
6713a3729f | ||
|
|
698588b491 | ||
|
|
4fd47f1a86 | ||
|
|
a855c2be97 | ||
|
|
025a73feb2 | ||
|
|
292bb77bd4 | ||
|
|
2d2cdf277b | ||
|
|
2314c72bd9 | ||
|
|
6b426e29c5 | ||
|
|
9330b2efe1 | ||
|
|
cc5b62f304 | ||
|
|
8800543297 | ||
|
|
3878caf0b7 | ||
|
|
01148433bd | ||
|
|
6407e258b5 | ||
|
|
58e85adb1a | ||
|
|
c81bbaa452 | ||
|
|
95fd92a404 | ||
|
|
28c1ddeed8 | ||
|
|
c757bca4b1 | ||
|
|
4ada7d1784 | ||
|
|
9094137d25 | ||
|
|
f25a2feb8d | ||
|
|
8a2251627f | ||
|
|
f809ceb59c | ||
|
|
c3886539cd | ||
|
|
7135a06b5e | ||
|
|
b8c5c261b8 | ||
|
|
232b3a433a | ||
|
|
f13860ebb9 | ||
|
|
dbe30a407a | ||
|
|
bbb406c869 | ||
|
|
30be0dfd30 | ||
|
|
ca669fd31a | ||
|
|
e0bcde178e | ||
|
|
295473551f | ||
|
|
d19c4391d4 | ||
|
|
ffb43ac977 | ||
|
|
9a480ff627 | ||
|
|
03c8370ad5 | ||
|
|
669e66a1e7 | ||
|
|
811177099e | ||
|
|
12d126339e | ||
|
|
2c53530e99 | ||
|
|
e0d8e6b75f | ||
|
|
0cd0a71b63 | ||
|
|
3a9e6c1a79 | ||
|
|
ce33e238a9 | ||
|
|
160cec0adb | ||
|
|
4a747b738f | ||
|
|
56bda48c27 | ||
|
|
44fcf033ab | ||
|
|
2b3abeff4e | ||
|
|
8503e961d8 | ||
|
|
ee13f1ac87 | ||
|
|
76a5acb725 | ||
|
|
5fa07e5768 | ||
|
|
d9c9aebc18 | ||
|
|
b5989d0b18 | ||
|
|
a1444fe603 | ||
|
|
589b26cb18 | ||
|
|
73d179c75a | ||
|
|
b6fb3d6f21 | ||
|
|
7d70de376e | ||
|
|
3ae571ea35 | ||
|
|
d1be029da1 | ||
|
|
c98011901b | ||
|
|
956d038275 | ||
|
|
ce5830802a | ||
|
|
a03b22f6be | ||
|
|
4263d9cdda | ||
|
|
afa22d11d6 | ||
|
|
b5ec610f97 | ||
|
|
5746bfe806 | ||
|
|
9f6db44bbb | ||
|
|
6f7ba67368 | ||
|
|
38502c0218 | ||
|
|
4fabd6d21f | ||
|
|
8ff34d0cd7 | ||
|
|
1ebe617833 | ||
|
|
f46b0a0ffb | ||
|
|
c5ff923bbd | ||
|
|
40488944aa | ||
|
|
52ec72e110 | ||
|
|
5a3b4f3d1d | ||
|
|
2c74232400 | ||
|
|
c05b48f2eb | ||
|
|
05677e176b | ||
|
|
133b16bead | ||
|
|
fd02a74b46 | ||
|
|
98c3f6b781 | ||
|
|
db95d4cb84 | ||
|
|
08a1e22973 | ||
|
|
307644a8c5 | ||
|
|
ad05c9886d | ||
|
|
5f795e203a | ||
|
|
622e0a2d62 | ||
|
|
204af6597d | ||
|
|
71ff6f1b8c | ||
|
|
d14ccd71cd | ||
|
|
aec0e2fe7a | ||
|
|
df5c66d8a7 | ||
|
|
ef684baa44 | ||
|
|
147425837c | ||
|
|
2fcf5352b2 | ||
|
|
dcdc0c9727 | ||
|
|
9799ec5551 | ||
|
|
f475631cdd | ||
|
|
34309a6c00 | ||
|
|
5fe95adc06 | ||
|
|
0c94bb5f25 | ||
|
|
fb1c34ec9f | ||
|
|
ef1ded3460 | ||
|
|
52e7f92e54 | ||
|
|
953060335d | ||
|
|
d46124a9d8 | ||
|
|
15d50a5ccb | ||
|
|
b2f52f08f4 | ||
|
|
e585658865 | ||
|
|
86978b5924 | ||
|
|
857b72df87 | ||
|
|
0eb04c1e34 | ||
|
|
328643e5f2 | ||
|
|
12c5d54583 | ||
|
|
c7a5498f0f | ||
|
|
cf14609b51 | ||
|
|
ed5fd3416a | ||
|
|
2d8fa5ca6f | ||
|
|
d76317fbf3 | ||
|
|
a758acef2c | ||
|
|
515742ee61 | ||
|
|
8aa6452cc4 | ||
|
|
dffc1dfd51 | ||
|
|
ce4cefe7e7 | ||
|
|
5c7acbc719 | ||
|
|
a36eadf554 | ||
|
|
b904041ea1 | ||
|
|
117e8c8dd1 | ||
|
|
2cfafcfbf0 | ||
|
|
2704bcee5e | ||
|
|
9f3a2d4f05 | ||
|
|
757baba3ff | ||
|
|
02dce41937 | ||
|
|
5886d75059 | ||
|
|
cbd2e49d97 | ||
|
|
dab4e90e15 | ||
|
|
bb65473416 | ||
|
|
8be2a0b2e1 | ||
|
|
66fc7ccb31 | ||
|
|
5ae044f53d | ||
|
|
7de965ab3f | ||
|
|
c821b294c6 | ||
|
|
281cb0ef37 | ||
|
|
7bc7d9213d | ||
|
|
a9702e4629 | ||
|
|
376ecf0c5f | ||
|
|
3a9dfa4c59 | ||
|
|
7c4fc45b4a | ||
|
|
ed12b2c7d6 | ||
|
|
a0f5aa942d | ||
|
|
c9adedf746 | ||
|
|
4177c37b51 | ||
|
|
077e143cc2 | ||
|
|
78df4915cf | ||
|
|
48ac1c91cd | ||
|
|
e0b16cf4ac | ||
|
|
3582ada3df | ||
|
|
21c0cdcb76 | ||
|
|
b00570f6d9 | ||
|
|
8d0c5179ed | ||
|
|
e292ffebaf | ||
|
|
504634b4a6 | ||
|
|
b3c506cd94 | ||
|
|
d676280a5c | ||
|
|
e746a45158 | ||
|
|
2240033b07 | ||
|
|
4ecb70c5e3 | ||
|
|
22295350a6 | ||
|
|
46f31cb643 | ||
|
|
0799be7e28 | ||
|
|
8df82909b2 | ||
|
|
437e066a66 | ||
|
|
30ba51593f | ||
|
|
4d51469473 | ||
|
|
62e330469c | ||
|
|
5e39dd1d26 | ||
|
|
9adcad8b8a | ||
|
|
b8830f8625 | ||
|
|
0fa5286ad0 | ||
|
|
3d43117554 | ||
|
|
2b56996a27 | ||
|
|
573130549f | ||
|
|
c562fbf4bc | ||
|
|
050c52a008 | ||
|
|
51d8b43fbf | ||
|
|
e8b6676b22 | ||
|
|
bfdfeff1b3 | ||
|
|
1e756ed5f1 | ||
|
|
bc600169a9 | ||
|
|
1e1eff70bc | ||
|
|
7dc6d736c7 | ||
|
|
f6b9e86ccb | ||
|
|
d89264998d | ||
|
|
69bd41f741 | ||
|
|
6afdd1be10 | ||
|
|
89c1f2d0c0 | ||
|
|
db86ccdcb4 | ||
|
|
a38d400207 | ||
|
|
4f87f70904 | ||
|
|
2d05c3ec56 | ||
|
|
a35569a77b | ||
|
|
a6f4fa4416 | ||
|
|
f6da992b3e | ||
|
|
43a62fdc7c | ||
|
|
426b7d12da | ||
|
|
59f353ae06 | ||
|
|
6735b8155c | ||
|
|
6cf8ec9315 | ||
|
|
8b8b3a2cdd | ||
|
|
082a876612 | ||
|
|
e56b112aab | ||
|
|
8bce02736b | ||
|
|
57315bddfb | ||
|
|
487f99f8f2 | ||
|
|
f0f7d2be90 | ||
|
|
e34c83ca1c | ||
|
|
3defe044bd | ||
|
|
4562bc6caf | ||
|
|
e61523e59e | ||
|
|
6ef32a9b1f | ||
|
|
9ede17891b | ||
|
|
9a2a9f7439 | ||
|
|
16787b32e5 | ||
|
|
0f538f6e2c | ||
|
|
35b072f7e8 | ||
|
|
ebe3221405 | ||
|
|
3b7e1014f6 | ||
|
|
053caaa222 | ||
|
|
9e5492bd13 | ||
|
|
9706ff8c26 | ||
|
|
bde007e6f7 | ||
|
|
bfd0d5c826 | ||
|
|
ac17518663 | ||
|
|
e4a337f1a5 | ||
|
|
6893b7e5e7 | ||
|
|
0b4ae5ea78 | ||
|
|
70e8b07428 | ||
|
|
e0882955e3 | ||
|
|
2fcd91b765 | ||
|
|
82d8f67f6a | ||
|
|
c76062b092 | ||
|
|
5b19340f8e | ||
|
|
4ebc5aa3b3 | ||
|
|
de44d6ace5 | ||
|
|
e6f7bcf0ae | ||
|
|
7102fe1a18 | ||
|
|
74fc969dd6 | ||
|
|
bfb45f2cbd | ||
|
|
9cf35010c6 | ||
|
|
0c8288b5e1 | ||
|
|
b9f01330db | ||
|
|
66635f3ae6 | ||
|
|
bfc7dfdb29 | ||
|
|
351131bbff | ||
|
|
e3c453f10e | ||
|
|
cef259c945 | ||
|
|
744275b932 | ||
|
|
f8e550773f | ||
|
|
7e45b6a975 | ||
|
|
c67940edec | ||
|
|
e6e92e9952 | ||
|
|
62d37755bc | ||
|
|
ed9aef5f43 | ||
|
|
73a3e9e42d | ||
|
|
e25f610344 | ||
|
|
7f098d5fb6 | ||
|
|
5318535d0d | ||
|
|
f183e91ccd | ||
|
|
101ffdbce0 | ||
|
|
07133fb041 | ||
|
|
838f72097c | ||
|
|
2062844fa6 | ||
|
|
0f33416b0e | ||
|
|
7c352b745e | ||
|
|
2987d71264 | ||
|
|
5070cc32ac | ||
|
|
fce421fb33 | ||
|
|
975094fcdd | ||
|
|
ac5af73696 | ||
|
|
30f153e695 | ||
|
|
80151dd6b2 | ||
|
|
0c81585a53 | ||
|
|
11303e2ef7 | ||
|
|
76ee994d2c | ||
|
|
f933717d8b | ||
|
|
445228348d | ||
|
|
c025132c84 | ||
|
|
36ef54340f | ||
|
|
fa0df12439 | ||
|
|
d6a6e69f2e | ||
|
|
2411c35d0e | ||
|
|
a2f79760ce | ||
|
|
06a6f08054 | ||
|
|
2f28a66591 | ||
|
|
60a7ac2343 | ||
|
|
22458a04e8 | ||
|
|
8c44b9eddf | ||
|
|
a7972ad873 | ||
|
|
bafcdcea7c | ||
|
|
ebe1861894 | ||
|
|
5d5a35e383 | ||
|
|
9f353f41c4 | ||
|
|
b1570543c8 | ||
|
|
9e99db1d3e | ||
|
|
84f170c9e0 | ||
|
|
4be22ae5ab | ||
|
|
321edc5e3d | ||
|
|
7604ae07bb | ||
|
|
adc6b225a6 | ||
|
|
31c1192719 | ||
|
|
d1c5e0a91a | ||
|
|
f895d54e02 | ||
|
|
4fa9f72083 | ||
|
|
66c9e68b04 | ||
|
|
a5073ab577 | ||
|
|
de97097db1 | ||
|
|
cfdb24efac | ||
|
|
307f6e50ad | ||
|
|
7309c12592 | ||
|
|
222101b30e | ||
|
|
b6562f3420 | ||
|
|
ffd1d15a0e | ||
|
|
15c5469bb1 | ||
|
|
15ff666d3f | ||
|
|
857d26d101 | ||
|
|
32038c9f5b | ||
|
|
e5974ca3ea | ||
|
|
b7deb984f7 | ||
|
|
04536e92a5 | ||
|
|
e5d6206a15 | ||
|
|
4e3f832dc3 | ||
|
|
012a286921 | ||
|
|
ea1b28f025 | ||
|
|
d147b42cf7 | ||
|
|
7632067768 | ||
|
|
a564fdf034 | ||
|
|
f4c000a547 | ||
|
|
162d77707b | ||
|
|
2568164cb3 | ||
|
|
3e697d5bd0 | ||
|
|
1fead303a0 | ||
|
|
2a8f4ce0b7 | ||
|
|
71ca4ea990 | ||
|
|
eb2410ac6c | ||
|
|
abb397e442 | ||
|
|
1eb278f3cc | ||
|
|
f37981c388 | ||
|
|
7d923f83e6 | ||
|
|
9f737274b7 | ||
|
|
ee7d04775e | ||
|
|
0abfa3a68f | ||
|
|
8077d2a249 | ||
|
|
a7f805604c | ||
|
|
51f2295971 | ||
|
|
85651de3bc | ||
|
|
7d4d51ccbb | ||
|
|
c6a22abb10 | ||
|
|
096d27f342 | ||
|
|
7bac56b57d | ||
|
|
dc1b484630 | ||
|
|
10d7747ae2 | ||
|
|
7f6f18f642 | ||
|
|
0b6fec4a28 | ||
|
|
4a307ad4eb | ||
|
|
e02105ee89 | ||
|
|
d923004e20 | ||
|
|
9943c58fba | ||
|
|
244393e4ef | ||
|
|
a30e5a85b2 | ||
|
|
512d7ba208 | ||
|
|
195a7fcad8 | ||
|
|
12588b6483 | ||
|
|
426cfef882 | ||
|
|
3525a4b6db | ||
|
|
f0a5250da5 | ||
|
|
a1e5be7077 | ||
|
|
bbaa5b89c2 | ||
|
|
6e6e7fcc9a | ||
|
|
a5544a06e1 | ||
|
|
c17d825bba | ||
|
|
c76c67a69c | ||
|
|
49d1a5a17b | ||
|
|
0c8f2cfd1c | ||
|
|
3df8c1b501 | ||
|
|
857c330d58 | ||
|
|
07d9b584f7 | ||
|
|
7bf39cbb72 | ||
|
|
f277e6aa2d | ||
|
|
d5afbbee26 | ||
|
|
a9d177eeeb | ||
|
|
ff46c16805 | ||
|
|
62dd1675a0 | ||
|
|
e60dfca9fc | ||
|
|
a7faab32fe | ||
|
|
2ce6ae6707 | ||
|
|
fd04db12fa | ||
|
|
bc5dbb6692 | ||
|
|
9150f32f8b | ||
|
|
6fb9b6d03b | ||
|
|
4e621280bb | ||
|
|
0594ba33a2 | ||
|
|
097ce08908 | ||
|
|
c1ee8cb62e | ||
|
|
6b9e3b21d3 | ||
|
|
15c6b0c1c3 | ||
|
|
6ff8478118 | ||
|
|
3c51ff501f | ||
|
|
f5a447308d | ||
|
|
fdca233fe3 | ||
|
|
3f2547295f | ||
|
|
5fe600af9d | ||
|
|
3081f56ecb | ||
|
|
923c67e92a | ||
|
|
474a9c4d95 | ||
|
|
ff4e53d0e6 | ||
|
|
cce50bef50 | ||
|
|
8215039785 | ||
|
|
94280b2d14 | ||
|
|
3d06b2e4c0 | ||
|
|
12ed5a957b | ||
|
|
3b0d49a3e0 | ||
|
|
bd2e26a20f | ||
|
|
f4505add69 | ||
|
|
1e851ba3ea | ||
|
|
dae58f8167 | ||
|
|
c9f1f050af | ||
|
|
624af3945c | ||
|
|
463dc54733 | ||
|
|
fdc6e12945 | ||
|
|
20a4922b40 | ||
|
|
1b04e5cafc | ||
|
|
835decc6c1 | ||
|
|
53efa8f6bf | ||
|
|
055806e124 | ||
|
|
dafbd11686 | ||
|
|
8a881f70a3 | ||
|
|
ee6b97ef5e | ||
|
|
60ac0c4da1 | ||
|
|
53add7b594 | ||
|
|
576f24e3ae | ||
|
|
25a7957bb8 | ||
|
|
3dbc377308 | ||
|
|
74e8a886e6 | ||
|
|
120fe762df | ||
|
|
59d31b021d | ||
|
|
af28510aba | ||
|
|
84e58051fa | ||
|
|
02846fcf91 | ||
|
|
0a20fa4fdf | ||
|
|
c4e2d8fbdd | ||
|
|
3e5868f223 | ||
|
|
4c11b21dff | ||
|
|
378126822f | ||
|
|
55a8e242b0 | ||
|
|
79ba85a22e | ||
|
|
ce44012866 | ||
|
|
4cc6e27f02 | ||
|
|
9156db41a1 | ||
|
|
9247f9480c | ||
|
|
fc0688673e | ||
|
|
15c157343d | ||
|
|
3fffe65a3b | ||
|
|
63b79a88c6 | ||
|
|
9e9b128b72 | ||
|
|
64973bfe12 | ||
|
|
41df0204f3 | ||
|
|
f6ee61d607 | ||
|
|
87776b2886 | ||
|
|
86b6231f70 | ||
|
|
387f65c16c | ||
|
|
4c25fabec9 | ||
|
|
6806b66509 | ||
|
|
b56352e218 | ||
|
|
d3a1770dc0 | ||
|
|
22429c6e98 | ||
|
|
56650b60f4 | ||
|
|
ba8046753e | ||
|
|
d34b8a2b61 | ||
|
|
1446ffddb0 | ||
|
|
1ddf2324ff | ||
|
|
b5d78a48a7 | ||
|
|
1127d9c5f5 | ||
|
|
31cd836530 | ||
|
|
daafda320b | ||
|
|
ee9f10a8d8 | ||
|
|
78774526f4 | ||
|
|
44f6d946f5 | ||
|
|
03036c1bd6 | ||
|
|
4b7fa7f49d | ||
|
|
e05c000615 | ||
|
|
064e95b46b | ||
|
|
e7c0d3330c | ||
|
|
6c45fcd067 | ||
|
|
acfd966aa4 | ||
|
|
f07fcdf0a7 | ||
|
|
d3fc8c4286 | ||
|
|
ddf59273e6 | ||
|
|
bfbe613960 | ||
|
|
10489e0df2 | ||
|
|
6e14782f82 | ||
|
|
2a6f01aae2 | ||
|
|
1ab3a40f03 | ||
|
|
e3e9a8121c | ||
|
|
43565b280f | ||
|
|
5168cb5219 | ||
|
|
438d3e489b | ||
|
|
d4d50fda7c | ||
|
|
a8af3313c1 | ||
|
|
9c7f5671e6 | ||
|
|
3a2800bcdd | ||
|
|
cc709bbbaa | ||
|
|
77ee9f8119 | ||
|
|
3e24d312d3 | ||
|
|
360d5cd577 | ||
|
|
dcb1cbe5d6 | ||
|
|
31525dfef7 | ||
|
|
e1c6778c3a | ||
|
|
bcc32cc441 | ||
|
|
57ea7b5216 | ||
|
|
c30f5b7d5e | ||
|
|
023a50d26b | ||
|
|
7e21f38185 | ||
|
|
c3f7916dfd | ||
|
|
a08fc850f0 | ||
|
|
bf33f4a7b0 | ||
|
|
a6d4deaf20 | ||
|
|
ccc03c1a8d | ||
|
|
ee98641210 | ||
|
|
812be60d2a | ||
|
|
f13aba0958 | ||
|
|
5b4bcf1a04 | ||
|
|
e2851dc73f | ||
|
|
c4b32e067c | ||
|
|
dc41db9522 | ||
|
|
0b39b92bde | ||
|
|
19767ca4a4 | ||
|
|
42a5a0ce13 | ||
|
|
1c399e67f4 | ||
|
|
7508e9941f | ||
|
|
feae20d8fa | ||
|
|
7c71b43d42 | ||
|
|
2cd835e5a4 | ||
|
|
1c96a5acf7 | ||
|
|
55af3e19b4 | ||
|
|
52874cc442 | ||
|
|
627aeb20a2 | ||
|
|
f424fac1d8 | ||
|
|
c1cd54d1ea | ||
|
|
0839a16d27 | ||
|
|
cb2b13dff6 | ||
|
|
85fe6f36c2 | ||
|
|
517c080544 | ||
|
|
c04f2210cc | ||
|
|
6c4426d8e6 | ||
|
|
17c45ee53a | ||
|
|
3c80e05e00 | ||
|
|
cf35112291 | ||
|
|
d06d8a609f | ||
|
|
16b7e7a91e | ||
|
|
1a2f7684bc | ||
|
|
19e642259c | ||
|
|
3d494f1032 | ||
|
|
dda8d0f6bf | ||
|
|
f778483ac3 | ||
|
|
15ebe23bc2 | ||
|
|
bc6f8a27ff | ||
|
|
2d9b9294d0 | ||
|
|
de6b8ee9f2 | ||
|
|
a110ff94a5 | ||
|
|
4143d212a5 | ||
|
|
e6f8e51504 | ||
|
|
b958386689 | ||
|
|
2f7beebc61 | ||
|
|
900de5fe63 | ||
|
|
233f900fa6 | ||
|
|
7a34d49264 | ||
|
|
6c78d80d37 | ||
|
|
12e806d754 | ||
|
|
21a202b655 | ||
|
|
79fba4ab7b | ||
|
|
c771e1fd50 | ||
|
|
a60512cdae | ||
|
|
b06ea616d9 | ||
|
|
2513178980 | ||
|
|
8f3119621c | ||
|
|
5989c14577 | ||
|
|
980bbe2bc3 | ||
|
|
23e1e1ed53 | ||
|
|
0166eacb2b | ||
|
|
33a3e6f998 | ||
|
|
d1327fd1c2 | ||
|
|
baa7873ec1 | ||
|
|
dc959596fc | ||
|
|
08bc8ff3f7 | ||
|
|
ca5abff93f | ||
|
|
d184d0d235 | ||
|
|
06317dfb2b | ||
|
|
d57af05f66 | ||
|
|
7eddfacd10 | ||
|
|
3df88be6cb | ||
|
|
b496cdcfb2 | ||
|
|
dfb73204bf | ||
|
|
6d4bea3bb6 | ||
|
|
e12438de41 | ||
|
|
f2bef76368 | ||
|
|
ea08050049 | ||
|
|
7d234522b7 | ||
|
|
d9170cab22 | ||
|
|
ad8b8cb9eb | ||
|
|
fad24b3525 | ||
|
|
b0163230a9 | ||
|
|
cb97f5c101 | ||
|
|
a48f26c150 | ||
|
|
911cea781f | ||
|
|
d2a9e54dfb | ||
|
|
d74428057e | ||
|
|
e21917cc93 | ||
|
|
04722e7fc5 | ||
|
|
26c6cfeefd | ||
|
|
479c7468b4 | ||
|
|
b0c6ed999c | ||
|
|
b5f95fd672 | ||
|
|
d8f5cdbb50 | ||
|
|
6e5ddeb015 | ||
|
|
725abbb662 | ||
|
|
e4129e1a3a | ||
|
|
dbd68df40c | ||
|
|
3a80e2f399 | ||
|
|
0e1c0c55f8 | ||
|
|
2e9c80a486 | ||
|
|
1d26f6b697 | ||
|
|
4767fe63d3 | ||
|
|
0ef6f06462 | ||
|
|
a5f856328d | ||
|
|
7fc6f2abfc | ||
|
|
94ec4a4ea5 | ||
|
|
9c56b1beef | ||
|
|
34261a1583 | ||
|
|
d8968ae899 | ||
|
|
6ae90a3ea2 | ||
|
|
c317cf0e75 | ||
|
|
c1329c92fd | ||
|
|
abd6115aea | ||
|
|
6d2c0c4242 | ||
|
|
aab79fdf6d | ||
|
|
dd96d98fa1 | ||
|
|
064ac5c742 | ||
|
|
06ae4684c8 | ||
|
|
6997bb0bdd | ||
|
|
cdd91f7ea3 | ||
|
|
4f72ee7815 | ||
|
|
095883ca54 | ||
|
|
f77c3604ce | ||
|
|
2d058feaf8 | ||
|
|
9c6494aca7 | ||
|
|
b8478a96ae | ||
|
|
c7d75643d3 | ||
|
|
cfc7817869 | ||
|
|
92009ceb32 | ||
|
|
aa3e37ac14 | ||
|
|
3b74d2150e | ||
|
|
ee4043ae19 | ||
|
|
c1f1da27e7 | ||
|
|
aebe891489 | ||
|
|
cf5fdabdfc | ||
|
|
20ef130341 | ||
|
|
1772a01d04 | ||
|
|
5ce6da95fc | ||
|
|
94dc6f19aa | ||
|
|
427b8648ee | ||
|
|
4b54e3c6d8 | ||
|
|
6b4ad1f933 | ||
|
|
3d89ed1787 | ||
|
|
adbb47fb65 | ||
|
|
7cd76b8d8e | ||
|
|
9e17a304de | ||
|
|
7a161cc0bd | ||
|
|
d8c16de123 | ||
|
|
65b6c2706e | ||
|
|
76bd192f82 | ||
|
|
02f546d2bc | ||
|
|
3b56716a68 | ||
|
|
a3195d84d3 | ||
|
|
bfaf36099e | ||
|
|
7a006afb17 | ||
|
|
cd8fdb31ef | ||
|
|
a0cfdb0830 | ||
|
|
83f11465f5 | ||
|
|
76df14b831 | ||
|
|
109fa04c7c | ||
|
|
a6355a6bc8 | ||
|
|
0ff471a49a | ||
|
|
4241fbbbf0 | ||
|
|
3ae6c1b03f | ||
|
|
1e71f952f9 | ||
|
|
749b1bbfc0 | ||
|
|
265a23212e | ||
|
|
f0f34030a0 | ||
|
|
d75379358f | ||
|
|
8670b3039e | ||
|
|
eec86a7b82 | ||
|
|
fac8f7da21 | ||
|
|
6fbac455d4 | ||
|
|
1806fc683d | ||
|
|
f962939737 | ||
|
|
2619740daa | ||
|
|
940b115f0a | ||
|
|
58d84787f3 | ||
|
|
6fc6ea69d2 | ||
|
|
93bbd13a34 | ||
|
|
ae31dd4bb1 | ||
|
|
411a13a0d4 | ||
|
|
eb0e96715e | ||
|
|
7e5afd8744 | ||
|
|
960eb4f367 | ||
|
|
956d9fdcd6 | ||
|
|
140fd6f3bf | ||
|
|
3d47b47901 | ||
|
|
c7f4734826 | ||
|
|
45f9b570a2 | ||
|
|
29284a5460 | ||
|
|
dfcbf6eee6 | ||
|
|
83b91a31bc | ||
|
|
b984f985bc | ||
|
|
a5cc67badd | ||
|
|
8bf4eb7e90 | ||
|
|
128d83a0c8 | ||
|
|
5de1025520 | ||
|
|
4a206168a7 | ||
|
|
5f646498c4 | ||
|
|
06e81b7dfd | ||
|
|
97d2f417c7 | ||
|
|
45f2513a73 | ||
|
|
1f58ca47b5 | ||
|
|
17819e2a55 | ||
|
|
ffdc652605 | ||
|
|
3886afc825 | ||
|
|
cade788a7e | ||
|
|
9c60eecce6 | ||
|
|
f8dfedf1c6 | ||
|
|
40a75c804c | ||
|
|
794a164098 | ||
|
|
89125376ba | ||
|
|
efc17f21b9 | ||
|
|
7ddc44d48e | ||
|
|
e8473d4920 | ||
|
|
91aa40e0df | ||
|
|
882a9086a8 | ||
|
|
43fa67ca81 | ||
|
|
715916a5ba | ||
|
|
a28b8906a6 | ||
|
|
aedd288dbe | ||
|
|
680c7b5aaa | ||
|
|
374f543bea | ||
|
|
ec71075bfe | ||
|
|
d6ef9d1b5d | ||
|
|
bf895eb656 | ||
|
|
dcd6aa912b | ||
|
|
da48f9c972 | ||
|
|
cac1ea27e2 | ||
|
|
6e588bb2ed | ||
|
|
1c352f5ff0 | ||
|
|
582c85b140 | ||
|
|
a38646409f | ||
|
|
4906e3d7ef | ||
|
|
9ed2a7a2d2 | ||
|
|
eaa6ed85e1 | ||
|
|
0b08b4f1c5 | ||
|
|
bb786461c7 | ||
|
|
bc354a3df6 | ||
|
|
f462674e32 | ||
|
|
2b5852f7da | ||
|
|
b8c7c05dd5 | ||
|
|
986bdaab36 | ||
|
|
d3e4ec14a6 | ||
|
|
b7cd56f72b | ||
|
|
78a6b44b21 | ||
|
|
eb5a8a87d8 | ||
|
|
0410331ecd | ||
|
|
996a3b331a | ||
|
|
8173e4d139 | ||
|
|
5a95ead608 | ||
|
|
f04755be30 | ||
|
|
ea26988a95 | ||
|
|
f9f540738c | ||
|
|
894027f5f6 | ||
|
|
8e8a5a1522 | ||
|
|
1ffa9b2ebe | ||
|
|
ad5d8b2341 | ||
|
|
e9f3f9bd1d | ||
|
|
e39cd1bf57 | ||
|
|
0efbe23d89 | ||
|
|
b4bd11d708 | ||
|
|
fe0baf233d | ||
|
|
e9e1f04818 | ||
|
|
602b6e9901 | ||
|
|
1b043305c1 | ||
|
|
ba87cb0867 | ||
|
|
798d2d6978 | ||
|
|
fc4b5ad1d2 | ||
|
|
e09bbc43d4 | ||
|
|
ca31c4699a | ||
|
|
6e5df9e9e7 | ||
|
|
780a77bb31 | ||
|
|
f342b84479 | ||
|
|
019ac37d49 | ||
|
|
3ab67e746d | ||
|
|
e8aaba9ce2 | ||
|
|
f3ac658dd0 | ||
|
|
7c4921758c | ||
|
|
3bf5934b20 | ||
|
|
a8fe3085fd | ||
|
|
14a1588ffd | ||
|
|
504a85bbdb | ||
|
|
9dcdb6d6f8 | ||
|
|
1520816e61 | ||
|
|
a2e16695af | ||
|
|
e2b599051e | ||
|
|
e20d388ec9 | ||
|
|
44d3302b4e | ||
|
|
72a56acfb8 | ||
|
|
b975aaa848 | ||
|
|
77de428524 | ||
|
|
6c5d21cbfc | ||
|
|
04093e9517 | ||
|
|
8364426420 | ||
|
|
3dd07d3119 | ||
|
|
68803d559c | ||
|
|
a63fc643c8 | ||
|
|
7a9c6a52fa | ||
|
|
81de438569 | ||
|
|
185429287e | ||
|
|
c2f86f6934 | ||
|
|
7f99fa3da8 | ||
|
|
c58cf15565 | ||
|
|
4eaec80438 | ||
|
|
7b22809530 | ||
|
|
d573bee791 | ||
|
|
e7c2a4068e | ||
|
|
45a9ff6e74 | ||
|
|
781f2934e6 | ||
|
|
1b5743dc73 | ||
|
|
26ee15d327 | ||
|
|
78bddf3055 | ||
|
|
de1ea5f916 | ||
|
|
d5162d332f | ||
|
|
63c2182870 | ||
|
|
b49ef913a8 | ||
|
|
ec27d5729c | ||
|
|
a2e75aabdd | ||
|
|
6b7787ce99 | ||
|
|
b05d56462b | ||
|
|
558003704e | ||
|
|
00ecb983e7 | ||
|
|
f26541188b | ||
|
|
91537b0496 | ||
|
|
1e3bcc3f8b | ||
|
|
8faf4f5f79 | ||
|
|
48f4119fb7 | ||
|
|
ad6f18b737 | ||
|
|
68e479bdbd | ||
|
|
1dd8e570a5 | ||
|
|
511b0212c6 | ||
|
|
121e08c18e | ||
|
|
785c90ddb7 | ||
|
|
ef5c4f8a11 | ||
|
|
d9d5fd5b9a | ||
|
|
c145d95312 | ||
|
|
011ed2f2b9 | ||
|
|
625d6e72ec | ||
|
|
37c5ebfe73 | ||
|
|
0efa0d1185 | ||
|
|
14d3ecaae7 | ||
|
|
25db6e56b0 | ||
|
|
e006a61c52 | ||
|
|
5ecb08c8e8 | ||
|
|
0bf4987e1a | ||
|
|
3871fc70ce | ||
|
|
9b78e71d16 | ||
|
|
4c686f8fc0 | ||
|
|
9aacb68fbc | ||
|
|
3de732508c | ||
|
|
cf7544c146 | ||
|
|
2a20ea638e | ||
|
|
6699a8ef38 | ||
|
|
f99c37aede | ||
|
|
bb7ca692e3 | ||
|
|
c09ed61aba | ||
|
|
9f6d6f32a6 | ||
|
|
000389c762 | ||
|
|
c963a209ab | ||
|
|
744c94c96a | ||
|
|
c561fe8925 | ||
|
|
999990b614 | ||
|
|
99eac6c1d9 | ||
|
|
c4008971f7 | ||
|
|
5155056198 | ||
|
|
9cb4739e4a | ||
|
|
fe855fef13 | ||
|
|
b9623ed424 | ||
|
|
7c45b21aa7 | ||
|
|
c9bf95edf4 | ||
|
|
0fa9cf6eb0 | ||
|
|
bcda3c1a32 | ||
|
|
2f053fe9db | ||
|
|
376db5a123 | ||
|
|
3c23e7145d | ||
|
|
981b6073e7 | ||
|
|
a82d49247a | ||
|
|
19f893e1e2 | ||
|
|
c731675443 | ||
|
|
d8fd834142 | ||
|
|
d876de0bef | ||
|
|
16f0e22ffa | ||
|
|
d7679d755f | ||
|
|
23c650ca10 | ||
|
|
d5523600c7 | ||
|
|
f5a2acd82a | ||
|
|
fa91bc154c | ||
|
|
a5a9b5dbd8 | ||
|
|
cdbcd8596e | ||
|
|
9b7719071f | ||
|
|
a71ae26b52 | ||
|
|
66b5c760f4 | ||
|
|
37ff26ec2c | ||
|
|
a2723f16f2 | ||
|
|
a3aaf621fe | ||
|
|
903e21b2dd | ||
|
|
0400d72824 | ||
|
|
e08b4d601f | ||
|
|
20bd2de54a | ||
|
|
52233dff50 | ||
|
|
1f3cd214e6 | ||
|
|
0d7ab414d9 | ||
|
|
7eed489ea1 | ||
|
|
45f490e0ad | ||
|
|
bb2066df04 | ||
|
|
ec945d1022 | ||
|
|
9240a554f1 | ||
|
|
6cecb9766a | ||
|
|
d9cb000f65 | ||
|
|
d163c564e5 | ||
|
|
d4cef97e2f | ||
|
|
ce8dfcc604 | ||
|
|
a56459fee3 | ||
|
|
fa8562bc0c | ||
|
|
c5b81b5e10 | ||
|
|
fdd79223b0 | ||
|
|
a053bb074a | ||
|
|
598eea9851 | ||
|
|
4ba46307f7 | ||
|
|
8581ee2c0c | ||
|
|
ecf2ba12db | ||
|
|
6e94409594 | ||
|
|
239d64a602 | ||
|
|
f582d9ca49 | ||
|
|
fdaa55a452 | ||
|
|
aeb1178a47 | ||
|
|
5b86682e24 | ||
|
|
7086961e00 | ||
|
|
8532307b2f | ||
|
|
4c7b582454 | ||
|
|
3f2d14f4d8 | ||
|
|
2db4a5da57 | ||
|
|
221a4b0b50 | ||
|
|
86d3444fb8 | ||
|
|
4701357a21 | ||
|
|
9514919d37 | ||
|
|
5813592206 | ||
|
|
7d45de8901 | ||
|
|
ac023e95c0 | ||
|
|
085842d43c | ||
|
|
24d5e1fc8a | ||
|
|
da4c765378 | ||
|
|
74aa4add1b | ||
|
|
5576994c2c | ||
|
|
e2accab87e | ||
|
|
b188c2b3e3 | ||
|
|
ebee041c35 | ||
|
|
8020eaa2e9 | ||
|
|
59a9986786 | ||
|
|
ef0216dbe7 | ||
|
|
ae7b81dc50 | ||
|
|
78734dade8 | ||
|
|
89539d0cf1 | ||
|
|
1887f51516 | ||
|
|
3ebe125d3f | ||
|
|
bed0860c71 | ||
|
|
88ebebf74f | ||
|
|
49e4b75039 | ||
|
|
c62c8c6e71 | ||
|
|
894026cdd4 | ||
|
|
913c933e8c | ||
|
|
90e6a55e37 | ||
|
|
4a07790910 | ||
|
|
5752a466a2 | ||
|
|
b5f1ba0df1 | ||
|
|
5c55c35821 | ||
|
|
efb4429d33 | ||
|
|
285188bdde | ||
|
|
4ca8b376b6 | ||
|
|
61f5925502 | ||
|
|
8d9505cda5 | ||
|
|
4cc90b8eb4 | ||
|
|
f00ced6612 | ||
|
|
09e29f1e1b | ||
|
|
486c7e3a5e | ||
|
|
17caf226d0 | ||
|
|
7a6eb19b1c | ||
|
|
c846c1d331 | ||
|
|
d6b1aa677d | ||
|
|
dad4804b4e | ||
|
|
2fbb03dc6c | ||
|
|
fd4a2ed414 | ||
|
|
9d14b113a3 | ||
|
|
0c8467c404 | ||
|
|
a2c0db44d6 | ||
|
|
63c8e4da84 | ||
|
|
7286ef3a52 | ||
|
|
6f87fb63c1 | ||
|
|
fbdf9d4bd4 | ||
|
|
b5378174f3 | ||
|
|
c1fe34adcb | ||
|
|
f7014e8773 | ||
|
|
fc6070d574 | ||
|
|
3b0091c231 | ||
|
|
4c2a566acc | ||
|
|
7ac296081c | ||
|
|
525073bb94 | ||
|
|
0664b737ab | ||
|
|
e34ede79b9 | ||
|
|
a0160eef0c | ||
|
|
b84de4f7f8 | ||
|
|
275b2eaae1 | ||
|
|
a88113de33 | ||
|
|
9fd80a8660 | ||
|
|
193c80849f | ||
|
|
9ed5e0f1fc | ||
|
|
6787c2eeed | ||
|
|
22d997d088 | ||
|
|
31900f6733 | ||
|
|
67846bad21 | ||
|
|
59ff485253 | ||
|
|
7f4e38844f | ||
|
|
7212c3876d | ||
|
|
89081d942c | ||
|
|
da65bc3f68 | ||
|
|
cf9a94a8b6 | ||
|
|
9577468f0c | ||
|
|
3134beb983 | ||
|
|
254cd69748 | ||
|
|
2f4ef3ba6a | ||
|
|
9705f60dd3 | ||
|
|
9cb1555ade | ||
|
|
ea67b6772c | ||
|
|
f784049079 | ||
|
|
d23ada30d7 | ||
|
|
dea5000a01 | ||
|
|
239aa3aa02 | ||
|
|
75baa11e81 | ||
|
|
e40d6f3314 | ||
|
|
e849e4ff0b | ||
|
|
6222b2d542 | ||
|
|
9c062b44aa | ||
|
|
cd587bc406 | ||
|
|
935481c4b5 | ||
|
|
d063436b0a | ||
|
|
3f0b84eb7b | ||
|
|
64e05778ef | ||
|
|
6cbf00df60 | ||
|
|
3b37c89d88 | ||
|
|
d4b74661aa | ||
|
|
ee224c395e | ||
|
|
9dea8b1f66 | ||
|
|
efc7b4deb6 | ||
|
|
e6ef12d313 | ||
|
|
a5506abdad | ||
|
|
cf2c3fde41 | ||
|
|
ed89d9f801 | ||
|
|
2bb0ecf497 | ||
|
|
08ad320d19 | ||
|
|
ef7b417105 | ||
|
|
f2baa0872b | ||
|
|
8637b8b61b | ||
|
|
6ac9ce614a | ||
|
|
bd670b4db3 | ||
|
|
def96ffe2f | ||
|
|
6b64158356 | ||
|
|
23e7031326 | ||
|
|
a2a6f84f13 | ||
|
|
316f37bfce | ||
|
|
e7c3ff9b9e | ||
|
|
baf31e69e5 | ||
|
|
7fd55fa2f4 | ||
|
|
35106ef662 | ||
|
|
d4860fe9f0 | ||
|
|
d47466ddf9 | ||
|
|
57ee84437b | ||
|
|
286edbbb8c | ||
|
|
00ba50bcb4 | ||
|
|
1d49b87e48 | ||
|
|
6700ac94fa | ||
|
|
10b2458f58 | ||
|
|
2c55ff0b3d | ||
|
|
9887016bdf | ||
|
|
a0b0a4cec5 | ||
|
|
8dadf79614 | ||
|
|
10cd0f3362 | ||
|
|
d82ca101de | ||
|
|
0d2e196368 | ||
|
|
125f0ba61a | ||
|
|
74a8b5d832 | ||
|
|
bd25822b35 | ||
|
|
2b87245e22 | ||
|
|
60b779a905 | ||
|
|
f41febd3ae | ||
|
|
1001e5489e | ||
|
|
be712fc606 | ||
|
|
cf25831ad5 | ||
|
|
6dbe84a1bf | ||
|
|
eefbccd957 | ||
|
|
fe85f079b0 | ||
|
|
2cb559ebdd | ||
|
|
64383776a2 | ||
|
|
8386188356 | ||
|
|
664f896696 | ||
|
|
e86764df45 | ||
|
|
71c6600abf | ||
|
|
fbd4e06df5 | ||
|
|
3715ebc7eb | ||
|
|
d394b032d7 | ||
|
|
23d3dafc51 | ||
|
|
708374d95b | ||
|
|
0fa8073947 | ||
|
|
81c65af560 | ||
|
|
c0aa423d7b | ||
|
|
03c137741a | ||
|
|
c110f3489d | ||
|
|
9589334a30 | ||
|
|
0409079983 | ||
|
|
1d4dc0c534 | ||
|
|
6fb1369939 | ||
|
|
9ffa587f6f | ||
|
|
42f81c62dc | ||
|
|
da72e69196 | ||
|
|
24648fb537 | ||
|
|
56ecbeeef7 | ||
|
|
7a32e03bd5 | ||
|
|
15059c2090 | ||
|
|
c71c61dc58 | ||
|
|
1513be4acd | ||
|
|
4eb8e7823d | ||
|
|
30e7693b24 | ||
|
|
4f33e1bf89 | ||
|
|
89e0e89927 | ||
|
|
3b80253fb3 | ||
|
|
b5e0127b16 | ||
|
|
b50259c25d | ||
|
|
21ccaf2ce8 | ||
|
|
b0accbfe58 | ||
|
|
8f0d553e4e | ||
|
|
b8baa549cc | ||
|
|
5ff7fc340b | ||
|
|
955a5b0a43 | ||
|
|
147d3733bf | ||
|
|
4269326ddf | ||
|
|
627533bed6 | ||
|
|
167628c696 | ||
|
|
df5cc3303f | ||
|
|
7b7d7c1d74 | ||
|
|
ec8ff0fcde | ||
|
|
7d9269e1a1 | ||
|
|
33aae9ab17 | ||
|
|
399690e1ef | ||
|
|
c8a349a573 | ||
|
|
5ef103b68a | ||
|
|
176f74bd3a | ||
|
|
34f9bc40b3 | ||
|
|
2d24876530 | ||
|
|
3fadf2c90b | ||
|
|
c544cebbe6 | ||
|
|
f746107697 | ||
|
|
9b6bce4592 | ||
|
|
5b3afeccc1 | ||
|
|
dc80a5a2ec | ||
|
|
fdb0a06803 | ||
|
|
3944f29add | ||
|
|
45a2dea042 | ||
|
|
bb541ad3a7 | ||
|
|
dca10ab876 | ||
|
|
75162339f5 | ||
|
|
b2b31dbc8f | ||
|
|
63d2a1085c | ||
|
|
af50d6cfb5 | ||
|
|
cfbec56b2b | ||
|
|
fec25cd690 | ||
|
|
5c67484295 | ||
|
|
70100af98e | ||
|
|
bf24cd9508 | ||
|
|
d934d226ce | ||
|
|
005479f8c3 | ||
|
|
97d62cc16b | ||
|
|
a91ef56954 | ||
|
|
5802f17726 | ||
|
|
1df47bb0be | ||
|
|
713e4c1822 | ||
|
|
8990911522 | ||
|
|
13602b4a63 | ||
|
|
f02b6832e2 | ||
|
|
5634eee2cf | ||
|
|
4fa97e9218 | ||
|
|
cd78f21b51 | ||
|
|
e6d2de7893 | ||
|
|
92ab3e0e8b | ||
|
|
41a0a68782 | ||
|
|
5698689361 | ||
|
|
4a98745788 | ||
|
|
11620cc571 | ||
|
|
35175fc19b | ||
|
|
c3f01d9b2f | ||
|
|
83930335f0 | ||
|
|
ccf3c7b89e | ||
|
|
5b428f509b | ||
|
|
4a67c687c3 | ||
|
|
fb9430da0a | ||
|
|
9c8d95d4db | ||
|
|
ad7cefa10c | ||
|
|
0b936a2bb8 | ||
|
|
5e67722836 | ||
|
|
2193d64f7e | ||
|
|
19d2aa5c97 | ||
|
|
405632f187 | ||
|
|
4f3bb609df | ||
|
|
5f2f694dca | ||
|
|
bc09ce93eb | ||
|
|
6a0a3811d9 | ||
|
|
bf98791330 | ||
|
|
1cc3a00eb2 | ||
|
|
ad4c3e055b | ||
|
|
b865e2c2f8 | ||
|
|
4173e184bd | ||
|
|
37a1dc1e34 | ||
|
|
9389509017 | ||
|
|
4cd412c39f | ||
|
|
34bedec044 | ||
|
|
23b89b80cd | ||
|
|
a7c52579f8 | ||
|
|
6aa76ec794 | ||
|
|
cfdd7c1206 | ||
|
|
b23b832332 | ||
|
|
0d1fd4fcf0 | ||
|
|
f048f88337 | ||
|
|
e61c48ea85 | ||
|
|
c5f513ae2d | ||
|
|
2ea9a8bb1b | ||
|
|
2038dff027 | ||
|
|
236f30043c | ||
|
|
c6c08eb0e7 | ||
|
|
7872c4ecf4 | ||
|
|
ca47a58a5d | ||
|
|
05bafb9838 | ||
|
|
abb54df4d0 | ||
|
|
17f3df0a04 | ||
|
|
4374e4a43f | ||
|
|
83403ad3ab | ||
|
|
17478d6a05 | ||
|
|
b64a4881d9 | ||
|
|
397627d1b9 | ||
|
|
00225e01b3 | ||
|
|
fc7db7d86f | ||
|
|
765166e807 | ||
|
|
63501c2ff4 | ||
|
|
2576b299e7 | ||
|
|
ee42b4d06c | ||
|
|
ee8aa5074f | ||
|
|
6703beea22 | ||
|
|
a2000b4b9d | ||
|
|
09a5b3149d | ||
|
|
68e26bf9d6 | ||
|
|
e36b74893f | ||
|
|
2761a5c361 | ||
|
|
d5ae51aab0 | ||
|
|
b7a29e71cd | ||
|
|
1af463b03c | ||
|
|
66ee7e1a81 | ||
|
|
16553be539 | ||
|
|
4daa083fd3 | ||
|
|
bf3142ad67 | ||
|
|
32e09665ad | ||
|
|
74a0944862 | ||
|
|
a52be46e69 | ||
|
|
7b4e2bdb4d | ||
|
|
d141383305 | ||
|
|
b225bc24dc | ||
|
|
bdb93631d6 | ||
|
|
fd1a11c452 | ||
|
|
6f7153324c | ||
|
|
4a38fbaa99 | ||
|
|
7abc03e523 | ||
|
|
8a89b6be12 | ||
|
|
4870356899 | ||
|
|
017371b492 | ||
|
|
a3f25ca5af | ||
|
|
cc51abd4dd | ||
|
|
1908af52df | ||
|
|
a99beb0628 | ||
|
|
fef9a1e42f | ||
|
|
6a98ebdb9c | ||
|
|
08eb2566e4 | ||
|
|
8cbe438ad5 | ||
|
|
6e9cc463b3 | ||
|
|
60881ed856 | ||
|
|
d5534f1e5f | ||
|
|
424564825a | ||
|
|
c30a621195 | ||
|
|
cfba3d0a60 | ||
|
|
f817daba17 | ||
|
|
04189de9c5 | ||
|
|
f785c8cf03 | ||
|
|
9e4cc5cc78 | ||
|
|
ef4e4eb5d4 | ||
|
|
f57e3cfecb | ||
|
|
5a8700060e | ||
|
|
f2035231e3 | ||
|
|
fd824143e9 | ||
|
|
572aedfcef | ||
|
|
973e3c56b7 | ||
|
|
1586966003 | ||
|
|
4a19124cb7 | ||
|
|
f86ca43b2f | ||
|
|
a6432e6ce4 | ||
|
|
18168cc347 | ||
|
|
2fb1b70a14 | ||
|
|
52bb22d8d1 | ||
|
|
3239d6879b | ||
|
|
9696fc622c | ||
|
|
9cf7227a67 | ||
|
|
5495d6c0d3 | ||
|
|
03d2032a6a | ||
|
|
dfe5550ad0 | ||
|
|
09f13033ae | ||
|
|
bd525ab9e2 | ||
|
|
d626a0637d | ||
|
|
167d1be130 | ||
|
|
92c0106e81 | ||
|
|
accec9ab75 | ||
|
|
862adb2b64 | ||
|
|
6d369671c8 | ||
|
|
9607ae0c1e | ||
|
|
e3751f0e36 | ||
|
|
ff5b8f1490 | ||
|
|
8978844111 | ||
|
|
8293d96f24 | ||
|
|
106bf2c52e | ||
|
|
43a5a9e653 | ||
|
|
898b7eed8a | ||
|
|
e758a4de3e | ||
|
|
93b3e8428c | ||
|
|
3f535e3b56 | ||
|
|
11d6dabe37 | ||
|
|
51224229eb | ||
|
|
bebc015eb3 | ||
|
|
2f776957d8 | ||
|
|
27a21e848d | ||
|
|
051b5372ce | ||
|
|
82f53aae54 | ||
|
|
8bcab8796e | ||
|
|
097fcd8f56 | ||
|
|
f5c600a9f8 | ||
|
|
77f44cdbbe | ||
|
|
1c12a84ded | ||
|
|
a1d201028b | ||
|
|
33e8d61959 | ||
|
|
35192cf413 | ||
|
|
c8c4e2b59c | ||
|
|
c1f18b5324 | ||
|
|
9f822ec5ca | ||
|
|
11f3e97b28 | ||
|
|
899c815676 | ||
|
|
8916b76f11 | ||
|
|
1ce6419698 | ||
|
|
e0590e08d7 | ||
|
|
5e189c83ee | ||
|
|
fdac81e908 | ||
|
|
3e6a3c42c2 | ||
|
|
f6a8da0b07 | ||
|
|
b171774051 | ||
|
|
36019cb5ab | ||
|
|
51fc59b45f | ||
|
|
af46c0471e | ||
|
|
63936209a0 | ||
|
|
5f4e317321 | ||
|
|
33b7866377 | ||
|
|
17cdeee214 | ||
|
|
6a3fcda751 | ||
|
|
9a09a35502 | ||
|
|
885d81b354 | ||
|
|
58eb0b37b4 | ||
|
|
5c342bd974 | ||
|
|
f67b81e200 | ||
|
|
26b3126c34 | ||
|
|
712982a7d5 | ||
|
|
919a784a20 | ||
|
|
110e2f3ae5 | ||
|
|
793ea4d893 | ||
|
|
de575eba60 | ||
|
|
fe1241aa61 | ||
|
|
d7f8748572 | ||
|
|
5bc7304675 | ||
|
|
3564fdaec6 | ||
|
|
7c0789252e | ||
|
|
26ffa41f20 | ||
|
|
fbe1b0e5b0 | ||
|
|
a84fc06483 | ||
|
|
d6124b77cc | ||
|
|
f957331310 | ||
|
|
e0af761c35 | ||
|
|
d28ac11d56 | ||
|
|
3ee961c600 | ||
|
|
4354065f78 | ||
|
|
df4f160846 | ||
|
|
5e18bb4b61 | ||
|
|
71abd6f2e4 | ||
|
|
8c4b985df0 | ||
|
|
95f7ed607a | ||
|
|
0c1ff5d6a4 | ||
|
|
8ec2538584 | ||
|
|
2678a5a74b | ||
|
|
02db53e12f | ||
|
|
99c4f93ee3 | ||
|
|
0a137e4e63 | ||
|
|
11faf42c7e | ||
|
|
5f5eac61e3 | ||
|
|
4f9d5b9e32 | ||
|
|
0569d6652f | ||
|
|
4d8de551b5 | ||
|
|
eb8b3e6622 | ||
|
|
5dfdb2e2a9 | ||
|
|
b2bfd395ed | ||
|
|
a0de3868c6 | ||
|
|
84aed05ebb | ||
|
|
f9265e9b01 | ||
|
|
44bd3d6717 | ||
|
|
a51b37f01c | ||
|
|
afd2c5e2c6 | ||
|
|
53297e55bf | ||
|
|
55facfd8db | ||
|
|
005be024f1 | ||
|
|
b987cff7da | ||
|
|
e90e618c5e | ||
|
|
395d9d0481 | ||
|
|
d52381eead | ||
|
|
bdefa24ac6 | ||
|
|
1073954fb7 | ||
|
|
cc7d421c77 | ||
|
|
c69050fc84 | ||
|
|
a40f335464 | ||
|
|
d53bd020ea | ||
|
|
a791d7a244 | ||
|
|
870c5948d2 | ||
|
|
b2d6987cac | ||
|
|
e986af5de0 | ||
|
|
bee1bc8c06 | ||
|
|
72da564db5 | ||
|
|
4c7deef9ae | ||
|
|
9990b78702 | ||
|
|
4049708aa5 | ||
|
|
60b2b61b52 | ||
|
|
82bf1c6367 | ||
|
|
2644bc86db | ||
|
|
6e05db972a | ||
|
|
773324dcd6 | ||
|
|
0b4f0f5622 | ||
|
|
bac898f993 | ||
|
|
6a93537c42 | ||
|
|
dc4094b264 | ||
|
|
4bb7a598a5 | ||
|
|
6c9ec32195 | ||
|
|
753394228a | ||
|
|
16c0dc9267 | ||
|
|
ca5a52f48a | ||
|
|
36091853e0 | ||
|
|
7e21123a5d | ||
|
|
c60e654a9b | ||
|
|
b65b7acace | ||
|
|
d57d3ea83e | ||
|
|
b144464674 | ||
|
|
a8cf64736f | ||
|
|
062176d3f5 | ||
|
|
091457a24f | ||
|
|
1b3f82e729 | ||
|
|
55eef983d4 | ||
|
|
19a011cf03 | ||
|
|
3ec5f1209b | ||
|
|
328ba5e69e | ||
|
|
a0e3c238a4 | ||
|
|
359c3bc067 | ||
|
|
6ca6a8aa60 | ||
|
|
5389b2deb1 | ||
|
|
087642f793 | ||
|
|
8da77020b9 | ||
|
|
9b56ebe5c4 | ||
|
|
4322784b01 | ||
|
|
1804a804df | ||
|
|
9d0bc54b07 | ||
|
|
7daa3fc8f9 | ||
|
|
8dbc71da0c | ||
|
|
adf7c3ac98 | ||
|
|
d0dd107f39 | ||
|
|
d64f866bfa | ||
|
|
34eac5754c | ||
|
|
2a147acd3f | ||
|
|
121f4e606c | ||
|
|
653904a359 | ||
|
|
fb6684450c | ||
|
|
b042376db4 | ||
|
|
64db4eef39 | ||
|
|
967c9270ce | ||
|
|
02f23db210 | ||
|
|
988d6d877f | ||
|
|
a94b93b38e | ||
|
|
0c3562fcdd | ||
|
|
2f8cf68762 | ||
|
|
067e697b8b | ||
|
|
855de1890f | ||
|
|
5592dbd277 | ||
|
|
530894608b | ||
|
|
b7d0cc3b24 | ||
|
|
35ecd95c49 | ||
|
|
415c1cb4b5 | ||
|
|
b9a4f97790 | ||
|
|
67b84b5811 | ||
|
|
e3aea6d6c4 | ||
|
|
f2a6ac5dc2 | ||
|
|
75c4132f02 | ||
|
|
453b428d33 | ||
|
|
96c5e929be | ||
|
|
990297b463 | ||
|
|
3c7767fab0 | ||
|
|
786ee6003c | ||
|
|
5fe784aabe | ||
|
|
0b955c0546 | ||
|
|
65b626c5e1 | ||
|
|
bcc1b5f8bf | ||
|
|
76a1462e37 | ||
|
|
97ac802f0c | ||
|
|
0ce0c553a6 | ||
|
|
1e63bc52be | ||
|
|
da4ba3c10f | ||
|
|
986d32ca42 | ||
|
|
3095591064 | ||
|
|
b4a0ef9bab | ||
|
|
e2a6ed6955 | ||
|
|
a24ab0e879 | ||
|
|
0b899eb4cf | ||
|
|
29c0b544a4 | ||
|
|
fa0ec78441 | ||
|
|
6adef8ed7c | ||
|
|
6003d98f3a | ||
|
|
6ea2a97e83 | ||
|
|
ac7fefe96e | ||
|
|
4cde35267b | ||
|
|
f20d6f3fdb | ||
|
|
1e07373696 |
@@ -1,23 +0,0 @@
|
||||
# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
|
||||
ARG VARIANT=3-bullseye
|
||||
FROM python:3.8
|
||||
|
||||
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
|
||||
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
|
||||
&& apt-get purge -y imagemagick imagemagick-6-common
|
||||
|
||||
# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897
|
||||
# They are installed by the base image (python) which does not have the patch.
|
||||
RUN python3 -m pip install --upgrade setuptools
|
||||
|
||||
# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
|
||||
# COPY requirements.txt /tmp/pip-tmp/
|
||||
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
|
||||
# && rm -rf /tmp/pip-tmp
|
||||
|
||||
# [Optional] Uncomment this section to install additional OS packages.
|
||||
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
|
||||
# && apt-get -y install --no-install-recommends <your-package-list-here>
|
||||
|
||||
# [Optional] Uncomment this line to install global node packages.
|
||||
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1
|
||||
40
.dockerignore
Normal file
40
.dockerignore
Normal file
@@ -0,0 +1,40 @@
|
||||
# Ignore everything by default, selectively add things to context
|
||||
*
|
||||
|
||||
# AutoGPT
|
||||
!autogpt/autogpt/
|
||||
!autogpt/pyproject.toml
|
||||
!autogpt/poetry.lock
|
||||
!autogpt/README.md
|
||||
!autogpt/tests/
|
||||
|
||||
# Benchmark
|
||||
!benchmark/agbenchmark/
|
||||
!benchmark/pyproject.toml
|
||||
!benchmark/poetry.lock
|
||||
!benchmark/README.md
|
||||
|
||||
# Forge
|
||||
!forge/forge/
|
||||
!forge/pyproject.toml
|
||||
!forge/poetry.lock
|
||||
!forge/README.md
|
||||
|
||||
# Frontend
|
||||
!frontend/build/web/
|
||||
|
||||
# rnd
|
||||
!rnd/
|
||||
|
||||
# Explicitly re-ignore some folders
|
||||
.*
|
||||
**/__pycache__
|
||||
# rnd
|
||||
rnd/autogpt_builder/.next/
|
||||
rnd/autogpt_builder/node_modules
|
||||
rnd/autogpt_builder/.env.example
|
||||
rnd/autogpt_builder/.env.local
|
||||
rnd/autogpt_server/.env
|
||||
rnd/autogpt_server/.venv/
|
||||
|
||||
rnd/market/.env
|
||||
123
.env.template
123
.env.template
@@ -1,123 +0,0 @@
|
||||
################################################################################
|
||||
### AUTO-GPT - GENERAL SETTINGS
|
||||
################################################################################
|
||||
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
|
||||
EXECUTE_LOCAL_COMMANDS=False
|
||||
# BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
|
||||
BROWSE_CHUNK_MAX_LENGTH=8192
|
||||
# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website
|
||||
BROWSE_SUMMARY_MAX_TOKEN=300
|
||||
# USER_AGENT - Define the user-agent used by the requests library to browse website (string)
|
||||
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
|
||||
# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
|
||||
AI_SETTINGS_FILE=ai_settings.yaml
|
||||
|
||||
################################################################################
|
||||
### LLM PROVIDER
|
||||
################################################################################
|
||||
|
||||
### OPENAI
|
||||
# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
|
||||
# TEMPERATURE - Sets temperature in OpenAI (Default: 1)
|
||||
# USE_AZURE - Use Azure OpenAI or not (Default: False)
|
||||
OPENAI_API_KEY=your-openai-api-key
|
||||
TEMPERATURE=1
|
||||
USE_AZURE=False
|
||||
|
||||
### AZURE
|
||||
# OPENAI_AZURE_API_BASE - OpenAI API base URL for Azure (Example: https://my-azure-openai-url.com)
|
||||
# OPENAI_AZURE_API_VERSION - OpenAI API version for Azure (Example: v1)
|
||||
# OPENAI_AZURE_DEPLOYMENT_ID - OpenAI deployment ID for Azure (Example: my-deployment-id)
|
||||
# OPENAI_AZURE_CHAT_DEPLOYMENT_ID - OpenAI deployment ID for Azure Chat (Example: my-deployment-id-for-azure-chat)
|
||||
# OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID - OpenAI deployment ID for Embedding (Example: my-deployment-id-for-azure-embeddigs)
|
||||
OPENAI_AZURE_API_BASE=your-base-url-for-azure
|
||||
OPENAI_AZURE_API_VERSION=api-version-for-azure
|
||||
OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure
|
||||
OPENAI_AZURE_CHAT_DEPLOYMENT_ID=deployment-id-for-azure-chat
|
||||
OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID=deployment-id-for-azure-embeddigs
|
||||
|
||||
################################################################################
|
||||
### LLM MODELS
|
||||
################################################################################
|
||||
|
||||
# SMART_LLM_MODEL - Smart language model (Default: gpt-4)
|
||||
# FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
|
||||
SMART_LLM_MODEL=gpt-4
|
||||
FAST_LLM_MODEL=gpt-3.5-turbo
|
||||
|
||||
### LLM MODEL SETTINGS
|
||||
# FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
|
||||
# SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
|
||||
# When using --gpt3onlythis needs to be set to 4000.
|
||||
FAST_TOKEN_LIMIT=4000
|
||||
SMART_TOKEN_LIMIT=8000
|
||||
|
||||
################################################################################
|
||||
### MEMORY
|
||||
################################################################################
|
||||
|
||||
# MEMORY_BACKEND - Memory backend type (Default: local)
|
||||
MEMORY_BACKEND=local
|
||||
|
||||
### PINECONE
|
||||
# PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
|
||||
# PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
|
||||
PINECONE_API_KEY=your-pinecone-api-key
|
||||
PINECONE_ENV=your-pinecone-region
|
||||
|
||||
### REDIS
|
||||
# REDIS_HOST - Redis host (Default: localhost)
|
||||
# REDIS_PORT - Redis port (Default: 6379)
|
||||
# REDIS_PASSWORD - Redis password (Default: "")
|
||||
# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
|
||||
# MEMORY_INDEX - Name of index created in Redis database (Default: auto-gpt)
|
||||
REDIS_HOST=localhost
|
||||
REDIS_PORT=6379
|
||||
REDIS_PASSWORD=
|
||||
WIPE_REDIS_ON_START=False
|
||||
MEMORY_INDEX=auto-gpt
|
||||
|
||||
################################################################################
|
||||
### IMAGE GENERATION PROVIDER
|
||||
################################################################################
|
||||
|
||||
### OPEN AI
|
||||
# IMAGE_PROVIDER - Image provider (Example: dalle)
|
||||
IMAGE_PROVIDER=dalle
|
||||
|
||||
### HUGGINGFACE
|
||||
# STABLE DIFFUSION
|
||||
# (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4)
|
||||
# Set in image_gen.py)
|
||||
# HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
|
||||
HUGGINGFACE_API_TOKEN=your-huggingface-api-token
|
||||
|
||||
################################################################################
|
||||
### SEARCH PROVIDER
|
||||
################################################################################
|
||||
|
||||
### GOOGLE
|
||||
# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
|
||||
# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
|
||||
GOOGLE_API_KEY=your-google-api-key
|
||||
CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
|
||||
|
||||
################################################################################
|
||||
### TTS PROVIDER
|
||||
################################################################################
|
||||
|
||||
### MAC OS
|
||||
# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
|
||||
USE_MAC_OS_TTS=False
|
||||
|
||||
### STREAMELEMENTS
|
||||
# USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
|
||||
USE_BRIAN_TTS=False
|
||||
|
||||
### ELEVENLABS
|
||||
# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
|
||||
# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
|
||||
# ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
|
||||
ELEVENLABS_API_KEY=your-elevenlabs-api-key
|
||||
ELEVENLABS_VOICE_1_ID=your-voice-id-1
|
||||
ELEVENLABS_VOICE_2_ID=your-voice-id-2
|
||||
12
.flake8
Normal file
12
.flake8
Normal file
@@ -0,0 +1,12 @@
|
||||
[flake8]
|
||||
max-line-length = 88
|
||||
exclude =
|
||||
.tox,
|
||||
__pycache__,
|
||||
*.pyc,
|
||||
.env
|
||||
venv*/*,
|
||||
.venv/*,
|
||||
reports/*,
|
||||
dist/*,
|
||||
data/*,
|
||||
10
.gitattributes
vendored
Normal file
10
.gitattributes
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
frontend/build/** linguist-generated
|
||||
|
||||
**/poetry.lock linguist-generated
|
||||
|
||||
docs/_javascript/** linguist-vendored
|
||||
|
||||
# Exclude VCR cassettes from stats
|
||||
forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
|
||||
|
||||
* text=auto
|
||||
7
.github/CODEOWNERS
vendored
Normal file
7
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
* @Significant-Gravitas/maintainers
|
||||
.github/workflows/ @Significant-Gravitas/devops
|
||||
forge/ @Significant-Gravitas/forge-maintainers
|
||||
benchmark/ @Significant-Gravitas/benchmark-maintainers
|
||||
frontend/ @Significant-Gravitas/frontend-maintainers
|
||||
rnd/infra @Significant-Gravitas/devops
|
||||
.github/CODEOWNERS @Significant-Gravitas/admins
|
||||
3
.github/FUNDING.yml
vendored
3
.github/FUNDING.yml
vendored
@@ -1,3 +0,0 @@
|
||||
# These are supported funding model platforms
|
||||
|
||||
github: Torantulino
|
||||
198
.github/ISSUE_TEMPLATE/1.bug.yml
vendored
198
.github/ISSUE_TEMPLATE/1.bug.yml
vendored
@@ -1,51 +1,173 @@
|
||||
name: Bug report 🐛
|
||||
description: Create a bug report for Auto-GPT.
|
||||
description: Create a bug report for AutoGPT.
|
||||
labels: ['status: needs triage']
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please provide a searchable summary of the issue in the title above ⬆️.
|
||||
### ⚠️ Before you continue
|
||||
* Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
|
||||
* If you need help, you can ask in the [discussions] section or in [#tech-support]
|
||||
* **Thoroughly search the [existing issues] before creating a new one**
|
||||
* Read our [wiki page on Contributing]
|
||||
[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
|
||||
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
|
||||
[discord]: https://discord.gg/autogpt
|
||||
[discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
|
||||
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
|
||||
[existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
|
||||
[wiki page on Contributing]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
|
||||
|
||||
⚠️ SUPER-busy repo, please help the volunteer maintainers.
|
||||
The less time we spend here, the more time we spend building AutoGPT.
|
||||
|
||||
Please help us help you:
|
||||
- Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
|
||||
- Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
|
||||
- Search for existing issues, "add comment" is tidier than "new issue"
|
||||
- Ask on our Discord (https://discord.gg/autogpt)
|
||||
- Provide relevant info:
|
||||
- Provide commit-hash (`git rev-parse HEAD` gets it)
|
||||
- If it's a pip/packages issue, provide pip version, python version
|
||||
- If it's a crash, provide traceback.
|
||||
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Duplicates
|
||||
description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
|
||||
label: ⚠️ Search for existing issues first ⚠️
|
||||
description: >
|
||||
Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues)
|
||||
to see if an issue already exists for the same problem.
|
||||
options:
|
||||
- label: I have searched the existing issues
|
||||
- label: I have searched the existing issues, and there is no existing issue for my problem
|
||||
required: true
|
||||
- type: textarea
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
label: Steps to reproduce 🕹
|
||||
description: |
|
||||
**⚠️ Issues that we can't reproduce will be closed.**
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Current behavior 😯
|
||||
description: Describe what happens instead of the expected behavior.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Expected behavior 🤔
|
||||
description: Describe what should happen.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Your prompt 📝
|
||||
description: |
|
||||
If applicable please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
|
||||
value: |
|
||||
```yaml
|
||||
# Paste your prompt here
|
||||
```
|
||||
Please confirm that the issue you have is described well and precise in the title above ⬆️.
|
||||
A good rule of thumb: What would you type if you were searching for the issue?
|
||||
|
||||
For example:
|
||||
BAD - my AutoGPT keeps looping
|
||||
GOOD - After performing execute_python_file, AutoGPT goes into a loop where it keeps trying to execute the file.
|
||||
|
||||
⚠️ SUPER-busy repo, please help the volunteer maintainers.
|
||||
The less time we spend here, the more time we can spend building AutoGPT.
|
||||
|
||||
Please help us help you by following these steps:
|
||||
- Search for existing issues, adding a comment when you have the same or similar issue is tidier than "new issue" and
|
||||
newer issues will not be reviewed earlier, this is dependent on the current priorities set by our wonderful team
|
||||
- Ask on our Discord if your issue is known when you are unsure (https://discord.gg/autogpt)
|
||||
- Provide relevant info:
|
||||
- Provide commit-hash (`git rev-parse HEAD` gets it) if possible
|
||||
- If it's a pip/packages issue, mention this in the title and provide pip version, python version
|
||||
- If it's a crash, provide traceback and describe the error you got as precise as possible in the title.
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Which Operating System are you using?
|
||||
description: >
|
||||
Please select the operating system you were using to run AutoGPT when this problem occurred.
|
||||
options:
|
||||
- Windows
|
||||
- Linux
|
||||
- MacOS
|
||||
- Docker
|
||||
- Devcontainer / Codespace
|
||||
- Windows Subsystem for Linux (WSL)
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
nested_fields:
|
||||
- type: text
|
||||
attributes:
|
||||
label: Specify the system
|
||||
description: Please specify the system you are working on.
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Which version of AutoGPT are you using?
|
||||
description: |
|
||||
Please select which version of AutoGPT you were using when this issue occurred.
|
||||
If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/AutoGPT/releases/) make sure you were using the latest code.
|
||||
**If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/AutoGPT/releases/)**.
|
||||
If installed with git you can run `git branch` to see which version of AutoGPT you are running.
|
||||
options:
|
||||
- Latest Release
|
||||
- Stable (branch)
|
||||
- Master (branch)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: What LLM Provider do you use?
|
||||
description: >
|
||||
If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
|
||||
the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
|
||||
options:
|
||||
- Azure
|
||||
- Groq
|
||||
- Anthropic
|
||||
- Llamafile
|
||||
- Other (detail in issue)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Which area covers your issue best?
|
||||
description: >
|
||||
Select the area related to the issue you are reporting.
|
||||
options:
|
||||
- Installation and setup
|
||||
- Memory
|
||||
- Performance
|
||||
- Prompt
|
||||
- Commands
|
||||
- Plugins
|
||||
- AI Model Limitations
|
||||
- Challenges
|
||||
- Documentation
|
||||
- Logging
|
||||
- Agents
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
autolabels: true
|
||||
nested_fields:
|
||||
- type: text
|
||||
attributes:
|
||||
label: Specify the area
|
||||
description: Please specify the area you think is best related to the issue.
|
||||
|
||||
- type: input
|
||||
attributes:
|
||||
label: What commit or version are you using?
|
||||
description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe your issue.
|
||||
description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
|
||||
validations:
|
||||
required: true
|
||||
|
||||
#Following are optional file content uploads
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
|
||||
|
||||
"The log files are located in the folder 'logs' inside the main AutoGPT folder."
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Upload Activity Log Content
|
||||
description: |
|
||||
Upload the activity log content, this can help us understand the issue better.
|
||||
To do this, go to the folder logs in your main AutoGPT folder, open activity.log and copy/paste the contents to this field.
|
||||
⚠️ The activity log may contain personal data given to AutoGPT by you in prompt or input as well as
|
||||
any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Upload Error Log Content
|
||||
description: |
|
||||
Upload the error log content, this will help us understand the issue better.
|
||||
To do this, go to the folder logs in your main AutoGPT folder, open error.log and copy/paste the contents to this field.
|
||||
⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as
|
||||
any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
|
||||
validations:
|
||||
required: false
|
||||
|
||||
9
.github/ISSUE_TEMPLATE/2.feature.yml
vendored
9
.github/ISSUE_TEMPLATE/2.feature.yml
vendored
@@ -1,17 +1,16 @@
|
||||
name: Feature request 🚀
|
||||
description: Suggest a new idea for Auto-GPT.
|
||||
description: Suggest a new idea for AutoGPT!
|
||||
labels: ['status: needs triage']
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing)
|
||||
Please provide a searchable summary of the issue in the title above ⬆️.
|
||||
|
||||
Thanks for contributing by creating an issue! ❤️
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Duplicates
|
||||
description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
|
||||
description: Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues) to see if an issue already exists for the same problem.
|
||||
options:
|
||||
- label: I have searched the existing issues
|
||||
required: true
|
||||
@@ -26,4 +25,4 @@ body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Motivation 🔦
|
||||
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
|
||||
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
|
||||
|
||||
50
.github/PULL_REQUEST_TEMPLATE.md
vendored
50
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,33 +1,31 @@
|
||||
<!-- 📢 Announcement
|
||||
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:
|
||||
|
||||
Focus on a single, specific change.
|
||||
Do not include any unrelated or "extra" modifications.
|
||||
Provide clear documentation and explanations of the changes made.
|
||||
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
|
||||
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
|
||||
|
||||
By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
|
||||
|
||||
### Background
|
||||
<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->
|
||||
|
||||
### Changes
|
||||
<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->
|
||||
<!-- Clearly explain the need for these changes: -->
|
||||
|
||||
### Documentation
|
||||
<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->
|
||||
### Changes 🏗️
|
||||
|
||||
### Test Plan
|
||||
<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->
|
||||
<!-- Concisely describe all of the changes made in this pull request: -->
|
||||
|
||||
### PR Quality Checklist
|
||||
- [ ] My pull request is atomic and focuses on a single change.
|
||||
- [ ] I have thoroughly tested my changes with multiple different prompts.
|
||||
- [ ] I have considered potential risks and mitigations for my changes.
|
||||
- [ ] I have documented my changes clearly and comprehensively.
|
||||
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
|
||||
### PR Quality Scorecard ✨
|
||||
|
||||
<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
|
||||
<!--
|
||||
Check out our contribution guide:
|
||||
https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
|
||||
|
||||
<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guide lines. -->
|
||||
1. Avoid duplicate work, issues, PRs etc.
|
||||
2. Also consider contributing something other than code; see the [contribution guide]
|
||||
for options.
|
||||
3. Clearly explain your changes.
|
||||
4. Avoid making unnecessary changes, especially if they're purely based on personal
|
||||
preferences. Doing so is the maintainers' job. ;-)
|
||||
-->
|
||||
|
||||
- [x] Have you used the PR description template?   `+2 pts`
|
||||
- [ ] Is your pull request atomic, focusing on a single change?   `+5 pts`
|
||||
- [ ] Have you linked the GitHub issue(s) that this PR addresses?   `+5 pts`
|
||||
- [ ] Have you documented your changes clearly and comprehensively?   `+5 pts`
|
||||
- [ ] Have you changed or added a feature?   `-4 pts`
|
||||
- [ ] Have you added/updated corresponding documentation?   `+4 pts`
|
||||
- [ ] Have you added/updated corresponding integration tests?   `+5 pts`
|
||||
- [ ] Have you changed the behavior of AutoGPT?   `-5 pts`
|
||||
- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance?   `+10 pts`
|
||||
|
||||
27
.github/labeler.yml
vendored
Normal file
27
.github/labeler.yml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
AutoGPT Agent:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: autogpt/**
|
||||
|
||||
Forge:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: forge/**
|
||||
|
||||
Benchmark:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: benchmark/**
|
||||
|
||||
Frontend:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: frontend/**
|
||||
|
||||
documentation:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: docs/**
|
||||
|
||||
Builder:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: rnd/autogpt_builder/**
|
||||
|
||||
Server:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: rnd/autogpt_server/**
|
||||
23
.github/workflows/auto_format.yml
vendored
23
.github/workflows/auto_format.yml
vendored
@@ -1,23 +0,0 @@
|
||||
name: auto-format
|
||||
on: pull_request
|
||||
jobs:
|
||||
format:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout PR branch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: autopep8
|
||||
uses: peter-evans/autopep8@v1
|
||||
with:
|
||||
args: --exit-code --recursive --in-place --aggressive --aggressive .
|
||||
- name: Check for modified files
|
||||
id: git-check
|
||||
run: echo "modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)" >> $GITHUB_ENV
|
||||
- name: Push changes
|
||||
if: steps.git-check.outputs.modified == 'true'
|
||||
run: |
|
||||
git config --global user.name 'Torantulino'
|
||||
git config --global user.email 'toran.richards@gmail.com'
|
||||
git remote set
|
||||
41
.github/workflows/autogpt-builder-ci.yml
vendored
Normal file
41
.github/workflows/autogpt-builder-ci.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
name: AutoGPT Builder CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/autogpt-builder-ci.yml'
|
||||
- 'rnd/autogpt_builder/**'
|
||||
pull_request:
|
||||
paths:
|
||||
- '.github/workflows/autogpt-builder-ci.yml'
|
||||
- 'rnd/autogpt_builder/**'
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: rnd/autogpt_builder
|
||||
|
||||
jobs:
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '21'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
npm install
|
||||
|
||||
- name: Check formatting with Prettier
|
||||
run: |
|
||||
npx prettier --check .
|
||||
|
||||
- name: Run lint
|
||||
run: |
|
||||
npm run lint
|
||||
138
.github/workflows/autogpt-ci.yml
vendored
Normal file
138
.github/workflows/autogpt-ci.yml
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
name: AutoGPT CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, development, ci-test* ]
|
||||
paths:
|
||||
- '.github/workflows/autogpt-ci.yml'
|
||||
- 'autogpt/**'
|
||||
pull_request:
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/autogpt-ci.yml'
|
||||
- 'autogpt/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: autogpt
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
|
||||
steps:
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
working-directory: '.'
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Configure git user Auto-GPT-Bot
|
||||
run: |
|
||||
git config --global user.name "Auto-GPT-Bot"
|
||||
git config --global user.email "github-bot@agpt.co"
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
poetry run pytest -vv \
|
||||
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--numprocesses=logical --durations=10 \
|
||||
tests/unit tests/integration
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: autogpt-agent,${{ runner.os }}
|
||||
|
||||
- name: Upload logs to artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-logs
|
||||
path: autogpt/logs/
|
||||
59
.github/workflows/autogpt-docker-cache-clean.yml
vendored
Normal file
59
.github/workflows/autogpt-docker-cache-clean.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: Purge Auto-GPT Docker CI cache
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: 20 4 * * 1,4
|
||||
|
||||
env:
|
||||
BASE_BRANCH: development
|
||||
IMAGE_NAME: auto-gpt
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
build-type: [release, dev]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
file: Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=${{ matrix.build-type }}
|
||||
load: true # save to docker images
|
||||
# use GHA cache as read-only
|
||||
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
|
||||
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.schedule }}
|
||||
|
||||
build_type: ${{ matrix.build-type }}
|
||||
|
||||
prod_branch: master
|
||||
dev_branch: development
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
|
||||
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.sha }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
|
||||
push_forced_label:
|
||||
|
||||
new_commits_json: ${{ null }}
|
||||
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
|
||||
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
|
||||
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
162
.github/workflows/autogpt-docker-ci.yml
vendored
Normal file
162
.github/workflows/autogpt-docker-ci.yml
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
name: AutoGPT Docker CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, development ]
|
||||
paths:
|
||||
- '.github/workflows/autogpt-docker-ci.yml'
|
||||
- 'autogpt/**'
|
||||
pull_request:
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/autogpt-docker-ci.yml'
|
||||
- 'autogpt/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: autogpt
|
||||
|
||||
env:
|
||||
IMAGE_NAME: auto-gpt
|
||||
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
|
||||
DEV_IMAGE_TAG: latest-dev
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
build-type: [release, dev]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- if: runner.debug
|
||||
run: |
|
||||
ls -al
|
||||
du -hs *
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
file: Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=${{ matrix.build-type }}
|
||||
tags: ${{ env.IMAGE_NAME }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
load: true # save to docker images
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
|
||||
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
|
||||
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.ref }}
|
||||
event_ref_type: ${{ github.event.ref}}
|
||||
|
||||
build_type: ${{ matrix.build-type }}
|
||||
|
||||
prod_branch: master
|
||||
dev_branch: development
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
|
||||
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.event.after }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
|
||||
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
|
||||
|
||||
new_commits_json: ${{ toJSON(github.event.commits) }}
|
||||
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
|
||||
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
|
||||
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
|
||||
services:
|
||||
minio:
|
||||
image: minio/minio:edge-cicd
|
||||
options: >
|
||||
--name=minio
|
||||
--health-interval=10s --health-timeout=5s --health-retries=3
|
||||
--health-cmd="curl -f http://localhost:9000/minio/health/live"
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
|
||||
- if: github.event_name == 'push'
|
||||
name: Log in to Docker hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USER }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
file: Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=dev # include pytest
|
||||
tags: >
|
||||
${{ env.IMAGE_NAME }},
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
load: true # save to docker images
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: type=gha,scope=autogpt-docker-dev
|
||||
cache-to: type=gha,scope=autogpt-docker-dev,mode=max
|
||||
|
||||
- id: test
|
||||
name: Run tests
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
S3_ENDPOINT_URL: http://minio:9000
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
run: |
|
||||
set +e
|
||||
docker run --env CI --env OPENAI_API_KEY \
|
||||
--network container:minio \
|
||||
--env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
|
||||
--entrypoint poetry ${{ env.IMAGE_NAME }} run \
|
||||
pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
|
||||
--numprocesses=4 --durations=10 \
|
||||
tests/unit tests/integration 2>&1 | tee test_output.txt
|
||||
|
||||
test_failure=${PIPESTATUS[0]}
|
||||
|
||||
cat << $EOF >> $GITHUB_STEP_SUMMARY
|
||||
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
|
||||
\`\`\`
|
||||
$(cat test_output.txt)
|
||||
\`\`\`
|
||||
$EOF
|
||||
|
||||
exit $test_failure
|
||||
|
||||
- if: github.event_name == 'push' && github.ref_name == 'master'
|
||||
name: Push image to Docker Hub
|
||||
run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
|
||||
86
.github/workflows/autogpt-docker-release.yml
vendored
Normal file
86
.github/workflows/autogpt-docker-release.yml
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
name: AutoGPT Docker Release
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [ published, edited ]
|
||||
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
no_cache:
|
||||
type: boolean
|
||||
description: 'Build from scratch, without using cached layers'
|
||||
|
||||
env:
|
||||
IMAGE_NAME: auto-gpt
|
||||
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: startsWith(github.ref, 'refs/tags/autogpt-')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to Docker hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USER }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
# slashes are not allowed in image tags, but can appear in git branch or tag names
|
||||
- id: sanitize_tag
|
||||
name: Sanitize image tag
|
||||
run: |
|
||||
tag=${raw_tag//\//-}
|
||||
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
|
||||
env:
|
||||
raw_tag: ${{ github.ref_name }}
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
file: Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=release
|
||||
load: true # save to docker images
|
||||
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
|
||||
tags: >
|
||||
${{ env.IMAGE_NAME }},
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:latest,
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
|
||||
cache-to: type=gha,scope=autogpt-docker-release,mode=max
|
||||
|
||||
- name: Push image to Docker Hub
|
||||
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
|
||||
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.ref }}
|
||||
event_ref_type: ${{ github.event.ref}}
|
||||
inputs_no_cache: ${{ inputs.no_cache }}
|
||||
|
||||
prod_branch: master
|
||||
dev_branch: development
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
|
||||
|
||||
ref_type: ${{ github.ref_type }}
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.sha }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
|
||||
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
|
||||
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
56
.github/workflows/autogpt-infra-ci.yml
vendored
Normal file
56
.github/workflows/autogpt-infra-ci.yml
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
name: AutoGPT Builder Infra
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/autogpt-infra-ci.yml'
|
||||
- 'rnd/infra/**'
|
||||
pull_request:
|
||||
paths:
|
||||
- '.github/workflows/autogpt-infra-ci.yml'
|
||||
- 'rnd/infra/**'
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: rnd/infra
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: TFLint
|
||||
uses: pauloconnor/tflint-action@v0.0.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
tflint_path: terraform/
|
||||
tflint_recurse: true
|
||||
tflint_changed_only: false
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4.2.0
|
||||
with:
|
||||
version: v3.14.4
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.6.0
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
run: |
|
||||
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
|
||||
if [[ -n "$changed" ]]; then
|
||||
echo "changed=true" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Run chart-testing (lint)
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: ct lint --target-branch ${{ github.event.repository.default_branch }}
|
||||
155
.github/workflows/autogpt-server-ci.yml
vendored
Normal file
155
.github/workflows/autogpt-server-ci.yml
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
name: AutoGPT Server CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, development, ci-test*]
|
||||
paths:
|
||||
- ".github/workflows/autogpt-server-ci.yml"
|
||||
- "rnd/autogpt_server/**"
|
||||
pull_request:
|
||||
branches: [master, development, release-*]
|
||||
paths:
|
||||
- ".github/workflows/autogpt-server-ci.yml"
|
||||
- "rnd/autogpt_server/**"
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: rnd/autogpt_server
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
|
||||
steps:
|
||||
- name: Setup PostgreSQL
|
||||
uses: ikalnytskyi/action-setup-postgres@v6
|
||||
with:
|
||||
username: ${{ secrets.DB_USER || 'postgres' }}
|
||||
password: ${{ secrets.DB_PASS || 'postgres' }}
|
||||
database: postgres
|
||||
port: 5432
|
||||
id: postgres
|
||||
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
working-directory: "."
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Generate Prisma Client
|
||||
run: poetry run prisma generate
|
||||
|
||||
- name: Run Database Migrations
|
||||
run: poetry run prisma migrate dev --name updates
|
||||
env:
|
||||
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
|
||||
|
||||
- id: lint
|
||||
name: Run Linter
|
||||
run: poetry run lint
|
||||
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
if [[ "${{ runner.debug }}" == "1" ]]; then
|
||||
poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
|
||||
else
|
||||
poetry run pytest -vv test
|
||||
fi
|
||||
if: success() || (failure() && steps.lint.outcome == 'failure')
|
||||
env:
|
||||
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
DB_USER: ${{ secrets.DB_USER || 'postgres' }}
|
||||
DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
|
||||
DB_NAME: postgres
|
||||
DB_PORT: 5432
|
||||
RUN_ENV: local
|
||||
PORT: 8080
|
||||
DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}
|
||||
|
||||
# - name: Upload coverage reports to Codecov
|
||||
# uses: codecov/codecov-action@v4
|
||||
# with:
|
||||
# token: ${{ secrets.CODECOV_TOKEN }}
|
||||
# flags: autogpt-server,${{ runner.os }}
|
||||
97
.github/workflows/autogpts-benchmark.yml
vendored
Normal file
97
.github/workflows/autogpts-benchmark.yml
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
name: AutoGPTs Nightly Benchmark
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
benchmark:
|
||||
permissions:
|
||||
contents: write
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [ autogpt ]
|
||||
fail-fast: false
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
min-python-version: '3.10'
|
||||
REPORTS_BRANCH: data/benchmark-reports
|
||||
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Prepare reports folder
|
||||
run: mkdir -p ${{ env.REPORTS_FOLDER }}
|
||||
|
||||
- run: poetry -C benchmark install
|
||||
|
||||
- name: Benchmark ${{ matrix.agent-name }}
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
|
||||
set +e # Do not quit on non-zero exit codes
|
||||
poetry run agbenchmark run -N 3 \
|
||||
--test=ReadFile \
|
||||
--test=BasicRetrieval --test=RevenueRetrieval2 \
|
||||
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
|
||||
--test=UrlShortener --test=TicTacToe --test=Battleship \
|
||||
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
|
||||
--test=WebArenaTask_134 --test=WebArenaTask_163
|
||||
|
||||
# Convert exit code 1 (some challenges failed) to exit code 0
|
||||
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
|
||||
exit 0
|
||||
else
|
||||
exit $?
|
||||
fi
|
||||
env:
|
||||
AGENT_NAME: ${{ matrix.agent-name }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
|
||||
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
|
||||
|
||||
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
|
||||
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
|
||||
|
||||
- name: Push reports to data branch
|
||||
run: |
|
||||
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
|
||||
rm ${{ env.REPORTS_FOLDER }}/*.json
|
||||
|
||||
# Find folder with newest (untracked) report in it
|
||||
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
|
||||
| xargs -I {} dirname {} \
|
||||
| xargs -I {} git ls-files --others --exclude-standard {} \
|
||||
| xargs -I {} dirname {} \
|
||||
| sort -u)
|
||||
json_report_file="$report_subfolder/report.json"
|
||||
|
||||
# Convert JSON report to Markdown
|
||||
markdown_report_file="$report_subfolder/report.md"
|
||||
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
|
||||
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
git config --global user.name 'GitHub Actions'
|
||||
git config --global user.email 'github-actions@agpt.co'
|
||||
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
|
||||
&& git checkout ${{ env.REPORTS_BRANCH }} \
|
||||
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
|
||||
git reset --hard
|
||||
git add ${{ env.REPORTS_FOLDER }}
|
||||
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
|
||||
&& git push origin ${{ env.REPORTS_BRANCH }}
|
||||
71
.github/workflows/autogpts-ci.yml
vendored
Normal file
71
.github/workflows/autogpts-ci.yml
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
name: Agent smoke tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 8 * * *'
|
||||
push:
|
||||
branches: [ master, development, ci-test* ]
|
||||
paths:
|
||||
- '.github/workflows/autogpts-ci.yml'
|
||||
- 'autogpt/**'
|
||||
- 'forge/**'
|
||||
- 'benchmark/**'
|
||||
- 'run'
|
||||
- 'cli.py'
|
||||
- 'setup.py'
|
||||
- '!**/*.md'
|
||||
pull_request:
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/autogpts-ci.yml'
|
||||
- 'autogpt/**'
|
||||
- 'forge/**'
|
||||
- 'benchmark/**'
|
||||
- 'run'
|
||||
- 'cli.py'
|
||||
- 'setup.py'
|
||||
- '!**/*.md'
|
||||
|
||||
jobs:
|
||||
serve-agent-protocol:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [ autogpt ]
|
||||
fail-fast: false
|
||||
timeout-minutes: 20
|
||||
env:
|
||||
min-python-version: '3.10'
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Install Poetry
|
||||
working-directory: ./${{ matrix.agent-name }}/
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Run regression tests
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
|
||||
poetry run agbenchmark --test=WriteFile
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
AGENT_NAME: ${{ matrix.agent-name }}
|
||||
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
|
||||
HELICONE_CACHE_ENABLED: false
|
||||
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
|
||||
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
|
||||
TELEMETRY_ENVIRONMENT: autogpt-ci
|
||||
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
|
||||
169
.github/workflows/benchmark-ci.yml
vendored
Normal file
169
.github/workflows/benchmark-ci.yml
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
name: AGBenchmark CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, development, ci-test* ]
|
||||
paths:
|
||||
- 'benchmark/**'
|
||||
- .github/workflows/benchmark-ci.yml
|
||||
- '!benchmark/reports/**'
|
||||
pull_request:
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- 'benchmark/**'
|
||||
- '!benchmark/reports/**'
|
||||
- .github/workflows/benchmark-ci.yml
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
min-python-version: '3.10'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: benchmark
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
poetry run pytest -vv \
|
||||
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
tests
|
||||
env:
|
||||
CI: true
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: agbenchmark,${{ runner.os }}
|
||||
|
||||
self-test-with-agent:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [ forge ]
|
||||
fail-fast: false
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Run regression tests
|
||||
working-directory: .
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
|
||||
set +e # Ignore non-zero exit codes and continue execution
|
||||
echo "Running the following command: poetry run agbenchmark --maintain --mock"
|
||||
poetry run agbenchmark --maintain --mock
|
||||
EXIT_CODE=$?
|
||||
set -e # Stop ignoring non-zero exit codes
|
||||
# Check if the exit code was 5, and if so, exit with 0 instead
|
||||
if [ $EXIT_CODE -eq 5 ]; then
|
||||
echo "regression_tests.json is empty."
|
||||
fi
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock"
|
||||
poetry run agbenchmark --mock
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock --category=data"
|
||||
poetry run agbenchmark --mock --category=data
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
|
||||
poetry run agbenchmark --mock --category=coding
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
|
||||
poetry run agbenchmark --test=WriteFile
|
||||
cd ../benchmark
|
||||
poetry install
|
||||
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
|
||||
export BUILD_SKILL_TREE=true
|
||||
|
||||
poetry run agbenchmark --mock
|
||||
|
||||
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
|
||||
if [ ! -z "$CHANGED" ]; then
|
||||
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
|
||||
echo "$CHANGED"
|
||||
exit 1
|
||||
else
|
||||
echo "No unstaged changes."
|
||||
fi
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
|
||||
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
|
||||
55
.github/workflows/benchmark_publish_package.yml
vendored
Normal file
55
.github/workflows/benchmark_publish_package.yml
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
name: Publish to PyPI
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
- name: Install Poetry
|
||||
working-directory: ./benchmark/
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
echo "$HOME/.poetry/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Build project for distribution
|
||||
working-directory: ./benchmark/
|
||||
run: poetry build
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: ./benchmark/
|
||||
run: poetry install
|
||||
|
||||
- name: Check Version
|
||||
working-directory: ./benchmark/
|
||||
id: check-version
|
||||
run: |
|
||||
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Create Release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
artifacts: "benchmark/dist/*"
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
draft: false
|
||||
generateReleaseNotes: false
|
||||
tag: agbenchmark-v${{ steps.check-version.outputs.version }}
|
||||
commit: master
|
||||
|
||||
- name: Build and publish
|
||||
working-directory: ./benchmark/
|
||||
run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}
|
||||
44
.github/workflows/ci.yml
vendored
44
.github/workflows/ci.yml
vendored
@@ -1,44 +0,0 @@
|
||||
name: Python CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.8]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
|
||||
- name: Lint with flake8
|
||||
continue-on-error: false
|
||||
run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
|
||||
|
||||
- name: Run unittest tests with coverage
|
||||
run: |
|
||||
coverage run --source=scripts -m unittest discover tests
|
||||
|
||||
- name: Generate coverage report
|
||||
run: |
|
||||
coverage report
|
||||
coverage xml
|
||||
34
.github/workflows/close-stale-issues.yml
vendored
Normal file
34
.github/workflows/close-stale-issues.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: 'Close stale issues'
|
||||
on:
|
||||
schedule:
|
||||
- cron: '30 1 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v9
|
||||
with:
|
||||
# operations-per-run: 5000
|
||||
stale-issue-message: >
|
||||
This issue has automatically been marked as _stale_ because it has not had
|
||||
any activity in the last 50 days. You can _unstale_ it by commenting or
|
||||
removing the label. Otherwise, this issue will be closed in 10 days.
|
||||
stale-pr-message: >
|
||||
This pull request has automatically been marked as _stale_ because it has
|
||||
not had any activity in the last 50 days. You can _unstale_ it by commenting
|
||||
or removing the label.
|
||||
close-issue-message: >
|
||||
This issue was closed automatically because it has been stale for 10 days
|
||||
with no activity.
|
||||
days-before-stale: 50
|
||||
days-before-close: 10
|
||||
# Do not touch meta issues:
|
||||
exempt-issue-labels: meta,fridge,project management
|
||||
# Do not affect pull requests:
|
||||
days-before-pr-stale: -1
|
||||
days-before-pr-close: -1
|
||||
236
.github/workflows/forge-ci.yml
vendored
Normal file
236
.github/workflows/forge-ci.yml
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
name: Forge CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, development, ci-test* ]
|
||||
paths:
|
||||
- '.github/workflows/forge-ci.yml'
|
||||
- 'forge/**'
|
||||
- '!forge/tests/vcr_cassettes'
|
||||
pull_request:
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/forge-ci.yml'
|
||||
- 'forge/**'
|
||||
- '!forge/tests/vcr_cassettes'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: forge
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
|
||||
steps:
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
working-directory: '.'
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Checkout cassettes
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
env:
|
||||
PR_BASE: ${{ github.event.pull_request.base.ref }}
|
||||
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
run: |
|
||||
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
|
||||
cassette_base_branch="${PR_BASE}"
|
||||
cd tests/vcr_cassettes
|
||||
|
||||
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
|
||||
cassette_base_branch="master"
|
||||
fi
|
||||
|
||||
if git ls-remote --exit-code --heads origin $cassette_branch ; then
|
||||
git fetch origin $cassette_branch
|
||||
git fetch origin $cassette_base_branch
|
||||
|
||||
git checkout $cassette_branch
|
||||
|
||||
# Pick non-conflicting cassette updates from the base branch
|
||||
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
|
||||
echo "Using cassettes from mirror branch '$cassette_branch'," \
|
||||
"synced to upstream branch '$cassette_base_branch'."
|
||||
else
|
||||
git checkout -b $cassette_branch
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
|
||||
"Using cassettes from '$cassette_base_branch'."
|
||||
fi
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
poetry run pytest -vv \
|
||||
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
forge
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: forge,${{ runner.os }}
|
||||
|
||||
- id: setup_git_auth
|
||||
name: Set up git token authentication
|
||||
# Cassettes may be pushed even when tests fail
|
||||
if: success() || failure()
|
||||
run: |
|
||||
config_key="http.${{ github.server_url }}/.extraheader"
|
||||
if [ "${{ runner.os }}" = 'macOS' ]; then
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
|
||||
else
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
|
||||
fi
|
||||
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
cd tests/vcr_cassettes
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
echo "config_key=$config_key" >> $GITHUB_OUTPUT
|
||||
|
||||
- id: push_cassettes
|
||||
name: Push updated cassettes
|
||||
# For pull requests, push updated cassettes even when tests fail
|
||||
if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
|
||||
env:
|
||||
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
run: |
|
||||
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
|
||||
is_pull_request=true
|
||||
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
|
||||
else
|
||||
cassette_branch="${{ github.ref_name }}"
|
||||
fi
|
||||
|
||||
cd tests/vcr_cassettes
|
||||
# Commit & push changes to cassettes if any
|
||||
if ! git diff --quiet; then
|
||||
git add .
|
||||
git commit -m "Auto-update cassettes"
|
||||
git push origin HEAD:$cassette_branch
|
||||
if [ ! $is_pull_request ]; then
|
||||
cd ../..
|
||||
git add tests/vcr_cassettes
|
||||
git commit -m "Update cassette submodule"
|
||||
git push origin HEAD:$cassette_branch
|
||||
fi
|
||||
echo "updated=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "updated=false" >> $GITHUB_OUTPUT
|
||||
echo "No cassette changes to commit"
|
||||
fi
|
||||
|
||||
- name: Post Set up git token auth
|
||||
if: steps.setup_git_auth.outcome == 'success'
|
||||
run: |
|
||||
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
|
||||
- name: Apply "behaviour change" label and comment on PR
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
PR_NUMBER="${{ github.event.pull_request.number }}"
|
||||
TOKEN="${{ secrets.PAT_REVIEW }}"
|
||||
REPO="${{ github.repository }}"
|
||||
|
||||
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
|
||||
echo "Adding label and comment..."
|
||||
echo $TOKEN | gh auth login --with-token
|
||||
gh issue edit $PR_NUMBER --add-label "behaviour change"
|
||||
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
|
||||
fi
|
||||
|
||||
- name: Upload logs to artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-logs
|
||||
path: forge/logs/
|
||||
60
.github/workflows/frontend-ci.yml
vendored
Normal file
60
.github/workflows/frontend-ci.yml
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
name: Frontend CI/CD
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- development
|
||||
- 'ci-test*' # This will match any branch that starts with "ci-test"
|
||||
paths:
|
||||
- 'frontend/**'
|
||||
- '.github/workflows/frontend-ci.yml'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'frontend/**'
|
||||
- '.github/workflows/frontend-ci.yml'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
|
||||
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Flutter
|
||||
uses: subosito/flutter-action@v2
|
||||
with:
|
||||
flutter-version: '3.13.2'
|
||||
|
||||
- name: Build Flutter to Web
|
||||
run: |
|
||||
cd frontend
|
||||
flutter build web --base-href /app/
|
||||
|
||||
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
|
||||
# if: github.event_name == 'push'
|
||||
# run: |
|
||||
# git config --local user.email "action@github.com"
|
||||
# git config --local user.name "GitHub Action"
|
||||
# git add frontend/build/web
|
||||
# git checkout -B ${{ env.BUILD_BRANCH }}
|
||||
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
|
||||
# git push -f origin ${{ env.BUILD_BRANCH }}
|
||||
|
||||
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
|
||||
if: github.event_name == 'push'
|
||||
uses: peter-evans/create-pull-request@v6
|
||||
with:
|
||||
add-paths: frontend/build/web
|
||||
base: ${{ github.ref_name }}
|
||||
branch: ${{ env.BUILD_BRANCH }}
|
||||
delete-branch: true
|
||||
title: "Update frontend build in `${{ github.ref_name }}`"
|
||||
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
|
||||
commit-message: "Update frontend build based on commit ${{ github.sha }}"
|
||||
133
.github/workflows/hackathon.yml
vendored
Normal file
133
.github/workflows/hackathon.yml
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
name: Hackathon
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
agents:
|
||||
description: "Agents to run (comma-separated)"
|
||||
required: false
|
||||
default: "autogpt" # Default agents if none are specified
|
||||
|
||||
jobs:
|
||||
matrix-setup:
|
||||
runs-on: ubuntu-latest
|
||||
# Service containers to run with `matrix-setup`
|
||||
services:
|
||||
# Label used to access the service container
|
||||
postgres:
|
||||
# Docker Hub image
|
||||
image: postgres
|
||||
# Provide the password for postgres
|
||||
env:
|
||||
POSTGRES_PASSWORD: postgres
|
||||
# Set health checks to wait until postgres has started
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
ports:
|
||||
# Maps tcp port 5432 on service container to the host
|
||||
- 5432:5432
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
env-name: ${{ steps.set-matrix.outputs.env-name }}
|
||||
steps:
|
||||
- id: set-matrix
|
||||
run: |
|
||||
if [ "${{ github.event_name }}" == "schedule" ]; then
|
||||
echo "::set-output name=env-name::production"
|
||||
echo "::set-output name=matrix::[ 'irrelevant']"
|
||||
elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
|
||||
IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
|
||||
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
|
||||
echo "::set-output name=env-name::production"
|
||||
echo "::set-output name=matrix::$matrix_string"
|
||||
else
|
||||
echo "::set-output name=env-name::testing"
|
||||
echo "::set-output name=matrix::[ 'irrelevant' ]"
|
||||
fi
|
||||
|
||||
tests:
|
||||
environment:
|
||||
name: "${{ needs.matrix-setup.outputs.env-name }}"
|
||||
needs: matrix-setup
|
||||
env:
|
||||
min-python-version: "3.10"
|
||||
name: "${{ matrix.agent-name }}"
|
||||
runs-on: ubuntu-latest
|
||||
services:
|
||||
# Label used to access the service container
|
||||
postgres:
|
||||
# Docker Hub image
|
||||
image: postgres
|
||||
# Provide the password for postgres
|
||||
env:
|
||||
POSTGRES_PASSWORD: postgres
|
||||
# Set health checks to wait until postgres has started
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
ports:
|
||||
# Maps tcp port 5432 on service container to the host
|
||||
- 5432:5432
|
||||
timeout-minutes: 50
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
|
||||
steps:
|
||||
- name: Print Environment Name
|
||||
run: |
|
||||
echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
|
||||
|
||||
- name: Check Docker Container
|
||||
id: check
|
||||
run: docker ps
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Install Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: v18.15
|
||||
|
||||
- name: Run benchmark
|
||||
run: |
|
||||
link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
|
||||
branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
|
||||
git clone "$link" -b "$branch" "$AGENT_NAME"
|
||||
cd $AGENT_NAME
|
||||
cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
|
||||
./run agent start $AGENT_NAME
|
||||
cd ../benchmark
|
||||
poetry install
|
||||
poetry run agbenchmark --no-dep
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
|
||||
SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
|
||||
WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
|
||||
WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
|
||||
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
|
||||
GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
|
||||
AGENT_NAME: ${{ matrix.agent-name }}
|
||||
66
.github/workflows/pr-label.yml
vendored
Normal file
66
.github/workflows/pr-label.yml
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
name: "Pull Request auto-label"
|
||||
|
||||
on:
|
||||
# So that PRs touching the same files as the push are updated
|
||||
push:
|
||||
branches: [ master, development, release-* ]
|
||||
paths-ignore:
|
||||
- 'forge/tests/vcr_cassettes'
|
||||
- 'benchmark/reports/**'
|
||||
# So that the `dirtyLabel` is removed if conflicts are resolve
|
||||
# We recommend `pull_request_target` so that github secrets are available.
|
||||
# In `pull_request` we wouldn't be able to change labels of fork PRs
|
||||
pull_request_target:
|
||||
types: [ opened, synchronize ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
conflicts:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Update PRs with conflict labels
|
||||
uses: eps1lon/actions-label-merge-conflict@releases/2.x
|
||||
with:
|
||||
dirtyLabel: "conflicts"
|
||||
#removeOnDirtyLabel: "PR: ready to ship"
|
||||
repoToken: "${{ secrets.GITHUB_TOKEN }}"
|
||||
commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
|
||||
commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
|
||||
|
||||
size:
|
||||
if: ${{ github.event_name == 'pull_request_target' }}
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: codelytv/pr-size-labeler@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
xs_label: 'size/xs'
|
||||
xs_max_size: 2
|
||||
s_label: 'size/s'
|
||||
s_max_size: 10
|
||||
m_label: 'size/m'
|
||||
m_max_size: 100
|
||||
l_label: 'size/l'
|
||||
l_max_size: 500
|
||||
xl_label: 'size/xl'
|
||||
message_if_xl:
|
||||
|
||||
scope:
|
||||
if: ${{ github.event_name == 'pull_request_target' }}
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/labeler@v5
|
||||
with:
|
||||
sync-labels: true
|
||||
151
.github/workflows/python-checks.yml
vendored
Normal file
151
.github/workflows/python-checks.yml
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
name: Python checks
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, development, ci-test* ]
|
||||
paths:
|
||||
- '.github/workflows/lint-ci.yml'
|
||||
- 'autogpt/**'
|
||||
- 'forge/**'
|
||||
- 'benchmark/**'
|
||||
- '**.py'
|
||||
- '!forge/tests/vcr_cassettes'
|
||||
pull_request:
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/lint-ci.yml'
|
||||
- 'autogpt/**'
|
||||
- 'forge/**'
|
||||
- 'benchmark/**'
|
||||
- '**.py'
|
||||
- '!forge/tests/vcr_cassettes'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
get-changed-parts:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- id: changes-in
|
||||
name: Determine affected subprojects
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
autogpt:
|
||||
- autogpt/autogpt/**
|
||||
- autogpt/tests/**
|
||||
- autogpt/poetry.lock
|
||||
forge:
|
||||
- forge/forge/**
|
||||
- forge/tests/**
|
||||
- forge/poetry.lock
|
||||
benchmark:
|
||||
- benchmark/agbenchmark/**
|
||||
- benchmark/tests/**
|
||||
- benchmark/poetry.lock
|
||||
outputs:
|
||||
changed-parts: ${{ steps.changes-in.outputs.changes }}
|
||||
|
||||
lint:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
# Install dependencies
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry -C ${{ matrix.sub-package }} install
|
||||
|
||||
# Lint
|
||||
|
||||
- name: Lint (isort)
|
||||
run: poetry run isort --check .
|
||||
working-directory: ${{ matrix.sub-package }}
|
||||
|
||||
- name: Lint (Black)
|
||||
if: success() || failure()
|
||||
run: poetry run black --check .
|
||||
working-directory: ${{ matrix.sub-package }}
|
||||
|
||||
- name: Lint (Flake8)
|
||||
if: success() || failure()
|
||||
run: poetry run flake8 .
|
||||
working-directory: ${{ matrix.sub-package }}
|
||||
|
||||
types:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
# Install dependencies
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry -C ${{ matrix.sub-package }} install
|
||||
|
||||
# Typecheck
|
||||
|
||||
- name: Typecheck
|
||||
if: success() || failure()
|
||||
run: poetry run pyright
|
||||
working-directory: ${{ matrix.sub-package }}
|
||||
20
.github/workflows/repo-stats.yml
vendored
Normal file
20
.github/workflows/repo-stats.yml
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
name: github-repo-stats
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run this once per day, towards the end of the day for keeping the most
|
||||
# recent data point most meaningful (hours are interpreted in UTC).
|
||||
- cron: "0 23 * * *"
|
||||
workflow_dispatch: # Allow for running this manually.
|
||||
|
||||
jobs:
|
||||
j1:
|
||||
name: github-repo-stats
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: run-ghrs
|
||||
# Use latest release.
|
||||
uses: jgehrcke/github-repo-stats@HEAD
|
||||
with:
|
||||
ghtoken: ${{ secrets.ghrs_github_api_token }}
|
||||
|
||||
98
.github/workflows/scripts/docker-ci-summary.sh
vendored
Executable file
98
.github/workflows/scripts/docker-ci-summary.sh
vendored
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
|
||||
head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template)
|
||||
ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template)
|
||||
|
||||
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
|
||||
|
||||
cat << $EOF
|
||||
# Docker Build summary 🔨
|
||||
|
||||
**Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
|
||||
|
||||
**Build type:** \`$build_type\`
|
||||
|
||||
**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
|
||||
|
||||
## Image details
|
||||
|
||||
**Tags:**
|
||||
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
|
||||
|
||||
<details>
|
||||
<summary><h3>Layers</h3></summary>
|
||||
|
||||
| Age | Size | Created by instruction |
|
||||
| --------- | ------ | ---------------------- |
|
||||
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
|
||||
| grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
|
||||
| cut -f-3 `# yeet Comment column`\
|
||||
| sed 's/ ago//' `# fix Layer age`\
|
||||
| sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
|
||||
| sed 's/\$/\\$/g' `# escape variable and shell expansions`\
|
||||
| sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
|
||||
| column -t -s$'\t' -o' | ' `# align columns and add separator`\
|
||||
| sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><h3>ENV</h3></summary>
|
||||
|
||||
| Variable | Value |
|
||||
| -------- | -------- |
|
||||
$(jq -r \
|
||||
'.Config.Env
|
||||
| map(
|
||||
split("=")
|
||||
| "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
|
||||
)
|
||||
| map("| \(.) |")
|
||||
| .[]' <<< $meta
|
||||
)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Raw metadata</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$meta
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
## Build details
|
||||
**Build trigger:** $push_forced_label $event_name \`$event_ref\`
|
||||
|
||||
<details>
|
||||
<summary><code>github</code> context</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$github_context_json
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
### Source
|
||||
**HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url)
|
||||
|
||||
**Diff with previous HEAD:** $head_compare_url
|
||||
|
||||
#### New commits
|
||||
$(jq -r 'map([
|
||||
"**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**",
|
||||
.message,
|
||||
(if .committer.name != .author.name then "\n> <sub>**Committer:** \(.committer.name) <\(.committer.email)></sub>" else "" end),
|
||||
"<sub>**Timestamp:** \(.timestamp)</sub>"
|
||||
] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json)
|
||||
|
||||
### Job environment
|
||||
|
||||
#### \`vars\` context:
|
||||
\`\`\`JSON
|
||||
$vars_json
|
||||
\`\`\`
|
||||
|
||||
#### \`env\` context:
|
||||
\`\`\`JSON
|
||||
$job_env_json
|
||||
\`\`\`
|
||||
|
||||
$EOF
|
||||
85
.github/workflows/scripts/docker-release-summary.sh
vendored
Executable file
85
.github/workflows/scripts/docker-release-summary.sh
vendored
Executable file
@@ -0,0 +1,85 @@
|
||||
#!/bin/bash
|
||||
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
|
||||
|
||||
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
|
||||
|
||||
cat << $EOF
|
||||
# Docker Release Build summary 🚀🔨
|
||||
|
||||
**Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
|
||||
|
||||
**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
|
||||
|
||||
## Image details
|
||||
|
||||
**Tags:**
|
||||
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
|
||||
|
||||
<details>
|
||||
<summary><h3>Layers</h3></summary>
|
||||
|
||||
| Age | Size | Created by instruction |
|
||||
| --------- | ------ | ---------------------- |
|
||||
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
|
||||
| grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
|
||||
| cut -f-3 `# yeet Comment column`\
|
||||
| sed 's/ ago//' `# fix Layer age`\
|
||||
| sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
|
||||
| sed 's/\$/\\$/g' `# escape variable and shell expansions`\
|
||||
| sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
|
||||
| column -t -s$'\t' -o' | ' `# align columns and add separator`\
|
||||
| sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><h3>ENV</h3></summary>
|
||||
|
||||
| Variable | Value |
|
||||
| -------- | -------- |
|
||||
$(jq -r \
|
||||
'.Config.Env
|
||||
| map(
|
||||
split("=")
|
||||
| "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
|
||||
)
|
||||
| map("| \(.) |")
|
||||
| .[]' <<< $meta
|
||||
)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Raw metadata</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$meta
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
## Build details
|
||||
**Build trigger:** $event_name \`$current_ref\`
|
||||
|
||||
| Parameter | Value |
|
||||
| -------------- | ------------ |
|
||||
| \`no_cache\` | \`$inputs_no_cache\` |
|
||||
|
||||
<details>
|
||||
<summary><code>github</code> context</summary>
|
||||
|
||||
\`\`\`JSON
|
||||
$github_context_json
|
||||
\`\`\`
|
||||
</details>
|
||||
|
||||
### Job environment
|
||||
|
||||
#### \`vars\` context:
|
||||
\`\`\`JSON
|
||||
$vars_json
|
||||
\`\`\`
|
||||
|
||||
#### \`env\` context:
|
||||
\`\`\`JSON
|
||||
$job_env_json
|
||||
\`\`\`
|
||||
|
||||
$EOF
|
||||
31
.github/workflows/workflow-checker.yml
vendored
Normal file
31
.github/workflows/workflow-checker.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: PR Status Checker
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
|
||||
jobs:
|
||||
status-check:
|
||||
name: Check PR Status
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# - name: Wait some time for all actions to start
|
||||
# run: sleep 30
|
||||
- uses: actions/checkout@v4
|
||||
# with:
|
||||
# fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install requests
|
||||
- name: Check PR Status
|
||||
run: |
|
||||
echo "Current directory before running Python script:"
|
||||
pwd
|
||||
echo "Attempting to run Python script:"
|
||||
python check_actions_status.py
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
175
.gitignore
vendored
175
.gitignore
vendored
@@ -1,28 +1,173 @@
|
||||
scripts/keys.py
|
||||
scripts/*json
|
||||
scripts/node_modules/
|
||||
scripts/__pycache__/keys.cpython-310.pyc
|
||||
package-lock.json
|
||||
*.pyc
|
||||
## Original ignores
|
||||
.github_access_token
|
||||
autogpt/keys.py
|
||||
autogpt/*.json
|
||||
auto_gpt_workspace/*
|
||||
*.mpeg
|
||||
.env
|
||||
azure.yaml
|
||||
*venv/*
|
||||
outputs/*
|
||||
ai_settings.yaml
|
||||
last_run_ai_settings.yaml
|
||||
.vscode
|
||||
.idea/*
|
||||
auto-gpt.json
|
||||
log.txt
|
||||
log-ingestion.txt
|
||||
logs
|
||||
/logs
|
||||
*.log
|
||||
*.mp3
|
||||
mem.sqlite3
|
||||
venvAutoGPT
|
||||
|
||||
# Coverage reports
|
||||
.coverage
|
||||
coverage.xml
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
pip-wheel-metadata/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
|
||||
# For Macs Dev Environs: ignoring .Desktop Services_Store
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
site/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.direnv/
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv*/
|
||||
ENV/
|
||||
env.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
llama-*
|
||||
vicuna-*
|
||||
|
||||
# mac
|
||||
.DS_Store
|
||||
|
||||
openai/
|
||||
|
||||
# news
|
||||
CURRENT_BULLETIN.md
|
||||
|
||||
# AgBenchmark
|
||||
agbenchmark/reports/
|
||||
|
||||
# Nodejs
|
||||
package-lock.json
|
||||
|
||||
|
||||
# Allow for locally private items
|
||||
# private
|
||||
pri*
|
||||
# ignore
|
||||
ig*
|
||||
.github_access_token
|
||||
LICENSE.rtf
|
||||
rnd/autogpt_server/settings.py
|
||||
|
||||
3
.gitmodules
vendored
Normal file
3
.gitmodules
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[submodule "forge/tests/vcr_cassettes"]
|
||||
path = forge/tests/vcr_cassettes
|
||||
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
|
||||
6
.pr_agent.toml
Normal file
6
.pr_agent.toml
Normal file
@@ -0,0 +1,6 @@
|
||||
[pr_reviewer]
|
||||
num_code_suggestions=0
|
||||
|
||||
[pr_code_suggestions]
|
||||
commitable_code_suggestions=false
|
||||
num_code_suggestions=0
|
||||
127
.pre-commit-config.yaml
Normal file
127
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,127 @@
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.4.0
|
||||
hooks:
|
||||
- id: check-added-large-files
|
||||
args: ["--maxkb=500"]
|
||||
- id: fix-byte-order-marker
|
||||
- id: check-case-conflict
|
||||
- id: check-merge-conflict
|
||||
- id: check-symlinks
|
||||
- id: debug-statements
|
||||
|
||||
- repo: local
|
||||
# isort needs the context of which packages are installed to function, so we
|
||||
# can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
|
||||
hooks:
|
||||
- id: isort-autogpt
|
||||
name: Lint (isort) - AutoGPT
|
||||
entry: poetry -C autogpt run isort
|
||||
files: ^autogpt/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort-forge
|
||||
name: Lint (isort) - Forge
|
||||
entry: poetry -C forge run isort
|
||||
files: ^forge/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort-benchmark
|
||||
name: Lint (isort) - Benchmark
|
||||
entry: poetry -C benchmark run isort
|
||||
files: ^benchmark/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- repo: https://github.com/psf/black
|
||||
rev: 23.12.1
|
||||
# Black has sensible defaults, doesn't need package context, and ignores
|
||||
# everything in .gitignore, so it works fine without any config or arguments.
|
||||
hooks:
|
||||
- id: black
|
||||
name: Lint (Black)
|
||||
language_version: python3.10
|
||||
|
||||
- repo: https://github.com/PyCQA/flake8
|
||||
rev: 7.0.0
|
||||
# To have flake8 load the config of the individual subprojects, we have to call
|
||||
# them separately.
|
||||
hooks:
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - AutoGPT
|
||||
alias: flake8-autogpt
|
||||
files: ^autogpt/(autogpt|scripts|tests)/
|
||||
args: [--config=autogpt/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Forge
|
||||
alias: flake8-forge
|
||||
files: ^forge/(forge|tests)/
|
||||
args: [--config=forge/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Benchmark
|
||||
alias: flake8-benchmark
|
||||
files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
|
||||
args: [--config=benchmark/.flake8]
|
||||
|
||||
- repo: local
|
||||
# To have watertight type checking, we check *all* the files in an affected
|
||||
# project. To trigger on poetry.lock we also reset the file `types` filter.
|
||||
hooks:
|
||||
- id: pyright
|
||||
name: Typecheck - AutoGPT
|
||||
alias: pyright-autogpt
|
||||
entry: poetry -C autogpt run pyright
|
||||
args: [-p, autogpt, autogpt]
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Forge
|
||||
alias: pyright-forge
|
||||
entry: poetry -C forge run pyright
|
||||
args: [-p, forge, forge]
|
||||
files: ^forge/(forge/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Benchmark
|
||||
alias: pyright-benchmark
|
||||
entry: poetry -C benchmark run pyright
|
||||
args: [-p, benchmark, benchmark]
|
||||
files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: pytest-autogpt
|
||||
name: Run tests - AutoGPT (excl. slow tests)
|
||||
entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pytest-forge
|
||||
name: Run tests - Forge (excl. slow tests)
|
||||
entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
|
||||
files: ^forge/(forge/|tests/|poetry\.lock$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pytest-benchmark
|
||||
name: Run tests - Benchmark
|
||||
entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
|
||||
files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
61
.vscode/all-projects.code-workspace
vendored
Normal file
61
.vscode/all-projects.code-workspace
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"folders": [
|
||||
{
|
||||
"name": "autogpt",
|
||||
"path": "../autogpt"
|
||||
},
|
||||
{
|
||||
"name": "benchmark",
|
||||
"path": "../benchmark"
|
||||
},
|
||||
{
|
||||
"name": "docs",
|
||||
"path": "../docs"
|
||||
},
|
||||
{
|
||||
"name": "forge",
|
||||
"path": "../forge"
|
||||
},
|
||||
{
|
||||
"name": "frontend",
|
||||
"path": "../frontend"
|
||||
},
|
||||
{
|
||||
"name": "autogpt_server",
|
||||
"path": "../rnd/autogpt_server"
|
||||
},
|
||||
{
|
||||
"name": "autogpt_builder",
|
||||
"path": "../rnd/autogpt_builder"
|
||||
},
|
||||
{
|
||||
"name": "market",
|
||||
"path": "../rnd/market"
|
||||
},
|
||||
{
|
||||
"name": "lib",
|
||||
"path": "../rnd/autogpt_libs"
|
||||
},
|
||||
{
|
||||
"name": "infra",
|
||||
"path": "../rnd/infra"
|
||||
},
|
||||
{
|
||||
"name": "[root]",
|
||||
"path": ".."
|
||||
}
|
||||
],
|
||||
"settings": {
|
||||
"python.analysis.typeCheckingMode": "basic"
|
||||
},
|
||||
"extensions": {
|
||||
"recommendations": [
|
||||
"charliermarsh.ruff",
|
||||
"dart-code.flutter",
|
||||
"ms-python.black-formatter",
|
||||
"ms-python.vscode-pylance",
|
||||
"prisma.prisma",
|
||||
"qwtel.sqlite-viewer"
|
||||
]
|
||||
}
|
||||
}
|
||||
21
CITATION.cff
Normal file
21
CITATION.cff
Normal file
@@ -0,0 +1,21 @@
|
||||
# This CITATION.cff file was generated with cffinit.
|
||||
# Visit https://bit.ly/cffinit to generate yours today!
|
||||
|
||||
cff-version: 1.2.0
|
||||
title: AutoGPT
|
||||
message: >-
|
||||
If you use this software, please cite it using the
|
||||
metadata from this file.
|
||||
type: software
|
||||
authors:
|
||||
- name: Significant Gravitas
|
||||
website: 'https://agpt.co'
|
||||
repository-code: 'https://github.com/Significant-Gravitas/AutoGPT'
|
||||
url: 'https://agpt.co'
|
||||
abstract: >-
|
||||
A collection of tools and experimental open-source attempts to make GPT-4 fully
|
||||
autonomous.
|
||||
keywords:
|
||||
- AI
|
||||
- Agent
|
||||
license: MIT
|
||||
182
CLI-USAGE.md
Executable file
182
CLI-USAGE.md
Executable file
@@ -0,0 +1,182 @@
|
||||
## CLI Documentation
|
||||
|
||||
This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agents stop` command will terminate any process running on port 8000.
|
||||
|
||||
### 1. Entry Point for the CLI
|
||||
|
||||
Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.
|
||||
|
||||
```sh
|
||||
./run
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Usage: cli.py [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
agent Commands to create, start and stop agents
|
||||
benchmark Commands to start the benchmark and list tests and categories
|
||||
setup Installs dependencies needed for your system.
|
||||
```
|
||||
|
||||
If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:
|
||||
|
||||
```sh
|
||||
./run COMMAND --help
|
||||
```
|
||||
|
||||
This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.
|
||||
|
||||
### 2. Setup Command
|
||||
|
||||
```sh
|
||||
./run setup
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Setup initiated
|
||||
Installation has been completed.
|
||||
```
|
||||
|
||||
This command initializes the setup of the project.
|
||||
|
||||
### 3. Agents Commands
|
||||
|
||||
**a. List All Agents**
|
||||
|
||||
```sh
|
||||
./run agent list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available agents: 🤖
|
||||
🐙 forge
|
||||
🐙 autogpt
|
||||
```
|
||||
|
||||
Lists all the available agents.
|
||||
|
||||
**b. Create a New Agent**
|
||||
|
||||
```sh
|
||||
./run agent create my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
|
||||
```
|
||||
|
||||
Creates a new agent named 'my_agent'.
|
||||
|
||||
**c. Start an Agent**
|
||||
|
||||
```sh
|
||||
./run agent start my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
... (ASCII Art representing the agent startup)
|
||||
[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
|
||||
[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
|
||||
```
|
||||
|
||||
Starts the 'my_agent' and displays startup ASCII art and logs.
|
||||
|
||||
**d. Stop an Agent**
|
||||
|
||||
```sh
|
||||
./run agent stop
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Agent stopped
|
||||
```
|
||||
|
||||
Stops the running agent.
|
||||
|
||||
### 4. Benchmark Commands
|
||||
|
||||
**a. List Benchmark Categories**
|
||||
|
||||
```sh
|
||||
./run benchmark categories list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available categories: 📚
|
||||
📖 code
|
||||
📖 safety
|
||||
📖 memory
|
||||
... (and so on)
|
||||
```
|
||||
|
||||
Lists all available benchmark categories.
|
||||
|
||||
**b. List Benchmark Tests**
|
||||
|
||||
```sh
|
||||
./run benchmark tests list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available tests: 📚
|
||||
📖 interface
|
||||
🔬 Search - TestSearch
|
||||
🔬 Write File - TestWriteFile
|
||||
... (and so on)
|
||||
```
|
||||
|
||||
Lists all available benchmark tests.
|
||||
|
||||
**c. Show Details of a Benchmark Test**
|
||||
|
||||
```sh
|
||||
./run benchmark tests details TestWriteFile
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
TestWriteFile
|
||||
-------------
|
||||
|
||||
Category: interface
|
||||
Task: Write the word 'Washington' to a .txt file
|
||||
... (and other details)
|
||||
```
|
||||
|
||||
Displays the details of the 'TestWriteFile' benchmark test.
|
||||
|
||||
**d. Start Benchmark for the Agent**
|
||||
|
||||
```sh
|
||||
./run benchmark start my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
(more details about the testing process shown whilst the test are running)
|
||||
============= 13 failed, 1 passed in 0.97s ============...
|
||||
```
|
||||
|
||||
Displays the results of the benchmark tests on 'my_agent'.
|
||||
40
CODE_OF_CONDUCT.md
Normal file
40
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Code of Conduct for AutoGPT
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
The purpose of this Code of Conduct is to provide guidelines for contributors to the AutoGPT projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
|
||||
|
||||
## 2. Scope
|
||||
|
||||
This Code of Conduct applies to all contributors, maintainers, and users of the AutoGPT project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
|
||||
|
||||
## 3. Our Standards
|
||||
|
||||
We encourage the following behavior:
|
||||
|
||||
* Being respectful and considerate to others
|
||||
* Actively seeking diverse perspectives
|
||||
* Providing constructive feedback and assistance
|
||||
* Demonstrating empathy and understanding
|
||||
|
||||
We discourage the following behavior:
|
||||
|
||||
* Harassment or discrimination of any kind
|
||||
* Disrespectful, offensive, or inappropriate language or content
|
||||
* Personal attacks or insults
|
||||
* Unwarranted criticism or negativity
|
||||
|
||||
## 4. Reporting and Enforcement
|
||||
|
||||
If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.
|
||||
|
||||
Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.
|
||||
|
||||
## 5. Acknowledgements
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
|
||||
|
||||
## 6. Contact
|
||||
|
||||
If you have any questions or concerns, please contact the project maintainers on Discord:
|
||||
https://discord.gg/autogpt
|
||||
@@ -1,56 +1,38 @@
|
||||
# AutoGPT Contribution Guide
|
||||
If you are reading this, you are probably looking for the full **[contribution guide]**,
|
||||
which is part of our [wiki].
|
||||
|
||||
To contribute to this GitHub project, you can follow these steps:
|
||||
Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
|
||||
<!-- You can find our immediate priorities and their progress on our public [kanban board]. -->
|
||||
|
||||
1. Fork the repository you want to contribute to by clicking the "Fork" button on the project page.
|
||||
[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
|
||||
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
|
||||
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
|
||||
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
|
||||
|
||||
2. Clone the repository to your local machine using the following command:
|
||||
## In short
|
||||
1. Avoid duplicate work, issues, PRs etc.
|
||||
2. We encourage you to collaborate with fellow community members on some of our bigger
|
||||
[todo's][roadmap]!
|
||||
* We highly recommend to post your idea and discuss it in the [dev channel].
|
||||
3. Create a draft PR when starting work on bigger changes.
|
||||
4. Adhere to the [Code Guidelines]
|
||||
5. Clearly explain your changes when submitting a PR.
|
||||
6. Don't submit broken code: test/validate your changes.
|
||||
7. Avoid making unnecessary changes, especially if they're purely based on your personal
|
||||
preferences. Doing so is the maintainers' job. ;-)
|
||||
8. Please also consider contributing something other than code; see the
|
||||
[contribution guide] for options.
|
||||
|
||||
```
|
||||
git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT
|
||||
```
|
||||
3. Create a new branch for your changes using the following command:
|
||||
[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
|
||||
[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines
|
||||
|
||||
```
|
||||
git checkout -b "branch-name"
|
||||
```
|
||||
4. Make your changes to the code or documentation.
|
||||
- Example: Improve User Interface or Add Documentation.
|
||||
If you wish to involve with the project (beyond just contributing PRs), please read the
|
||||
wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).
|
||||
|
||||
In fact, why not just look through the whole wiki (it's only a few pages) and
|
||||
hop on our Discord. See you there! :-)
|
||||
|
||||
5. Add the changes to the staging area using the following command:
|
||||
```
|
||||
git add .
|
||||
```
|
||||
|
||||
6. Commit the changes with a meaningful commit message using the following command:
|
||||
```
|
||||
git commit -m "your commit message"
|
||||
```
|
||||
7. Push the changes to your forked repository using the following command:
|
||||
```
|
||||
git push origin branch-name
|
||||
```
|
||||
8. Go to the GitHub website and navigate to your forked repository.
|
||||
|
||||
9. Click the "New pull request" button.
|
||||
|
||||
10. Select the branch you just pushed to and the branch you want to merge into on the original repository.
|
||||
|
||||
11. Add a description of your changes and click the "Create pull request" button.
|
||||
|
||||
12. Wait for the project maintainer to review your changes and provide feedback.
|
||||
|
||||
13. Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project.
|
||||
|
||||
14. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands:
|
||||
|
||||
```
|
||||
git fetch upstream
|
||||
git checkout master
|
||||
git merge upstream/master
|
||||
```
|
||||
Finally, delete the branch you created with the following command:
|
||||
```
|
||||
git branch -d branch-name
|
||||
```
|
||||
That's it you made it 🐣⭐⭐
|
||||
❤️ & 🔆
|
||||
The team @ AutoGPT
|
||||
https://discord.gg/autogpt
|
||||
|
||||
23
Dockerfile
23
Dockerfile
@@ -1,23 +0,0 @@
|
||||
# Use an official Python base image from the Docker Hub
|
||||
FROM python:3.11-slim
|
||||
|
||||
# Set environment variables
|
||||
ENV PIP_NO_CACHE_DIR=yes \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Create a non-root user and set permissions
|
||||
RUN useradd --create-home appuser
|
||||
WORKDIR /home/appuser
|
||||
RUN chown appuser:appuser /home/appuser
|
||||
USER appuser
|
||||
|
||||
# Copy the requirements.txt file and install the requirements
|
||||
COPY --chown=appuser:appuser requirements.txt .
|
||||
RUN pip install --no-cache-dir --user -r requirements.txt
|
||||
|
||||
# Copy the application files
|
||||
COPY --chown=appuser:appuser scripts/ .
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["python", "main.py"]
|
||||
61
Dockerfile.autogpt
Normal file
61
Dockerfile.autogpt
Normal file
@@ -0,0 +1,61 @@
|
||||
# 'dev' or 'release' container build
|
||||
ARG BUILD_TYPE=dev
|
||||
|
||||
# Use an official Python base image from the Docker Hub
|
||||
FROM python:3.10-slim AS autogpt-base
|
||||
|
||||
# Install browsers
|
||||
RUN apt-get update && apt-get install -y \
|
||||
chromium-driver ca-certificates gcc \
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install utilities
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl jq wget git \
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set environment variables
|
||||
ENV PIP_NO_CACHE_DIR=yes \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
POETRY_HOME="/opt/poetry" \
|
||||
POETRY_VIRTUALENVS_PATH="/venv" \
|
||||
POETRY_VIRTUALENVS_IN_PROJECT=0 \
|
||||
POETRY_NO_INTERACTION=1
|
||||
|
||||
# Install and configure Poetry
|
||||
RUN curl -sSL https://install.python-poetry.org | python3 -
|
||||
ENV PATH="$POETRY_HOME/bin:$PATH"
|
||||
RUN poetry config installer.max-workers 10
|
||||
|
||||
WORKDIR /app/autogpt
|
||||
COPY autogpt/pyproject.toml autogpt/poetry.lock ./
|
||||
|
||||
# Include forge so it can be used as a path dependency
|
||||
COPY forge/ ../forge
|
||||
|
||||
# Include frontend
|
||||
COPY frontend/ ../frontend
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["poetry", "run", "autogpt"]
|
||||
CMD []
|
||||
|
||||
# dev build -> include everything
|
||||
FROM autogpt-base as autogpt-dev
|
||||
RUN poetry install --no-cache --no-root \
|
||||
&& rm -rf $(poetry env info --path)/src
|
||||
ONBUILD COPY autogpt/ ./
|
||||
|
||||
# release build -> include bare minimum
|
||||
FROM autogpt-base as autogpt-release
|
||||
RUN poetry install --no-cache --no-root --without dev \
|
||||
&& rm -rf $(poetry env info --path)/src
|
||||
ONBUILD COPY autogpt/autogpt/ ./autogpt
|
||||
ONBUILD COPY autogpt/scripts/ ./scripts
|
||||
ONBUILD COPY autogpt/plugins/ ./plugins
|
||||
ONBUILD COPY autogpt/README.md ./README.md
|
||||
ONBUILD RUN mkdir ./data
|
||||
|
||||
FROM autogpt-${BUILD_TYPE} AS autogpt
|
||||
RUN poetry install --only-root
|
||||
173
FORGE-QUICKSTART.md
Normal file
173
FORGE-QUICKSTART.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# Quickstart Guide
|
||||
|
||||
> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here
|
||||
|
||||
Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.
|
||||
|
||||
## System Requirements
|
||||
|
||||
This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
|
||||
|
||||
|
||||
## Getting Setup
|
||||
1. **Fork the Repository**
|
||||
To fork the repository, follow these steps:
|
||||
- Navigate to the main page of the repository.
|
||||
|
||||

|
||||
- In the top-right corner of the page, click Fork.
|
||||
|
||||

|
||||
- On the next page, select your GitHub account to create the fork.
|
||||
- Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
|
||||
|
||||
2. **Clone the Repository**
|
||||
To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
|
||||
- Open your terminal.
|
||||
- Navigate to the directory where you want to clone the repository.
|
||||
- Run the git clone command for the fork you just created
|
||||
|
||||

|
||||
|
||||
- Then open your project in your ide
|
||||
|
||||

|
||||
|
||||
4. **Setup the Project**
|
||||
Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
|
||||
It can be accessed by running the `run` command by typing `./run` in the terminal.
|
||||
|
||||
The first command you need to use is `./run setup.` This will guide you through setting up your system.
|
||||
Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:
|
||||
|
||||

|
||||
|
||||
### For Windows Users
|
||||
|
||||
If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
|
||||
|
||||
#### Update WSL
|
||||
Run the following command in Powershell or Command Prompt:
|
||||
1. Enable the optional WSL and Virtual Machine Platform components.
|
||||
2. Download and install the latest Linux kernel.
|
||||
3. Set WSL 2 as the default.
|
||||
4. Download and install the Ubuntu Linux distribution (a reboot may be required).
|
||||
|
||||
```shell
|
||||
wsl --install
|
||||
```
|
||||
|
||||
For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
|
||||
|
||||
#### Resolve FileNotFoundError or "No such file or directory" Errors
|
||||
When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).
|
||||
|
||||
To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here’s how to install and run `dos2unix` on the script:
|
||||
|
||||
```shell
|
||||
sudo apt update
|
||||
sudo apt install dos2unix
|
||||
dos2unix ./run
|
||||
```
|
||||
|
||||
After executing the above commands, running `./run setup` should work successfully.
|
||||
|
||||
#### Store Project Files within the WSL File System
|
||||
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.
|
||||
|
||||
You can keep running the command to get feedback on where you are up to with your setup.
|
||||
When setup has been completed, the command will return an output like this:
|
||||
|
||||

|
||||
|
||||
## Creating Your Agent
|
||||
|
||||
After completing the setup, the next step is to create your agent template.
|
||||
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.
|
||||
|
||||
Tips for naming your agent:
|
||||
* Give it its own unique name, or name it after yourself
|
||||
* Include an important aspect of your agent in the name, such as its purpose
|
||||
|
||||
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`
|
||||
|
||||

|
||||
|
||||
## Running your Agent
|
||||
|
||||
Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`
|
||||
|
||||
This starts the agent on the URL: `http://localhost:8000/`
|
||||
|
||||

|
||||
|
||||
The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
|
||||
|
||||

|
||||
|
||||
Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.
|
||||
|
||||

|
||||
|
||||
When you have finished with your agent or just need to restart it, use Ctl-C to end the session. Then, you can re-run the start command.
|
||||
|
||||
If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000, which should be the agent.
|
||||
|
||||
## Benchmarking your Agent
|
||||
|
||||
The benchmarking system can also be accessed using the CLI too:
|
||||
|
||||
```bash
|
||||
agpt % ./run benchmark
|
||||
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Commands to start the benchmark and list tests and categories
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
categories Benchmark categories group command
|
||||
start Starts the benchmark command
|
||||
tests Benchmark tests group command
|
||||
agpt % ./run benchmark categories
|
||||
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Benchmark categories group command
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
list List benchmark categories command
|
||||
agpt % ./run benchmark tests
|
||||
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Benchmark tests group command
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
details Benchmark test details command
|
||||
list List benchmark tests command
|
||||
```
|
||||
|
||||
The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
|
||||
```bash
|
||||
./run benchmark categories list
|
||||
# And what tests are available with
|
||||
./run benchmark tests list
|
||||
```
|
||||
|
||||

|
||||
|
||||
|
||||
Finally, you can run the benchmark with
|
||||
|
||||
```bash
|
||||
./run benchmark start YOUR_AGENT_NAME
|
||||
|
||||
```
|
||||
|
||||
>
|
||||
541
README.md
541
README.md
@@ -1,422 +1,133 @@
|
||||
# Auto-GPT: An Autonomous GPT-4 Experiment
|
||||
# AutoGPT: Build & Use AI Agents
|
||||
|
||||

|
||||
[](https://twitter.com/SigGravitas)
|
||||
[](https://discord.gg/autogpt)
|
||||
[](https://github.com/Torantulino/Auto-GPT/actions/workflows/ci.yml)
|
||||
[](https://discord.gg/autogpt)  
|
||||
[](https://twitter.com/Auto_GPT)  
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
|
||||
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
|
||||
**AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
|
||||
|
||||
### Demo (30/03/2023):
|
||||
## How to Get Started
|
||||
|
||||
https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
|
||||
https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
|
||||
|
||||
### 🧱 AutoGPT Builder
|
||||
|
||||
The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!
|
||||
|
||||
[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
|
||||
|
||||
### 💽 AutoGPT Server
|
||||
|
||||
The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.
|
||||
|
||||
### 🐙 Example Agents
|
||||
|
||||
Here are two examples of what you can do with AutoGPT:
|
||||
|
||||
1. **Reddit Marketing Agent**
|
||||
- This agent reads comments on Reddit.
|
||||
- It looks for people asking about your product.
|
||||
- It then automatically responds to them.
|
||||
|
||||
2. **YouTube Content Repurposing Agent**
|
||||
- This agent subscribes to your YouTube channel.
|
||||
- When you post a new video, it transcribes it.
|
||||
- It uses AI to write a search engine optimized blog post.
|
||||
- Then, it publishes this blog post to your Medium account.
|
||||
|
||||
These examples show just a glimpse of what you can achieve with AutoGPT!
|
||||
|
||||
---
|
||||
Our mission is to provide the tools, so that you can focus on what matters:
|
||||
|
||||
- 🏗️ **Building** - Lay the foundation for something amazing.
|
||||
- 🧪 **Testing** - Fine-tune your agent to perfection.
|
||||
- 🤝 **Delegating** - Let AI work for you, and have your ideas come to life.
|
||||
|
||||
Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI innovation.
|
||||
|
||||
**📖 [Documentation](https://docs.agpt.co)**
|
||||
 | 
|
||||
**🚀 [Contributing](CONTRIBUTING.md)**
|
||||
|
||||
|
||||
---
|
||||
## 🤖 AutoGPT Classic
|
||||
> Below is information about the classic version of AutoGPT.
|
||||
|
||||
**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
|
||||
### 🏗️ Forge
|
||||
|
||||
**Forge your own agent!** – Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
|
||||
|
||||
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) –
|
||||
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
|
||||
|
||||
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
|
||||
|
||||
### 🎯 Benchmark
|
||||
|
||||
**Measure your agent's performance!** The `agbenchmark` can be used with any agent that supports the agent protocol, and the integration with the project's [CLI] makes it even easier to use with AutoGPT and forge-based agents. The benchmark offers a stringent testing environment. Our framework allows for autonomous, objective performance evaluations, ensuring your agents are primed for real-world action.
|
||||
|
||||
<!-- TODO: insert visual demonstrating the benchmark -->
|
||||
|
||||
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
|
||||
 | 
|
||||
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
|
||||
|
||||
### 💻 UI
|
||||
|
||||
**Makes agents easy to use!** The `frontend` gives you a user-friendly interface to control and monitor your agents. It connects to agents through the [agent protocol](#-agent-protocol), ensuring compatibility with many agents from both inside and outside of our ecosystem.
|
||||
|
||||
<!-- TODO: insert screenshot of front end -->
|
||||
|
||||
The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
|
||||
|
||||
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
|
||||
|
||||
### ⌨️ CLI
|
||||
|
||||
[CLI]: #-cli
|
||||
|
||||
To make it as easy as possible to use all of the tools offered by the repository, a CLI is included at the root of the repo:
|
||||
|
||||
```shell
|
||||
$ ./run
|
||||
Usage: cli.py [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
agent Commands to create, start and stop agents
|
||||
benchmark Commands to start the benchmark and list tests and categories
|
||||
setup Installs dependencies needed for your system.
|
||||
```
|
||||
|
||||
Just clone the repo, install dependencies with `./run setup`, and you should be good to go!
|
||||
|
||||
## 🤔 Questions? Problems? Suggestions?
|
||||
|
||||
### Get help - [Discord 💬](https://discord.gg/autogpt)
|
||||
|
||||
[](https://discord.gg/autogpt)
|
||||
|
||||
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn’t created an issue for the same topic.
|
||||
|
||||
## 🤝 Sister projects
|
||||
|
||||
### 🔄 Agent Protocol
|
||||
|
||||
To maintain a uniform standard and ensure seamless compatibility with many current and future applications, AutoGPT employs the [agent protocol](https://agentprotocol.ai/) standard by the AI Engineer Foundation. This standardizes the communication pathways from your agent to the frontend and benchmark.
|
||||
|
||||
---
|
||||
|
||||
<h2 align="center"> 💖 Help Fund Auto-GPT's Development 💖</h2>
|
||||
<p align="center">
|
||||
If you can spare a coffee, you can help to cover the API costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
|
||||
A full day of development can easily cost as much as $20 in API costs, which for a free project is quite limiting.
|
||||
Your support is greatly appreciated
|
||||
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date&theme=dark" />
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
|
||||
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
|
||||
</picture>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
Development of this free, open-source project is made possible by all the <a href="https://github.com/Torantulino/Auto-GPT/graphs/contributors">contributors</a> and <a href="https://github.com/sponsors/Torantulino">sponsors</a>. If you'd like to sponsor this project and have your avatar or company logo appear below <a href="https://github.com/sponsors/Torantulino">click here</a>.
|
||||
|
||||
<h3 align="center">Individual Sponsors</h3>
|
||||
<p align="center">
|
||||
<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="50px" alt="robinicus" /></a> <a href="https://github.com/prompthero"><img src="https://github.com/prompthero.png" width="50px" alt="prompthero" /></a> <a href="https://github.com/crizzler"><img src="https://github.com/crizzler.png" width="50px" alt="crizzler" /></a> <a href="https://github.com/tob-le-rone"><img src="https://github.com/tob-le-rone.png" width="50px" alt="tob-le-rone" /></a> <a href="https://github.com/FSTatSBS"><img src="https://github.com/FSTatSBS.png" width="50px" alt="FSTatSBS" /></a> <a href="https://github.com/toverly1"><img src="https://github.com/toverly1.png" width="50px" alt="toverly1" /></a> <a href="https://github.com/ddtarazona"><img src="https://github.com/ddtarazona.png" width="50px" alt="ddtarazona" /></a> <a href="https://github.com/Nalhos"><img src="https://github.com/Nalhos.png" width="50px" alt="Nalhos" /></a> <a href="https://github.com/Kazamario"><img src="https://github.com/Kazamario.png" width="50px" alt="Kazamario" /></a> <a href="https://github.com/pingbotan"><img src="https://github.com/pingbotan.png" width="50px" alt="pingbotan" /></a> <a href="https://github.com/indoor47"><img src="https://github.com/indoor47.png" width="50px" alt="indoor47" /></a> <a href="https://github.com/AuroraHolding"><img src="https://github.com/AuroraHolding.png" width="50px" alt="AuroraHolding" /></a> <a href="https://github.com/kreativai"><img src="https://github.com/kreativai.png" width="50px" alt="kreativai" /></a> <a href="https://github.com/hunteraraujo"><img src="https://github.com/hunteraraujo.png" width="50px" alt="hunteraraujo" /></a> <a href="https://github.com/Explorergt92"><img src="https://github.com/Explorergt92.png" width="50px" alt="Explorergt92" /></a> <a href="https://github.com/judegomila"><img src="https://github.com/judegomila.png" width="50px" alt="judegomila" /></a>
|
||||
<a href="https://github.com/thepok"><img src="https://github.com/thepok.png" width="50px" alt="thepok" /></a>
|
||||
<a href="https://github.com/SpacingLily"><img src="https://github.com/SpacingLily.png" width="50px" alt="SpacingLily" /></a> <a href="https://github.com/merwanehamadi"><img src="https://github.com/merwanehamadi.png" width="50px" alt="merwanehamadi" /></a> <a href="https://github.com/m"><img src="https://github.com/m.png" width="50px" alt="m" /></a> <a href="https://github.com/zkonduit"><img src="https://github.com/zkonduit.png" width="50px" alt="zkonduit" /></a> <a href="https://github.com/maxxflyer"><img src="https://github.com/maxxflyer.png" width="50px" alt="maxxflyer" /></a> <a href="https://github.com/tekelsey"><img src="https://github.com/tekelsey.png" width="50px" alt="tekelsey" /></a> <a href="https://github.com/digisomni"><img src="https://github.com/digisomni.png" width="50px" alt="digisomni" /></a> <a href="https://github.com/nocodeclarity"><img src="https://github.com/nocodeclarity.png" width="50px" alt="nocodeclarity" /></a> <a href="https://github.com/tjarmain"><img src="https://github.com/tjarmain.png" width="50px" alt="tjarmain" /></a>
|
||||
</p>
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Auto-GPT: An Autonomous GPT-4 Experiment](#auto-gpt-an-autonomous-gpt-4-experiment)
|
||||
- [Demo (30/03/2023):](#demo-30032023)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [🚀 Features](#-features)
|
||||
- [📋 Requirements](#-requirements)
|
||||
- [💾 Installation](#-installation)
|
||||
- [🔧 Usage](#-usage)
|
||||
- [Logs](#logs)
|
||||
- [🗣️ Speech Mode](#️-speech-mode)
|
||||
- [🔍 Google API Keys Configuration](#-google-api-keys-configuration)
|
||||
- [Setting up environment variables](#setting-up-environment-variables)
|
||||
- [Redis Setup](#redis-setup)
|
||||
- [🌲 Pinecone API Key Setup](#-pinecone-api-key-setup)
|
||||
- [Setting up environment variables](#setting-up-environment-variables-1)
|
||||
- [Setting Your Cache Type](#setting-your-cache-type)
|
||||
- [View Memory Usage](#view-memory-usage)
|
||||
- [🧠 Memory pre-seeding](#memory-pre-seeding)
|
||||
- [💀 Continuous Mode ⚠️](#-continuous-mode-️)
|
||||
- [GPT3.5 ONLY Mode](#gpt35-only-mode)
|
||||
- [🖼 Image Generation](#-image-generation)
|
||||
- [⚠️ Limitations](#️-limitations)
|
||||
- [🛡 Disclaimer](#-disclaimer)
|
||||
- [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter)
|
||||
- [Run tests](#run-tests)
|
||||
- [Run linter](#run-linter)
|
||||
|
||||
## 🚀 Features
|
||||
|
||||
- 🌐 Internet access for searches and information gathering
|
||||
- 💾 Long-Term and Short-Term memory management
|
||||
- 🧠 GPT-4 instances for text generation
|
||||
- 🔗 Access to popular websites and platforms
|
||||
- 🗃️ File storage and summarization with GPT-3.5
|
||||
|
||||
## 📋 Requirements
|
||||
|
||||
- environments(just choose one)
|
||||
- [vscode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers): It has been configured in the .devcontainer folder and can be used directly
|
||||
- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
|
||||
- [OpenAI API key](https://platform.openai.com/account/api-keys)
|
||||
|
||||
|
||||
Optional:
|
||||
|
||||
- [PINECONE API key](https://www.pinecone.io/) (If you want Pinecone backed memory)
|
||||
- ElevenLabs Key (If you want the AI to speak)
|
||||
|
||||
## 💾 Installation
|
||||
|
||||
To install Auto-GPT, follow these steps:
|
||||
|
||||
1. Make sure you have all the **requirements** above, if not, install/get them.
|
||||
|
||||
_The following commands should be executed in a CMD, Bash or Powershell window. To do this, go to a folder on your computer, click in the folder path at the top and type CMD, then press enter._
|
||||
|
||||
2. Clone the repository:
|
||||
For this step you need Git installed, but you can just download the zip file instead by clicking the button at the top of this page ☝️
|
||||
|
||||
```
|
||||
git clone https://github.com/Torantulino/Auto-GPT.git
|
||||
```
|
||||
|
||||
3. Navigate to the project directory:
|
||||
_(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)_
|
||||
|
||||
```
|
||||
cd 'Auto-GPT'
|
||||
```
|
||||
|
||||
4. Install the required dependencies:
|
||||
_(Again, type this into your CMD window)_
|
||||
|
||||
```
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
5. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
|
||||
- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
|
||||
- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
|
||||
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then:
|
||||
- Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all of the deployment ids for the relevant models in the `azure_model_map` section:
|
||||
- `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment id
|
||||
- `smart_llm_model_deployment_id` - your gpt-4 deployment id
|
||||
- `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment id
|
||||
- Please specify all of these values as double quoted strings
|
||||
- details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model.
|
||||
|
||||
## 🔧 Usage
|
||||
|
||||
1. Run the `main.py` Python script in your terminal:
|
||||
_(Type this into your CMD window)_
|
||||
|
||||
```
|
||||
python scripts/main.py
|
||||
```
|
||||
|
||||
2. After each action, enter 'y' to authorise the command, 'y -N' to run N continuous commands, 'n' to exit the program, or enter additional feedback for the AI.
|
||||
|
||||
|
||||
### Logs
|
||||
|
||||
You will find activity and error logs in the folder `./output/logs`
|
||||
|
||||
To output debug logs:
|
||||
|
||||
```
|
||||
python scripts/main.py --debug
|
||||
```
|
||||
### Command Line Arguments
|
||||
Here are some common arguments you can use when running Auto-GPT:
|
||||
> Replace anything in angled brackets (<>) to a value you want to specify
|
||||
* `python scripts/main.py --help` to see a list of all available command line arguments.
|
||||
* `python scripts/main.py --ai-settings <filename>` to run Auto-GPT with a different AI Settings file.
|
||||
* `python scripts/main.py --use-memory <memory-backend>` to specify one of 4 memory backends: `local`, `redis`, `pinecone`, or `no_memory`.
|
||||
|
||||
> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python scripts/main.py --help` for more information
|
||||
|
||||
## 🗣️ Speech Mode
|
||||
|
||||
Use this to use TTS for Auto-GPT
|
||||
|
||||
```
|
||||
python scripts/main.py --speak
|
||||
```
|
||||
|
||||
## 🔍 Google API Keys Configuration
|
||||
|
||||
This section is optional, use the official google api if you are having issues with error 429 when running a google search.
|
||||
To use the `google_official_search` command, you need to set up your Google API keys in your environment variables.
|
||||
|
||||
1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
|
||||
2. If you don't already have an account, create one and log in.
|
||||
3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". Give it a name and click "Create".
|
||||
4. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard) and click "Enable APIs and Services". Search for "Custom Search API" and click on it, then click "Enable".
|
||||
5. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page and click "Create Credentials". Choose "API Key".
|
||||
6. Copy the API key and set it as an environment variable named `GOOGLE_API_KEY` on your machine. See setting up environment variables below.
|
||||
7. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (You might need to wait a few minutes for it to propagate.)
|
||||
8. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page and click "Add".
|
||||
9. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites.
|
||||
10. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below.
|
||||
|
||||
_Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._
|
||||
|
||||
### Setting up environment variables
|
||||
|
||||
For Windows Users:
|
||||
|
||||
```
|
||||
setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY"
|
||||
setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID"
|
||||
|
||||
```
|
||||
|
||||
For macOS and Linux users:
|
||||
|
||||
```
|
||||
export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
|
||||
export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
|
||||
|
||||
```
|
||||
|
||||
## Redis Setup
|
||||
|
||||
Install docker desktop.
|
||||
|
||||
Run:
|
||||
|
||||
```
|
||||
docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
|
||||
```
|
||||
|
||||
See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
|
||||
|
||||
Set the following environment variables:
|
||||
|
||||
```
|
||||
MEMORY_BACKEND=redis
|
||||
REDIS_HOST=localhost
|
||||
REDIS_PORT=6379
|
||||
REDIS_PASSWORD=
|
||||
```
|
||||
|
||||
Note that this is not intended to be run facing the internet and is not secure, do not expose redis to the internet without a password or at all really.
|
||||
|
||||
You can optionally set
|
||||
|
||||
```
|
||||
WIPE_REDIS_ON_START=False
|
||||
```
|
||||
|
||||
To persist memory stored in Redis.
|
||||
|
||||
You can specify the memory index for redis using the following:
|
||||
|
||||
```
|
||||
MEMORY_INDEX=whatever
|
||||
```
|
||||
|
||||
## 🌲 Pinecone API Key Setup
|
||||
|
||||
Pinecone enables the storage of vast amounts of vector-based memory, allowing for only relevant memories to be loaded for the agent at any given time.
|
||||
|
||||
1. Go to [pinecone](https://app.pinecone.io/) and make an account if you don't already have one.
|
||||
2. Choose the `Starter` plan to avoid being charged.
|
||||
3. Find your API key and region under the default project in the left sidebar.
|
||||
|
||||
### Setting up environment variables
|
||||
|
||||
In the `.env` file set:
|
||||
- `PINECONE_API_KEY`
|
||||
- `PINECONE_ENV` (something like: us-east4-gcp)
|
||||
- `MEMORY_BACKEND=pinecone`
|
||||
|
||||
Alternatively, you can set them from the command line (advanced):
|
||||
|
||||
For Windows Users:
|
||||
|
||||
```
|
||||
setx PINECONE_API_KEY "YOUR_PINECONE_API_KEY"
|
||||
setx PINECONE_ENV "Your pinecone region" # something like: us-east4-gcp
|
||||
setx MEMORY_BACKEND "pinecone"
|
||||
```
|
||||
|
||||
For macOS and Linux users:
|
||||
|
||||
```
|
||||
export PINECONE_API_KEY="YOUR_PINECONE_API_KEY"
|
||||
export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp
|
||||
export MEMORY_BACKEND="pinecone"
|
||||
```
|
||||
|
||||
## Setting Your Cache Type
|
||||
|
||||
By default Auto-GPT is going to use LocalCache instead of redis or Pinecone.
|
||||
|
||||
To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want:
|
||||
|
||||
`local` (default) uses a local JSON cache file
|
||||
`pinecone` uses the Pinecone.io account you configured in your ENV settings
|
||||
`redis` will use the redis cache that you configured
|
||||
|
||||
## View Memory Usage
|
||||
|
||||
1. View memory usage by using the `--debug` flag :)
|
||||
|
||||
|
||||
## 🧠 Memory pre-seeding
|
||||
|
||||
```
|
||||
# python scripts/data_ingestion.py -h
|
||||
usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
|
||||
|
||||
Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--file FILE The file to ingest.
|
||||
--dir DIR The directory containing the files to ingest.
|
||||
--init Init the memory and wipe its content (default: False)
|
||||
--overlap OVERLAP The overlap size between chunks when ingesting files (default: 200)
|
||||
--max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000)
|
||||
|
||||
# python scripts/data_ingestion.py --dir seed_data --init --overlap 200 --max_length 1000
|
||||
```
|
||||
|
||||
This script located at scripts/data_ingestion.py, allows you to ingest files into memory and pre-seed it before running Auto-GPT.
|
||||
|
||||
Memory pre-seeding is a technique that involves ingesting relevant documents or data into the AI's memory so that it can use this information to generate more informed and accurate responses.
|
||||
|
||||
To pre-seed the memory, the content of each document is split into chunks of a specified maximum length with a specified overlap between chunks, and then each chunk is added to the memory backend set in the .env file. When the AI is prompted to recall information, it can then access those pre-seeded memories to generate more informed and accurate responses.
|
||||
|
||||
This technique is particularly useful when working with large amounts of data or when there is specific information that the AI needs to be able to access quickly.
|
||||
By pre-seeding the memory, the AI can retrieve and use this information more efficiently, saving time, API call and improving the accuracy of its responses.
|
||||
|
||||
You could for example download the documentation of an API, a Github repository, etc. and ingest it into memory before running Auto-GPT.
|
||||
|
||||
⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the WIPE_REDIS_ON_START set to False in your .env file.
|
||||
|
||||
⚠️ For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the data_ingestion.py script anytime during an Auto-GPT run.
|
||||
|
||||
Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running.
|
||||
|
||||
In the example above, the script initializes the memory, ingests all files within the seed_data directory into memory with an overlap between chunks of 200 and a maximum length of each chunk of 1000.
|
||||
Note that you can also use the --file argument to ingest a single file into memory and that the script will only ingest files within the auto_gpt_workspace directory.
|
||||
|
||||
You can adjust the max_length and overlap parameters to fine-tune the way the documents are presented to the AI when it "recalls" that memory:
|
||||
|
||||
- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
|
||||
- Reducing the max_length value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks created (and therefore the number of OpenAI API requests).
|
||||
- Increasing the max_length value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
|
||||
|
||||
## 💀 Continuous Mode ⚠️
|
||||
|
||||
Run the AI **without** user authorisation, 100% automated.
|
||||
Continuous mode is not recommended.
|
||||
It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise.
|
||||
Use at your own risk.
|
||||
|
||||
1. Run the `main.py` Python script in your terminal:
|
||||
|
||||
```
|
||||
python scripts/main.py --continuous
|
||||
|
||||
```
|
||||
|
||||
2. To exit the program, press Ctrl + C
|
||||
|
||||
## GPT3.5 ONLY Mode
|
||||
|
||||
If you don't have access to the GPT-4 API, this mode will allow you to use Auto-GPT!
|
||||
|
||||
```
|
||||
python scripts/main.py --gpt3only
|
||||
```
|
||||
|
||||
It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
|
||||
|
||||
## 🖼 Image Generation
|
||||
|
||||
By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, a [HuggingFace API Token](https://huggingface.co/settings/tokens) is required.
|
||||
|
||||
Once you have a token, set these variables in your `.env`:
|
||||
|
||||
```
|
||||
IMAGE_PROVIDER=sd
|
||||
HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN"
|
||||
```
|
||||
|
||||
## ⚠️ Limitations
|
||||
|
||||
This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
|
||||
|
||||
1. Not a polished application or product, just an experiment
|
||||
2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results!
|
||||
3. Quite expensive to run, so set and monitor your API key limits with OpenAI!
|
||||
|
||||
## 🛡 Disclaimer
|
||||
|
||||
Disclaimer
|
||||
This project, Auto-GPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.
|
||||
|
||||
The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by Auto-GPT.
|
||||
|
||||
**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges.
|
||||
|
||||
As an autonomous experiment, Auto-GPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software.
|
||||
|
||||
By using Auto-GPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms.
|
||||
|
||||
## 🐦 Connect with Us on Twitter
|
||||
|
||||
Stay up-to-date with the latest news, updates, and insights about Auto-GPT by following our Twitter accounts. Engage with the developer and the AI's own account for interesting discussions, project updates, and more.
|
||||
|
||||
- **Developer**: Follow [@siggravitas](https://twitter.com/siggravitas) for insights into the development process, project updates, and related topics from the creator of Entrepreneur-GPT.
|
||||
- **Entrepreneur-GPT**: Join the conversation with the AI itself by following [@En_GPT](https://twitter.com/En_GPT). Share your experiences, discuss the AI's outputs, and engage with the growing community of users.
|
||||
|
||||
We look forward to connecting with you and hearing your thoughts, ideas, and experiences with Auto-GPT. Join us on Twitter and let's explore the future of AI together!
|
||||
|
||||
<p align="center">
|
||||
<a href="https://star-history.com/#Torantulino/auto-gpt&Date">
|
||||
<img src="https://api.star-history.com/svg?repos=Torantulino/auto-gpt&type=Date" alt="Star History Chart">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
## Run tests
|
||||
|
||||
To run tests, run the following command:
|
||||
|
||||
```
|
||||
python -m unittest discover tests
|
||||
```
|
||||
|
||||
To run tests and see coverage, run the following command:
|
||||
|
||||
```
|
||||
coverage run -m unittest discover tests
|
||||
```
|
||||
|
||||
## Run linter
|
||||
|
||||
This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
|
||||
|
||||
To run the linter, run the following command:
|
||||
|
||||
```
|
||||
flake8 scripts/ tests/
|
||||
|
||||
# Or, if you want to run flake8 with the same configuration as the CI:
|
||||
flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
|
||||
```
|
||||
|
||||
66
SECURITY.md
Normal file
66
SECURITY.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Security Policy
|
||||
|
||||
- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
|
||||
- [Restrict Workspace](#restrict-workspace)
|
||||
- [Untrusted inputs](#untrusted-inputs)
|
||||
- [Data privacy](#data-privacy)
|
||||
- [Untrusted environments or networks](#untrusted-environments-or-networks)
|
||||
- [Multi-Tenant environments](#multi-tenant-environments)
|
||||
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)
|
||||
|
||||
## Using AutoGPT Securely
|
||||
|
||||
### Restrict Workspace
|
||||
|
||||
Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.
|
||||
|
||||
Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks.
|
||||
|
||||
### Untrusted inputs
|
||||
|
||||
When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.
|
||||
|
||||
For maximum security when handling untrusted inputs, you may need to employ the following:
|
||||
|
||||
* Sandboxing: Isolate the process.
|
||||
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
|
||||
* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
|
||||
* Validation: Enforce strict rules on allowed characters and data types.
|
||||
* Filtering: Remove potentially malicious scripts or code fragments.
|
||||
* Encoding: Convert special characters into safe representations.
|
||||
* Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).
|
||||
|
||||
### Data privacy
|
||||
|
||||
To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.
|
||||
|
||||
### Untrusted environments or networks
|
||||
|
||||
Since AutoGPT performs network calls to the OpenAI API, it is important to always run it with trusted environments and networks. Running it on untrusted environments can expose your API KEY to attackers.
|
||||
Additionally, running it on an untrusted network can expose your data to potential network attacks.
|
||||
|
||||
However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.
|
||||
|
||||
### Multi-Tenant environments
|
||||
|
||||
If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.
|
||||
|
||||
The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.
|
||||
|
||||
- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant’s identity.
|
||||
|
||||
- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.
|
||||
|
||||
- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.
|
||||
|
||||
- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities on AutoGPT.
|
||||
|
||||
However, If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
|
||||
|
||||
Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).
|
||||
|
||||
A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.
|
||||
23
TROUBLESHOOTING.md
Normal file
23
TROUBLESHOOTING.md
Normal file
@@ -0,0 +1,23 @@
|
||||
This page is a list of issues you could encounter along with their fixes.
|
||||
|
||||
# Forge
|
||||
**Poetry configuration invalid**
|
||||
|
||||
The poetry configuration is invalid:
|
||||
- Additional properties are not allowed ('group' was unexpected)
|
||||
<img width="487" alt="Screenshot 2023-09-22 at 5 42 59 PM" src="https://github.com/Significant-Gravitas/AutoGPT/assets/9652976/dd451e6b-8114-44de-9928-075f5f06d661">
|
||||
|
||||
**Pydantic Validation Error**
|
||||
|
||||
Remove your sqlite agent.db file. it's probably because some of your data is not complying with the new spec (we will create migrations soon to avoid this problem)
|
||||
|
||||
|
||||
*Solution*
|
||||
|
||||
Update poetry
|
||||
|
||||
# Benchmark
|
||||
TODO
|
||||
|
||||
# Frontend
|
||||
TODO
|
||||
BIN
assets/gpt_dark_RGB.icns
Normal file
BIN
assets/gpt_dark_RGB.icns
Normal file
Binary file not shown.
BIN
assets/gpt_dark_RGB.ico
Normal file
BIN
assets/gpt_dark_RGB.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.1 MiB |
BIN
assets/gpt_dark_RGB.png
Normal file
BIN
assets/gpt_dark_RGB.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 49 KiB |
2
autogpt/.coveragerc
Normal file
2
autogpt/.coveragerc
Normal file
@@ -0,0 +1,2 @@
|
||||
[run]
|
||||
relative_files = true
|
||||
13
autogpt/.devcontainer/Dockerfile
Normal file
13
autogpt/.devcontainer/Dockerfile
Normal file
@@ -0,0 +1,13 @@
|
||||
# Use an official Python base image from the Docker Hub
|
||||
FROM python:3.10
|
||||
|
||||
# Install browsers
|
||||
RUN apt-get update && apt-get install -y \
|
||||
chromium-driver firefox-esr \
|
||||
ca-certificates
|
||||
|
||||
# Install utilities
|
||||
RUN apt-get install -y curl jq wget git
|
||||
|
||||
# Declare working directory
|
||||
WORKDIR /workspace/AutoGPT
|
||||
@@ -1,8 +1,8 @@
|
||||
{
|
||||
"build": {
|
||||
"dockerfile": "./Dockerfile",
|
||||
"context": "."
|
||||
},
|
||||
"dockerComposeFile": "./docker-compose.yml",
|
||||
"service": "auto-gpt",
|
||||
"workspaceFolder": "/workspace/AutoGPT",
|
||||
"shutdownAction": "stopCompose",
|
||||
"features": {
|
||||
"ghcr.io/devcontainers/features/common-utils:2": {
|
||||
"installZsh": "true",
|
||||
@@ -11,6 +11,8 @@
|
||||
"userGid": "1000",
|
||||
"upgradePackages": "true"
|
||||
},
|
||||
"ghcr.io/devcontainers/features/desktop-lite:1": {},
|
||||
"ghcr.io/devcontainers/features/github-cli:1": {},
|
||||
"ghcr.io/devcontainers/features/python:1": "none",
|
||||
"ghcr.io/devcontainers/features/node:1": "none",
|
||||
"ghcr.io/devcontainers/features/git:1": {
|
||||
@@ -24,16 +26,31 @@
|
||||
"vscode": {
|
||||
// Set *default* container specific settings.json values on container create.
|
||||
"settings": {
|
||||
"python.defaultInterpreterPath": "/usr/local/bin/python"
|
||||
}
|
||||
"python.defaultInterpreterPath": "/usr/local/bin/python",
|
||||
"python.testing.pytestEnabled": true,
|
||||
"python.testing.unittestEnabled": false
|
||||
},
|
||||
"extensions": [
|
||||
"ms-python.python",
|
||||
"VisualStudioExptTeam.vscodeintellicode",
|
||||
"ms-python.vscode-pylance",
|
||||
"ms-python.black-formatter",
|
||||
"ms-python.isort",
|
||||
"GitHub.vscode-pull-request-github",
|
||||
"GitHub.copilot",
|
||||
"github.vscode-github-actions"
|
||||
]
|
||||
}
|
||||
},
|
||||
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||
// "forwardPorts": [],
|
||||
|
||||
// Use 'postCreateCommand' to run commands after the container is created.
|
||||
// "postCreateCommand": "pip3 install --user -r requirements.txt",
|
||||
// "postCreateCommand": "poetry install",
|
||||
|
||||
// Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
|
||||
"remoteUser": "vscode"
|
||||
"remoteUser": "vscode",
|
||||
|
||||
// Add the freshly containerized repo to the list of safe repositories
|
||||
"postCreateCommand": "git config --global --add safe.directory /workspace/AutoGPT && poetry install"
|
||||
}
|
||||
12
autogpt/.devcontainer/docker-compose.yml
Normal file
12
autogpt/.devcontainer/docker-compose.yml
Normal file
@@ -0,0 +1,12 @@
|
||||
# To boot the app run the following:
|
||||
# docker-compose run auto-gpt
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
auto-gpt:
|
||||
build:
|
||||
dockerfile: .devcontainer/Dockerfile
|
||||
context: ../
|
||||
tty: true
|
||||
volumes:
|
||||
- ../:/workspace/AutoGPT
|
||||
179
autogpt/.env.template
Normal file
179
autogpt/.env.template
Normal file
@@ -0,0 +1,179 @@
|
||||
################################################################################
|
||||
### AutoGPT - GENERAL SETTINGS
|
||||
################################################################################
|
||||
|
||||
## OPENAI_API_KEY - OpenAI API Key (Example: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
|
||||
# OPENAI_API_KEY=
|
||||
|
||||
## ANTHROPIC_API_KEY - Anthropic API Key (Example: sk-ant-api03-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
|
||||
# ANTHROPIC_API_KEY=
|
||||
|
||||
## GROQ_API_KEY - Groq API Key (Example: gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
|
||||
# GROQ_API_KEY=
|
||||
|
||||
## LLAMAFILE_API_BASE - Llamafile API base URL
|
||||
# LLAMAFILE_API_BASE=http://localhost:8080/v1
|
||||
|
||||
## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
|
||||
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
|
||||
# TELEMETRY_OPT_IN=true
|
||||
|
||||
## COMPONENT_CONFIG_FILE - Path to the json config file (Default: None)
|
||||
# COMPONENT_CONFIG_FILE=
|
||||
|
||||
### Workspace ###
|
||||
|
||||
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./data/agents/<agent_id>/workspace (Default: True)
|
||||
# RESTRICT_TO_WORKSPACE=True
|
||||
|
||||
## DISABLED_COMMANDS - The comma separated list of commands that are disabled (Default: None)
|
||||
# DISABLED_COMMANDS=
|
||||
|
||||
## FILE_STORAGE_BACKEND - Choose a storage backend for contents
|
||||
## Options: local, gcs, s3
|
||||
# FILE_STORAGE_BACKEND=local
|
||||
|
||||
## STORAGE_BUCKET - GCS/S3 Bucket to store contents in
|
||||
# STORAGE_BUCKET=autogpt
|
||||
|
||||
## GCS Credentials
|
||||
# see https://cloud.google.com/storage/docs/authentication#libauth
|
||||
|
||||
## AWS/S3 Credentials
|
||||
# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
|
||||
|
||||
## S3_ENDPOINT_URL - If you're using non-AWS S3, set your endpoint here.
|
||||
# S3_ENDPOINT_URL=
|
||||
|
||||
### Miscellaneous ###
|
||||
|
||||
## AUTHORISE COMMAND KEY - Key to authorise commands
|
||||
# AUTHORISE_COMMAND_KEY=y
|
||||
|
||||
## EXIT_KEY - Key to exit AutoGPT
|
||||
# EXIT_KEY=n
|
||||
|
||||
################################################################################
|
||||
### LLM PROVIDER
|
||||
################################################################################
|
||||
|
||||
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
|
||||
# TEMPERATURE=0
|
||||
|
||||
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
|
||||
# the following is an example:
|
||||
# OPENAI_API_BASE_URL=http://localhost:443/v1
|
||||
|
||||
# OPENAI_API_TYPE=
|
||||
# OPENAI_API_VERSION=
|
||||
|
||||
## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
|
||||
## Note: this feature is only supported by OpenAI's newer models.
|
||||
# OPENAI_FUNCTIONS=False
|
||||
|
||||
## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
|
||||
# OPENAI_ORGANIZATION=
|
||||
|
||||
## USE_AZURE - Use Azure OpenAI or not (Default: False)
|
||||
# USE_AZURE=False
|
||||
|
||||
## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the folder containing this file. (Default: azure.yaml)
|
||||
# AZURE_CONFIG_FILE=azure.yaml
|
||||
|
||||
# AZURE_OPENAI_AD_TOKEN=
|
||||
# AZURE_OPENAI_ENDPOINT=
|
||||
|
||||
################################################################################
|
||||
### LLM MODELS
|
||||
################################################################################
|
||||
|
||||
## SMART_LLM - Smart language model (Default: gpt-4-turbo)
|
||||
# SMART_LLM=gpt-4-turbo
|
||||
|
||||
## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
|
||||
# FAST_LLM=gpt-3.5-turbo
|
||||
|
||||
## EMBEDDING_MODEL - Model to use for creating embeddings
|
||||
# EMBEDDING_MODEL=text-embedding-3-small
|
||||
|
||||
################################################################################
|
||||
### IMAGE GENERATION PROVIDER
|
||||
################################################################################
|
||||
|
||||
### Huggingface (IMAGE_PROVIDER=huggingface)
|
||||
|
||||
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
|
||||
# HUGGINGFACE_API_TOKEN=
|
||||
|
||||
|
||||
### Stable Diffusion (IMAGE_PROVIDER=sdwebui)
|
||||
|
||||
## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
|
||||
# SD_WEBUI_AUTH=
|
||||
|
||||
################################################################################
|
||||
### GITHUB
|
||||
################################################################################
|
||||
|
||||
## GITHUB_API_KEY - Github API key / PAT (Default: None)
|
||||
# GITHUB_API_KEY=
|
||||
|
||||
## GITHUB_USERNAME - Github username (Default: None)
|
||||
# GITHUB_USERNAME=
|
||||
|
||||
################################################################################
|
||||
### WEB BROWSING
|
||||
################################################################################
|
||||
|
||||
## GOOGLE_API_KEY - Google API key (Default: None)
|
||||
# GOOGLE_API_KEY=
|
||||
|
||||
## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
|
||||
# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=
|
||||
|
||||
################################################################################
|
||||
### TEXT TO SPEECH PROVIDER
|
||||
################################################################################
|
||||
|
||||
## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
|
||||
## Options: gtts, streamelements, elevenlabs, macos
|
||||
# TEXT_TO_SPEECH_PROVIDER=gtts
|
||||
|
||||
## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
|
||||
# STREAMELEMENTS_VOICE=Brian
|
||||
|
||||
## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
|
||||
# ELEVENLABS_API_KEY=
|
||||
|
||||
## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
|
||||
# ELEVENLABS_VOICE_ID=
|
||||
|
||||
################################################################################
|
||||
### LOGGING
|
||||
################################################################################
|
||||
|
||||
## LOG_LEVEL - Set the minimum level to filter log output by. Setting this to DEBUG implies LOG_FORMAT=debug, unless LOG_FORMAT is set explicitly.
|
||||
## Options: DEBUG, INFO, WARNING, ERROR, CRITICAL
|
||||
# LOG_LEVEL=INFO
|
||||
|
||||
## LOG_FORMAT - The format in which to log messages to the console (and log files).
|
||||
## Options: simple, debug, structured_google_cloud
|
||||
# LOG_FORMAT=simple
|
||||
|
||||
## LOG_FILE_FORMAT - Normally follows the LOG_FORMAT setting, but can be set separately.
|
||||
## Note: Log file output is disabled if LOG_FORMAT=structured_google_cloud.
|
||||
# LOG_FILE_FORMAT=simple
|
||||
|
||||
## PLAIN_OUTPUT - Disables animated typing and the spinner in the console output. (Default: False)
|
||||
# PLAIN_OUTPUT=False
|
||||
|
||||
|
||||
################################################################################
|
||||
### Agent Protocol Server Settings
|
||||
################################################################################
|
||||
## AP_SERVER_PORT - Specifies what port the agent protocol server will listen on. (Default: 8000)
|
||||
## AP_SERVER_DB_URL - Specifies what connection url the agent protocol database will connect to (Default: Internal SQLite)
|
||||
## AP_SERVER_CORS_ALLOWED_ORIGINS - Comma separated list of allowed origins for CORS. (Default: http://localhost:{AP_SERVER_PORT})
|
||||
# AP_SERVER_PORT=8000
|
||||
# AP_SERVER_DB_URL=sqlite:///data/ap_server.db
|
||||
# AP_SERVER_CORS_ALLOWED_ORIGINS=
|
||||
4
autogpt/.envrc
Normal file
4
autogpt/.envrc
Normal file
@@ -0,0 +1,4 @@
|
||||
# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
|
||||
# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use AutoGPT.
|
||||
|
||||
[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
|
||||
14
autogpt/.flake8
Normal file
14
autogpt/.flake8
Normal file
@@ -0,0 +1,14 @@
|
||||
[flake8]
|
||||
max-line-length = 88
|
||||
# Ignore rules that conflict with Black code style
|
||||
extend-ignore = E203, W503
|
||||
exclude =
|
||||
.git,
|
||||
__pycache__/,
|
||||
*.pyc,
|
||||
.pytest_cache/,
|
||||
venv*/,
|
||||
.venv/,
|
||||
data/,
|
||||
logs/,
|
||||
tests/unit/data/,
|
||||
167
autogpt/.gitignore
vendored
Normal file
167
autogpt/.gitignore
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
## Original ignores
|
||||
autogpt/keys.py
|
||||
autogpt/*.json
|
||||
*.mpeg
|
||||
.env
|
||||
azure.yaml
|
||||
.vscode
|
||||
.idea/*
|
||||
auto-gpt.json
|
||||
log.txt
|
||||
log-ingestion.txt
|
||||
/logs
|
||||
*.log
|
||||
*.mp3
|
||||
mem.sqlite3
|
||||
venvAutoGPT
|
||||
data/*
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
/plugins/*
|
||||
plugins_config.yaml
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
pip-wheel-metadata/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
site/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.direnv/
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv*/
|
||||
ENV/
|
||||
env.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
llama-*
|
||||
vicuna-*
|
||||
|
||||
# mac
|
||||
.DS_Store
|
||||
|
||||
openai/
|
||||
|
||||
# news
|
||||
CURRENT_BULLETIN.md
|
||||
|
||||
# Nodejs
|
||||
package-lock.json
|
||||
package.json
|
||||
|
||||
# Keep
|
||||
!.keep
|
||||
71
autogpt/.sourcery.yaml
Normal file
71
autogpt/.sourcery.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
# 🪄 This is your project's Sourcery configuration file.
|
||||
|
||||
# You can use it to get Sourcery working in the way you want, such as
|
||||
# ignoring specific refactorings, skipping directories in your project,
|
||||
# or writing custom rules.
|
||||
|
||||
# 📚 For a complete reference to this file, see the documentation at
|
||||
# https://docs.sourcery.ai/Configuration/Project-Settings/
|
||||
|
||||
# This file was auto-generated by Sourcery on 2023-02-25 at 21:07.
|
||||
|
||||
version: '1' # The schema version of this config file
|
||||
|
||||
ignore: # A list of paths or files which Sourcery will ignore.
|
||||
- .git
|
||||
- venv
|
||||
- .venv
|
||||
- build
|
||||
- dist
|
||||
- env
|
||||
- .env
|
||||
- .tox
|
||||
|
||||
rule_settings:
|
||||
enable:
|
||||
- default
|
||||
- gpsg
|
||||
disable: [] # A list of rule IDs Sourcery will never suggest.
|
||||
rule_types:
|
||||
- refactoring
|
||||
- suggestion
|
||||
- comment
|
||||
python_version: '3.10' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
|
||||
|
||||
# rules: # A list of custom rules Sourcery will include in its analysis.
|
||||
# - id: no-print-statements
|
||||
# description: Do not use print statements in the test directory.
|
||||
# pattern: print(...)
|
||||
# language: python
|
||||
# replacement:
|
||||
# condition:
|
||||
# explanation:
|
||||
# paths:
|
||||
# include:
|
||||
# - test
|
||||
# exclude:
|
||||
# - conftest.py
|
||||
# tests: []
|
||||
# tags: []
|
||||
|
||||
# rule_tags: {} # Additional rule tags.
|
||||
|
||||
# metrics:
|
||||
# quality_threshold: 25.0
|
||||
|
||||
# github:
|
||||
# labels: []
|
||||
# ignore_labels:
|
||||
# - sourcery-ignore
|
||||
# request_review: author
|
||||
# sourcery_branch: sourcery/{base_branch}
|
||||
|
||||
# clone_detection:
|
||||
# min_lines: 3
|
||||
# min_duplicates: 2
|
||||
# identical_clones_only: false
|
||||
|
||||
# proxy:
|
||||
# url:
|
||||
# ssl_certs_file:
|
||||
# no_ssl_verify: false
|
||||
3
autogpt/.vscode/settings.json
vendored
Normal file
3
autogpt/.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"python.analysis.typeCheckingMode": "basic",
|
||||
}
|
||||
13
autogpt/BULLETIN.md
Normal file
13
autogpt/BULLETIN.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# QUICK LINKS 🔗
|
||||
# --------------
|
||||
🌎 *Official Website*: https://agpt.co.
|
||||
📖 *User Guide*: https://docs.agpt.co/autogpt.
|
||||
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing.
|
||||
|
||||
# v0.5.0 RELEASE HIGHLIGHTS! 🚀🚀
|
||||
# -------------------------------
|
||||
Cloud-readiness, a new UI, support for the newest Agent Protocol version, and much more:
|
||||
*v0.5.0 is our biggest release yet!*
|
||||
|
||||
Take a look at the Release Notes on Github for the full changelog:
|
||||
https://github.com/Significant-Gravitas/AutoGPT/releases.
|
||||
160
autogpt/README.md
Normal file
160
autogpt/README.md
Normal file
@@ -0,0 +1,160 @@
|
||||
# AutoGPT: An Autonomous GPT-4 Experiment
|
||||
|
||||
[📖 **Documentation**][docs]
|
||||
 | 
|
||||
[🚀 **Contributing**](../../CONTRIBUTING.md)
|
||||
|
||||
AutoGPT is an experimental open-source application showcasing the capabilities of modern Large Language Models. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, AutoGPT pushes the boundaries of what is possible with AI.
|
||||
|
||||
<h2 align="center"> Demo April 16th 2023 </h2>
|
||||
|
||||
https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4
|
||||
|
||||
Demo made by <a href=https://twitter.com/BlakeWerlinger>Blake Werlinger</a>
|
||||
|
||||
## 🚀 Features
|
||||
|
||||
- 🔌 Agent Protocol ([docs](https://agentprotocol.ai))
|
||||
- 💻 Easy to use UI
|
||||
- 🌐 Internet access for searches and information gathering
|
||||
- 🧠 Powered by a mix of GPT-4 and GPT-3.5 Turbo
|
||||
- 🔗 Access to popular websites and platforms
|
||||
- 🗃️ File generation and editing capabilities
|
||||
- 🔌 Extensibility with Plugins
|
||||
<!-- - 💾 Long-term and short-term memory management -->
|
||||
|
||||
## Setting up AutoGPT
|
||||
1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys)
|
||||
2. Copy `.env.template` to `.env` and set `OPENAI_API_KEY`
|
||||
3. Make sure you have Poetry [installed](https://python-poetry.org/docs/#installation)
|
||||
|
||||
For more ways to run AutoGPT, more detailed instructions, and more configuration options,
|
||||
see the [setup guide][docs/setup].
|
||||
|
||||
## Running AutoGPT
|
||||
The CLI should be self-documenting:
|
||||
```shell
|
||||
$ ./autogpt.sh --help
|
||||
Usage: python -m autogpt [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
run Sets up and runs an agent, based on the task specified by the...
|
||||
serve Starts an Agent Protocol compliant AutoGPT server, which creates...
|
||||
```
|
||||
When run without a sub-command, it will default to `run` for legacy reasons.
|
||||
|
||||
<details>
|
||||
<summary>
|
||||
<code>$ ./autogpt.sh run --help</code>
|
||||
</summary>
|
||||
|
||||
The `run` sub-command starts AutoGPT with the legacy CLI interface:
|
||||
|
||||
```shell
|
||||
$ ./autogpt.sh run --help
|
||||
Usage: python -m autogpt run [OPTIONS]
|
||||
|
||||
Sets up and runs an agent, based on the task specified by the user, or
|
||||
resumes an existing agent.
|
||||
|
||||
Options:
|
||||
-c, --continuous Enable Continuous Mode
|
||||
-y, --skip-reprompt Skips the re-prompting messages at the
|
||||
beginning of the script
|
||||
-l, --continuous-limit INTEGER Defines the number of times to run in
|
||||
continuous mode
|
||||
--speak Enable Speak Mode
|
||||
--debug Enable Debug Mode
|
||||
--skip-news Specifies whether to suppress the output of
|
||||
latest news on startup.
|
||||
--install-plugin-deps Installs external dependencies for 3rd party
|
||||
plugins.
|
||||
--ai-name TEXT AI name override
|
||||
--ai-role TEXT AI role override
|
||||
--constraint TEXT Add or override AI constraints to include in
|
||||
the prompt; may be used multiple times to
|
||||
pass multiple constraints
|
||||
--resource TEXT Add or override AI resources to include in
|
||||
the prompt; may be used multiple times to
|
||||
pass multiple resources
|
||||
--best-practice TEXT Add or override AI best practices to include
|
||||
in the prompt; may be used multiple times to
|
||||
pass multiple best practices
|
||||
--override-directives If specified, --constraint, --resource and
|
||||
--best-practice will override the AI's
|
||||
directives instead of being appended to them
|
||||
--component-config-file TEXT Path to the json configuration file.
|
||||
--help Show this message and exit.
|
||||
```
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>
|
||||
<code>$ ./autogpt.sh serve --help</code>
|
||||
</summary>
|
||||
|
||||
The `serve` sub-command starts AutoGPT wrapped in an Agent Protocol server:
|
||||
|
||||
```shell
|
||||
$ ./autogpt.sh serve --help
|
||||
Usage: python -m autogpt serve [OPTIONS]
|
||||
|
||||
Starts an Agent Protocol compliant AutoGPT server, which creates a custom
|
||||
agent for every task.
|
||||
|
||||
Options:
|
||||
--debug Enable Debug Mode
|
||||
--install-plugin-deps Installs external dependencies for 3rd party
|
||||
plugins.
|
||||
--help Show this message and exit.
|
||||
```
|
||||
</details>
|
||||
|
||||
With `serve`, the application exposes an Agent Protocol compliant API and serves a frontend,
|
||||
by default on `http://localhost:8000`.
|
||||
|
||||
For more comprehensive instructions, see the [user guide][docs/usage].
|
||||
|
||||
[docs]: https://docs.agpt.co/autogpt
|
||||
[docs/setup]: https://docs.agpt.co/autogpt/setup
|
||||
[docs/usage]: https://docs.agpt.co/autogpt/usage
|
||||
[docs/plugins]: https://docs.agpt.co/autogpt/plugins
|
||||
|
||||
## 📚 Resources
|
||||
* 📔 AutoGPT [project wiki](https://github.com/Significant-Gravitas/AutoGPT/wiki)
|
||||
* 🧮 AutoGPT [project kanban](https://github.com/orgs/Significant-Gravitas/projects/1)
|
||||
* 🌃 AutoGPT [roadmap](https://github.com/orgs/Significant-Gravitas/projects/2)
|
||||
|
||||
## ⚠️ Limitations
|
||||
|
||||
This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
|
||||
|
||||
1. Not a polished application or product, just an experiment
|
||||
2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results!
|
||||
3. Quite expensive to run, so set and monitor your API key limits with OpenAI!
|
||||
|
||||
## 🛡 Disclaimer
|
||||
|
||||
This project, AutoGPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.
|
||||
|
||||
The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by AutoGPT.
|
||||
|
||||
**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges.
|
||||
|
||||
As an autonomous experiment, AutoGPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software.
|
||||
|
||||
By using AutoGPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms.
|
||||
|
||||
---
|
||||
|
||||
In Q2 of 2023, AutoGPT became the fastest growing open-source project in history. Now that the dust has settled, we're committed to continued sustainable development and growth of the project.
|
||||
|
||||
<p align="center">
|
||||
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT&Date">
|
||||
<img src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" alt="Star History Chart">
|
||||
</a>
|
||||
</p>
|
||||
3
autogpt/agbenchmark_config/.gitignore
vendored
Normal file
3
autogpt/agbenchmark_config/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
logs/
|
||||
reports/
|
||||
temp_folder/
|
||||
143
autogpt/agbenchmark_config/analyze_reports.py
Normal file
143
autogpt/agbenchmark_config/analyze_reports.py
Normal file
@@ -0,0 +1,143 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
|
||||
from tabulate import tabulate
|
||||
|
||||
info = "-v" in sys.argv
|
||||
debug = "-vv" in sys.argv
|
||||
granular = "--granular" in sys.argv
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.DEBUG if debug else logging.INFO if info else logging.WARNING
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Get a list of all JSON files in the directory
|
||||
report_files = [
|
||||
report_file
|
||||
for dir in (Path(__file__).parent / "reports").iterdir()
|
||||
if re.match(r"^\d{8}T\d{6}_", dir.name)
|
||||
and (report_file := dir / "report.json").is_file()
|
||||
]
|
||||
|
||||
labels = list[str]()
|
||||
runs_per_label = defaultdict[str, int](lambda: 0)
|
||||
suite_names = list[str]()
|
||||
test_names = list[str]()
|
||||
|
||||
# Create a dictionary to store grouped success values by suffix and test
|
||||
grouped_success_values = defaultdict[str, list[str]](list[str])
|
||||
|
||||
# Loop through each JSON file to collect suffixes and success values
|
||||
for report_file in sorted(report_files):
|
||||
with open(report_file) as f:
|
||||
logger.info(f"Loading {report_file}...")
|
||||
|
||||
data = json.load(f)
|
||||
if "tests" in data:
|
||||
test_tree = data["tests"]
|
||||
label = data["agent_git_commit_sha"].rsplit("/", 1)[1][:7] # commit hash
|
||||
else:
|
||||
# Benchmark run still in progress
|
||||
test_tree = data
|
||||
label = report_file.parent.name.split("_", 1)[1]
|
||||
logger.info(f"Run '{label}' seems to be in progress")
|
||||
|
||||
runs_per_label[label] += 1
|
||||
|
||||
def process_test(test_name: str, test_data: dict):
|
||||
result_group = grouped_success_values[f"{label}|{test_name}"]
|
||||
|
||||
if "tests" in test_data:
|
||||
logger.debug(f"{test_name} is a test suite")
|
||||
|
||||
# Test suite
|
||||
suite_attempted = any(
|
||||
test["metrics"]["attempted"] for test in test_data["tests"].values()
|
||||
)
|
||||
logger.debug(f"suite_attempted: {suite_attempted}")
|
||||
if not suite_attempted:
|
||||
return
|
||||
|
||||
if test_name not in test_names:
|
||||
test_names.append(test_name)
|
||||
|
||||
if test_data["metrics"]["percentage"] == 0:
|
||||
result_indicator = "❌"
|
||||
else:
|
||||
highest_difficulty = test_data["metrics"]["highest_difficulty"]
|
||||
result_indicator = {
|
||||
"interface": "🔌",
|
||||
"novice": "🌑",
|
||||
"basic": "🌒",
|
||||
"intermediate": "🌓",
|
||||
"advanced": "🌔",
|
||||
"hard": "🌕",
|
||||
}[highest_difficulty]
|
||||
|
||||
logger.debug(f"result group: {result_group}")
|
||||
logger.debug(f"runs_per_label: {runs_per_label[label]}")
|
||||
if len(result_group) + 1 < runs_per_label[label]:
|
||||
result_group.extend(
|
||||
["❔"] * (runs_per_label[label] - len(result_group) - 1)
|
||||
)
|
||||
result_group.append(result_indicator)
|
||||
logger.debug(f"result group (after): {result_group}")
|
||||
|
||||
if granular:
|
||||
for test_name, test in test_data["tests"].items():
|
||||
process_test(test_name, test)
|
||||
return
|
||||
|
||||
test_metrics = test_data["metrics"]
|
||||
result_indicator = "❔"
|
||||
|
||||
if "attempted" not in test_metrics:
|
||||
return
|
||||
elif test_metrics["attempted"]:
|
||||
if test_name not in test_names:
|
||||
test_names.append(test_name)
|
||||
|
||||
success_value = test_metrics["success"]
|
||||
result_indicator = {True: "✅", False: "❌"}[success_value]
|
||||
|
||||
if len(result_group) + 1 < runs_per_label[label]:
|
||||
result_group.extend(
|
||||
[" "] * (runs_per_label[label] - len(result_group) - 1)
|
||||
)
|
||||
result_group.append(result_indicator)
|
||||
|
||||
for test_name, suite in test_tree.items():
|
||||
try:
|
||||
process_test(test_name, suite)
|
||||
except KeyError:
|
||||
print(f"{test_name}.metrics: {suite['metrics']}")
|
||||
raise
|
||||
|
||||
if label not in labels:
|
||||
labels.append(label)
|
||||
|
||||
# Create headers
|
||||
headers = ["Test Name"] + list(labels)
|
||||
|
||||
# Prepare data for tabulation
|
||||
table_data = list[list[str]]()
|
||||
for test_name in test_names:
|
||||
row = [test_name]
|
||||
for label in labels:
|
||||
results = grouped_success_values.get(f"{label}|{test_name}", ["❔"])
|
||||
if len(results) < runs_per_label[label]:
|
||||
results.extend(["❔"] * (runs_per_label[label] - len(results)))
|
||||
if len(results) > 1 and all(r == "❔" for r in results):
|
||||
results.clear()
|
||||
row.append(" ".join(results))
|
||||
table_data.append(row)
|
||||
|
||||
# Print tabulated data
|
||||
print(tabulate(table_data, headers=headers, tablefmt="grid"))
|
||||
8
autogpt/agbenchmark_config/config.json
Normal file
8
autogpt/agbenchmark_config/config.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"workspace": {
|
||||
"input": "agbenchmark_config/workspace",
|
||||
"output": "agbenchmark_config/workspace"
|
||||
},
|
||||
"entry_path": "agbenchmark.benchmarks",
|
||||
"host": "http://localhost:8000"
|
||||
}
|
||||
27
autogpt/autogpt.bat
Normal file
27
autogpt/autogpt.bat
Normal file
@@ -0,0 +1,27 @@
|
||||
@echo off
|
||||
setlocal enabledelayedexpansion
|
||||
|
||||
:FindPythonCommand
|
||||
for %%A in (python3 python) do (
|
||||
where /Q %%A
|
||||
if !errorlevel! EQU 0 (
|
||||
set "PYTHON_CMD=%%A"
|
||||
goto :Found
|
||||
)
|
||||
)
|
||||
|
||||
echo Python not found. Please install Python.
|
||||
pause
|
||||
exit /B 1
|
||||
|
||||
:Found
|
||||
%PYTHON_CMD% scripts/check_requirements.py
|
||||
if errorlevel 1 (
|
||||
echo
|
||||
poetry install --without dev
|
||||
echo
|
||||
echo Finished installing packages! Starting AutoGPT...
|
||||
echo
|
||||
)
|
||||
poetry run autogpt %*
|
||||
pause
|
||||
29
autogpt/autogpt.sh
Executable file
29
autogpt/autogpt.sh
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
function find_python_command() {
|
||||
if command -v python3 &> /dev/null
|
||||
then
|
||||
echo "python3"
|
||||
elif command -v python &> /dev/null
|
||||
then
|
||||
echo "python"
|
||||
else
|
||||
echo "Python not found. Please install Python."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
PYTHON_CMD=$(find_python_command)
|
||||
|
||||
if $PYTHON_CMD -c "import sys; sys.exit(sys.version_info < (3, 10))"; then
|
||||
if ! $PYTHON_CMD scripts/check_requirements.py; then
|
||||
echo
|
||||
poetry install --without dev
|
||||
echo
|
||||
echo "Finished installing packages! Starting AutoGPT..."
|
||||
echo
|
||||
fi
|
||||
poetry run autogpt "$@"
|
||||
else
|
||||
echo "Python 3.10 or higher is required to run Auto GPT."
|
||||
fi
|
||||
7
autogpt/autogpt/__init__.py
Normal file
7
autogpt/autogpt/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
|
||||
if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
|
||||
print("Setting random seed to 42")
|
||||
random.seed(42)
|
||||
5
autogpt/autogpt/__main__.py
Normal file
5
autogpt/autogpt/__main__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
"""AutoGPT: A GPT powered AI Assistant"""
|
||||
import autogpt.app.cli
|
||||
|
||||
if __name__ == "__main__":
|
||||
autogpt.app.cli.cli()
|
||||
108
autogpt/autogpt/agent_factory/configurators.py
Normal file
108
autogpt/autogpt/agent_factory/configurators.py
Normal file
@@ -0,0 +1,108 @@
|
||||
from typing import Optional
|
||||
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.file_storage.base import FileStorage
|
||||
from forge.llm.providers import MultiProvider
|
||||
|
||||
from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
|
||||
from autogpt.app.config import AppConfig
|
||||
|
||||
|
||||
def create_agent(
|
||||
agent_id: str,
|
||||
task: str,
|
||||
app_config: AppConfig,
|
||||
file_storage: FileStorage,
|
||||
llm_provider: MultiProvider,
|
||||
ai_profile: Optional[AIProfile] = None,
|
||||
directives: Optional[AIDirectives] = None,
|
||||
) -> Agent:
|
||||
if not task:
|
||||
raise ValueError("No task specified for new agent")
|
||||
ai_profile = ai_profile or AIProfile()
|
||||
directives = directives or AIDirectives()
|
||||
|
||||
agent = _configure_agent(
|
||||
agent_id=agent_id,
|
||||
task=task,
|
||||
ai_profile=ai_profile,
|
||||
directives=directives,
|
||||
app_config=app_config,
|
||||
file_storage=file_storage,
|
||||
llm_provider=llm_provider,
|
||||
)
|
||||
|
||||
return agent
|
||||
|
||||
|
||||
def configure_agent_with_state(
|
||||
state: AgentSettings,
|
||||
app_config: AppConfig,
|
||||
file_storage: FileStorage,
|
||||
llm_provider: MultiProvider,
|
||||
) -> Agent:
|
||||
return _configure_agent(
|
||||
state=state,
|
||||
app_config=app_config,
|
||||
file_storage=file_storage,
|
||||
llm_provider=llm_provider,
|
||||
)
|
||||
|
||||
|
||||
def _configure_agent(
|
||||
app_config: AppConfig,
|
||||
llm_provider: MultiProvider,
|
||||
file_storage: FileStorage,
|
||||
agent_id: str = "",
|
||||
task: str = "",
|
||||
ai_profile: Optional[AIProfile] = None,
|
||||
directives: Optional[AIDirectives] = None,
|
||||
state: Optional[AgentSettings] = None,
|
||||
) -> Agent:
|
||||
if state:
|
||||
agent_state = state
|
||||
elif agent_id and task and ai_profile and directives:
|
||||
agent_state = state or create_agent_state(
|
||||
agent_id=agent_id,
|
||||
task=task,
|
||||
ai_profile=ai_profile,
|
||||
directives=directives,
|
||||
app_config=app_config,
|
||||
)
|
||||
else:
|
||||
raise TypeError(
|
||||
"Either (state) or (agent_id, task, ai_profile, directives)"
|
||||
" must be specified"
|
||||
)
|
||||
|
||||
return Agent(
|
||||
settings=agent_state,
|
||||
llm_provider=llm_provider,
|
||||
file_storage=file_storage,
|
||||
app_config=app_config,
|
||||
)
|
||||
|
||||
|
||||
def create_agent_state(
|
||||
agent_id: str,
|
||||
task: str,
|
||||
ai_profile: AIProfile,
|
||||
directives: AIDirectives,
|
||||
app_config: AppConfig,
|
||||
) -> AgentSettings:
|
||||
return AgentSettings(
|
||||
agent_id=agent_id,
|
||||
name=Agent.default_settings.name,
|
||||
description=Agent.default_settings.description,
|
||||
task=task,
|
||||
ai_profile=ai_profile,
|
||||
directives=directives,
|
||||
config=AgentConfiguration(
|
||||
fast_llm=app_config.fast_llm,
|
||||
smart_llm=app_config.smart_llm,
|
||||
allow_fs_access=not app_config.restrict_to_workspace,
|
||||
use_functions_api=app_config.openai_functions,
|
||||
),
|
||||
history=Agent.default_settings.history.model_copy(deep=True),
|
||||
)
|
||||
36
autogpt/autogpt/agent_factory/generators.py
Normal file
36
autogpt/autogpt/agent_factory/generators.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from forge.file_storage.base import FileStorage
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.app.config import AppConfig
|
||||
from forge.llm.providers import MultiProvider
|
||||
|
||||
from .configurators import _configure_agent
|
||||
from .profile_generator import generate_agent_profile_for_task
|
||||
|
||||
|
||||
async def generate_agent_for_task(
|
||||
agent_id: str,
|
||||
task: str,
|
||||
app_config: AppConfig,
|
||||
file_storage: FileStorage,
|
||||
llm_provider: MultiProvider,
|
||||
) -> Agent:
|
||||
ai_profile, task_directives = await generate_agent_profile_for_task(
|
||||
task=task,
|
||||
app_config=app_config,
|
||||
llm_provider=llm_provider,
|
||||
)
|
||||
return _configure_agent(
|
||||
agent_id=agent_id,
|
||||
task=task,
|
||||
ai_profile=ai_profile,
|
||||
directives=task_directives,
|
||||
app_config=app_config,
|
||||
file_storage=file_storage,
|
||||
llm_provider=llm_provider,
|
||||
)
|
||||
241
autogpt/autogpt/agent_factory/profile_generator.py
Normal file
241
autogpt/autogpt/agent_factory/profile_generator.py
Normal file
@@ -0,0 +1,241 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
|
||||
from forge.llm.providers import MultiProvider
|
||||
from forge.llm.providers.schema import (
|
||||
AssistantChatMessage,
|
||||
ChatMessage,
|
||||
CompletionModelFunction,
|
||||
)
|
||||
from forge.models.config import SystemConfiguration, UserConfigurable
|
||||
from forge.models.json_schema import JSONSchema
|
||||
|
||||
from autogpt.app.config import AppConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentProfileGeneratorConfiguration(SystemConfiguration):
|
||||
llm_classification: LanguageModelClassification = UserConfigurable(
|
||||
default=LanguageModelClassification.SMART_MODEL
|
||||
)
|
||||
_example_call: object = {
|
||||
"name": "create_agent",
|
||||
"arguments": {
|
||||
"name": "CMOGPT",
|
||||
"description": (
|
||||
"a professional digital marketer AI that assists Solopreneurs "
|
||||
"in growing their businesses by providing "
|
||||
"world-class expertise in solving marketing problems "
|
||||
"for SaaS, content products, agencies, and more."
|
||||
),
|
||||
"directives": {
|
||||
"best_practices": [
|
||||
(
|
||||
"Engage in effective problem-solving, prioritization, "
|
||||
"planning, and supporting execution to address your "
|
||||
"marketing needs as your virtual "
|
||||
"Chief Marketing Officer."
|
||||
),
|
||||
(
|
||||
"Provide specific, actionable, and concise advice to "
|
||||
"help you make informed decisions without the use of "
|
||||
"platitudes or overly wordy explanations."
|
||||
),
|
||||
(
|
||||
"Identify and prioritize quick wins and cost-effective "
|
||||
"campaigns that maximize results with minimal time and "
|
||||
"budget investment."
|
||||
),
|
||||
(
|
||||
"Proactively take the lead in guiding you and offering "
|
||||
"suggestions when faced with unclear information or "
|
||||
"uncertainty to ensure your marketing strategy remains "
|
||||
"on track."
|
||||
),
|
||||
],
|
||||
"constraints": [
|
||||
"Do not suggest illegal or unethical plans or strategies.",
|
||||
"Take reasonable budgetary limits into account.",
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
system_prompt: str = UserConfigurable(
|
||||
default=(
|
||||
"Your job is to respond to a user-defined task, given in triple quotes, by "
|
||||
"invoking the `create_agent` function to generate an autonomous agent to "
|
||||
"complete the task. "
|
||||
"You should supply a role-based name for the agent (_GPT), "
|
||||
"an informative description for what the agent does, and 1 to 5 directives "
|
||||
"in each of the categories Best Practices and Constraints, "
|
||||
"that are optimally aligned with the successful completion "
|
||||
"of its assigned task.\n"
|
||||
"\n"
|
||||
"Example Input:\n"
|
||||
'"""Help me with marketing my business"""\n\n'
|
||||
"Example Call:\n"
|
||||
"```\n"
|
||||
f"{json.dumps(_example_call, indent=4)}"
|
||||
"\n```"
|
||||
)
|
||||
)
|
||||
user_prompt_template: str = UserConfigurable(default='"""{user_objective}"""')
|
||||
create_agent_function: dict = UserConfigurable(
|
||||
default=CompletionModelFunction(
|
||||
name="create_agent",
|
||||
description="Create a new autonomous AI agent to complete a given task.",
|
||||
parameters={
|
||||
"name": JSONSchema(
|
||||
type=JSONSchema.Type.STRING,
|
||||
description="A short role-based name for an autonomous agent.",
|
||||
required=True,
|
||||
),
|
||||
"description": JSONSchema(
|
||||
type=JSONSchema.Type.STRING,
|
||||
description=(
|
||||
"An informative one sentence description "
|
||||
"of what the AI agent does"
|
||||
),
|
||||
required=True,
|
||||
),
|
||||
"directives": JSONSchema(
|
||||
type=JSONSchema.Type.OBJECT,
|
||||
properties={
|
||||
"best_practices": JSONSchema(
|
||||
type=JSONSchema.Type.ARRAY,
|
||||
minItems=1,
|
||||
maxItems=5,
|
||||
items=JSONSchema(
|
||||
type=JSONSchema.Type.STRING,
|
||||
),
|
||||
description=(
|
||||
"One to five highly effective best practices "
|
||||
"that are optimally aligned with the completion "
|
||||
"of the given task"
|
||||
),
|
||||
required=True,
|
||||
),
|
||||
"constraints": JSONSchema(
|
||||
type=JSONSchema.Type.ARRAY,
|
||||
minItems=1,
|
||||
maxItems=5,
|
||||
items=JSONSchema(
|
||||
type=JSONSchema.Type.STRING,
|
||||
),
|
||||
description=(
|
||||
"One to five reasonable and efficacious constraints "
|
||||
"that are optimally aligned with the completion "
|
||||
"of the given task"
|
||||
),
|
||||
required=True,
|
||||
),
|
||||
},
|
||||
required=True,
|
||||
),
|
||||
},
|
||||
).model_dump()
|
||||
)
|
||||
|
||||
|
||||
class AgentProfileGenerator(PromptStrategy):
|
||||
default_configuration: AgentProfileGeneratorConfiguration = (
|
||||
AgentProfileGeneratorConfiguration()
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
llm_classification: LanguageModelClassification,
|
||||
system_prompt: str,
|
||||
user_prompt_template: str,
|
||||
create_agent_function: dict,
|
||||
):
|
||||
self._llm_classification = llm_classification
|
||||
self._system_prompt_message = system_prompt
|
||||
self._user_prompt_template = user_prompt_template
|
||||
self._create_agent_function = CompletionModelFunction.model_validate(
|
||||
create_agent_function
|
||||
)
|
||||
|
||||
@property
|
||||
def llm_classification(self) -> LanguageModelClassification:
|
||||
return self._llm_classification
|
||||
|
||||
def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
|
||||
system_message = ChatMessage.system(self._system_prompt_message)
|
||||
user_message = ChatMessage.user(
|
||||
self._user_prompt_template.format(
|
||||
user_objective=user_objective,
|
||||
)
|
||||
)
|
||||
prompt = ChatPrompt(
|
||||
messages=[system_message, user_message],
|
||||
functions=[self._create_agent_function],
|
||||
)
|
||||
return prompt
|
||||
|
||||
def parse_response_content(
|
||||
self,
|
||||
response: AssistantChatMessage,
|
||||
) -> tuple[AIProfile, AIDirectives]:
|
||||
"""Parse the actual text response from the objective model.
|
||||
|
||||
Args:
|
||||
response_content: The raw response content from the objective model.
|
||||
|
||||
Returns:
|
||||
The parsed response.
|
||||
"""
|
||||
try:
|
||||
if not response.tool_calls:
|
||||
raise ValueError(
|
||||
f"LLM did not call {self._create_agent_function.name} function; "
|
||||
"agent profile creation failed"
|
||||
)
|
||||
arguments: object = response.tool_calls[0].function.arguments
|
||||
ai_profile = AIProfile(
|
||||
ai_name=arguments.get("name"), # type: ignore
|
||||
ai_role=arguments.get("description"), # type: ignore
|
||||
)
|
||||
ai_directives = AIDirectives(
|
||||
best_practices=arguments.get("directives", {}).get("best_practices"),
|
||||
constraints=arguments.get("directives", {}).get("constraints"),
|
||||
resources=[],
|
||||
)
|
||||
except KeyError:
|
||||
logger.debug(f"Failed to parse this response content: {response}")
|
||||
raise
|
||||
return ai_profile, ai_directives
|
||||
|
||||
|
||||
async def generate_agent_profile_for_task(
|
||||
task: str,
|
||||
app_config: AppConfig,
|
||||
llm_provider: MultiProvider,
|
||||
) -> tuple[AIProfile, AIDirectives]:
|
||||
"""Generates an AIConfig object from the given string.
|
||||
|
||||
Returns:
|
||||
AIConfig: The AIConfig object tailored to the user's input
|
||||
"""
|
||||
agent_profile_generator = AgentProfileGenerator(
|
||||
**AgentProfileGenerator.default_configuration.model_dump() # HACK
|
||||
)
|
||||
|
||||
prompt = agent_profile_generator.build_prompt(task)
|
||||
|
||||
# Call LLM with the string as user input
|
||||
output = await llm_provider.create_chat_completion(
|
||||
prompt.messages,
|
||||
model_name=app_config.smart_llm,
|
||||
functions=prompt.functions,
|
||||
completion_parser=agent_profile_generator.parse_response_content,
|
||||
)
|
||||
|
||||
# Debug LLM Output
|
||||
logger.debug(f"AI Config Generator Raw Output: {output.response}")
|
||||
|
||||
return output.parsed_result
|
||||
37
autogpt/autogpt/agents/README.md
Normal file
37
autogpt/autogpt/agents/README.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# 🤖 Agents
|
||||
|
||||
Agent is composed of [🧩 Components](./components.md) and responsible for executing pipelines and some additional logic. The base class for all agents is `BaseAgent`, it has the necessary logic to collect components and execute protocols.
|
||||
|
||||
## Important methods
|
||||
|
||||
`BaseAgent` provides two abstract methods needed for any agent to work properly:
|
||||
1. `propose_action`: This method is responsible for proposing an action based on the current state of the agent, it returns `ThoughtProcessOutput`.
|
||||
2. `execute`: This method is responsible for executing the proposed action, returns `ActionResult`.
|
||||
|
||||
## AutoGPT Agent
|
||||
|
||||
`Agent` is the main agent provided by AutoGPT. It's a subclass of `BaseAgent`. It has all the [Built-in Components](./built-in-components.md). `Agent` implements the essential abstract methods from `BaseAgent`: `propose_action` and `execute`.
|
||||
|
||||
## Building your own Agent
|
||||
|
||||
The easiest way to build your own agent is to extend the `Agent` class and add additional components. By doing this you can reuse the existing components and the default logic for executing [⚙️ Protocols](./protocols.md).
|
||||
|
||||
```py
|
||||
class MyComponent(AgentComponent):
|
||||
pass
|
||||
|
||||
class MyAgent(Agent):
|
||||
def __init__(
|
||||
self,
|
||||
settings: AgentSettings,
|
||||
llm_provider: MultiProvider
|
||||
file_storage: FileStorage,
|
||||
app_config: AppConfig,
|
||||
):
|
||||
# Call the parent constructor to bring in the default components
|
||||
super().__init__(settings, llm_provider, file_storage, app_config)
|
||||
# Add your custom component
|
||||
self.my_component = MyComponent()
|
||||
```
|
||||
|
||||
For more customization, you can override the `propose_action` and `execute` or even subclass `BaseAgent` directly. This way you can have full control over the agent's components and behavior. Have a look at the [implementation of Agent](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt/autogpt/agents/agent.py) for more details.
|
||||
9
autogpt/autogpt/agents/__init__.py
Normal file
9
autogpt/autogpt/agents/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
from .agent import Agent
|
||||
from .agent_manager import AgentManager
|
||||
from .prompt_strategies.one_shot import OneShotAgentActionProposal
|
||||
|
||||
__all__ = [
|
||||
"AgentManager",
|
||||
"Agent",
|
||||
"OneShotAgentActionProposal",
|
||||
]
|
||||
313
autogpt/autogpt/agents/agent.py
Normal file
313
autogpt/autogpt/agents/agent.py
Normal file
@@ -0,0 +1,313 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, ClassVar, Optional
|
||||
|
||||
import sentry_sdk
|
||||
from forge.agent.base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
|
||||
from forge.agent.protocols import (
|
||||
AfterExecute,
|
||||
AfterParse,
|
||||
CommandProvider,
|
||||
DirectiveProvider,
|
||||
MessageProvider,
|
||||
)
|
||||
from forge.command.command import Command
|
||||
from forge.components.action_history import (
|
||||
ActionHistoryComponent,
|
||||
EpisodicActionHistory,
|
||||
)
|
||||
from forge.components.action_history.action_history import ActionHistoryConfiguration
|
||||
from forge.components.code_executor.code_executor import (
|
||||
CodeExecutorComponent,
|
||||
CodeExecutorConfiguration,
|
||||
)
|
||||
from forge.components.context.context import AgentContext, ContextComponent
|
||||
from forge.components.file_manager import FileManagerComponent
|
||||
from forge.components.git_operations import GitOperationsComponent
|
||||
from forge.components.image_gen import ImageGeneratorComponent
|
||||
from forge.components.system import SystemComponent
|
||||
from forge.components.user_interaction import UserInteractionComponent
|
||||
from forge.components.watchdog import WatchdogComponent
|
||||
from forge.components.web import WebSearchComponent, WebSeleniumComponent
|
||||
from forge.file_storage.base import FileStorage
|
||||
from forge.llm.prompting.schema import ChatPrompt
|
||||
from forge.llm.prompting.utils import dump_prompt
|
||||
from forge.llm.providers import (
|
||||
AssistantFunctionCall,
|
||||
ChatMessage,
|
||||
ChatModelResponse,
|
||||
MultiProvider,
|
||||
)
|
||||
from forge.llm.providers.utils import function_specs_from_commands
|
||||
from forge.models.action import (
|
||||
ActionErrorResult,
|
||||
ActionInterruptedByHuman,
|
||||
ActionResult,
|
||||
ActionSuccessResult,
|
||||
)
|
||||
from forge.models.config import Configurable
|
||||
from forge.utils.exceptions import (
|
||||
AgentException,
|
||||
AgentTerminated,
|
||||
CommandExecutionError,
|
||||
UnknownCommandError,
|
||||
)
|
||||
from pydantic import Field
|
||||
|
||||
from .prompt_strategies.one_shot import (
|
||||
OneShotAgentActionProposal,
|
||||
OneShotAgentPromptStrategy,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.app.config import AppConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentConfiguration(BaseAgentConfiguration):
|
||||
pass
|
||||
|
||||
|
||||
class AgentSettings(BaseAgentSettings):
|
||||
config: AgentConfiguration = Field( # type: ignore
|
||||
default_factory=AgentConfiguration
|
||||
)
|
||||
|
||||
history: EpisodicActionHistory[OneShotAgentActionProposal] = Field(
|
||||
default_factory=EpisodicActionHistory[OneShotAgentActionProposal]
|
||||
)
|
||||
"""(STATE) The action history of the agent."""
|
||||
|
||||
context: AgentContext = Field(default_factory=AgentContext)
|
||||
|
||||
|
||||
class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
|
||||
default_settings: ClassVar[AgentSettings] = AgentSettings(
|
||||
name="Agent",
|
||||
description=__doc__ if __doc__ else "",
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
settings: AgentSettings,
|
||||
llm_provider: MultiProvider,
|
||||
file_storage: FileStorage,
|
||||
app_config: AppConfig,
|
||||
):
|
||||
super().__init__(settings)
|
||||
|
||||
self.llm_provider = llm_provider
|
||||
prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(
|
||||
deep=True
|
||||
)
|
||||
prompt_config.use_functions_api = (
|
||||
settings.config.use_functions_api
|
||||
# Anthropic currently doesn't support tools + prefilling :(
|
||||
and self.llm.provider_name != "anthropic"
|
||||
)
|
||||
self.prompt_strategy = OneShotAgentPromptStrategy(prompt_config, logger)
|
||||
self.commands: list[Command] = []
|
||||
|
||||
# Components
|
||||
self.system = SystemComponent()
|
||||
self.history = (
|
||||
ActionHistoryComponent(
|
||||
settings.history,
|
||||
lambda x: self.llm_provider.count_tokens(x, self.llm.name),
|
||||
llm_provider,
|
||||
ActionHistoryConfiguration(
|
||||
llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
|
||||
),
|
||||
)
|
||||
.run_after(WatchdogComponent)
|
||||
.run_after(SystemComponent)
|
||||
)
|
||||
if not app_config.noninteractive_mode:
|
||||
self.user_interaction = UserInteractionComponent()
|
||||
self.file_manager = FileManagerComponent(file_storage, settings)
|
||||
self.code_executor = CodeExecutorComponent(
|
||||
self.file_manager.workspace,
|
||||
CodeExecutorConfiguration(
|
||||
docker_container_name=f"{settings.agent_id}_sandbox"
|
||||
),
|
||||
)
|
||||
self.git_ops = GitOperationsComponent()
|
||||
self.image_gen = ImageGeneratorComponent(self.file_manager.workspace)
|
||||
self.web_search = WebSearchComponent()
|
||||
self.web_selenium = WebSeleniumComponent(
|
||||
llm_provider,
|
||||
app_config.app_data_dir,
|
||||
)
|
||||
self.context = ContextComponent(self.file_manager.workspace, settings.context)
|
||||
self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
|
||||
ContextComponent
|
||||
)
|
||||
|
||||
self.event_history = settings.history
|
||||
self.app_config = app_config
|
||||
|
||||
async def propose_action(self) -> OneShotAgentActionProposal:
|
||||
"""Proposes the next action to execute, based on the task and current state.
|
||||
|
||||
Returns:
|
||||
The command name and arguments, if any, and the agent's thoughts.
|
||||
"""
|
||||
self.reset_trace()
|
||||
|
||||
# Get directives
|
||||
resources = await self.run_pipeline(DirectiveProvider.get_resources)
|
||||
constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
|
||||
best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)
|
||||
|
||||
directives = self.state.directives.model_copy(deep=True)
|
||||
directives.resources += resources
|
||||
directives.constraints += constraints
|
||||
directives.best_practices += best_practices
|
||||
|
||||
# Get commands
|
||||
self.commands = await self.run_pipeline(CommandProvider.get_commands)
|
||||
self._remove_disabled_commands()
|
||||
|
||||
# Get messages
|
||||
messages = await self.run_pipeline(MessageProvider.get_messages)
|
||||
|
||||
include_os_info = (
|
||||
self.code_executor.config.execute_local_commands
|
||||
if hasattr(self, "code_executor")
|
||||
else False
|
||||
)
|
||||
|
||||
prompt: ChatPrompt = self.prompt_strategy.build_prompt(
|
||||
messages=messages,
|
||||
task=self.state.task,
|
||||
ai_profile=self.state.ai_profile,
|
||||
ai_directives=directives,
|
||||
commands=function_specs_from_commands(self.commands),
|
||||
include_os_info=include_os_info,
|
||||
)
|
||||
|
||||
logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
|
||||
output = await self.complete_and_parse(prompt)
|
||||
self.config.cycle_count += 1
|
||||
|
||||
return output
|
||||
|
||||
async def complete_and_parse(
|
||||
self, prompt: ChatPrompt, exception: Optional[Exception] = None
|
||||
) -> OneShotAgentActionProposal:
|
||||
if exception:
|
||||
prompt.messages.append(ChatMessage.system(f"Error: {exception}"))
|
||||
|
||||
response: ChatModelResponse[
|
||||
OneShotAgentActionProposal
|
||||
] = await self.llm_provider.create_chat_completion(
|
||||
prompt.messages,
|
||||
model_name=self.llm.name,
|
||||
completion_parser=self.prompt_strategy.parse_response_content,
|
||||
functions=prompt.functions,
|
||||
prefill_response=prompt.prefill_response,
|
||||
)
|
||||
result = response.parsed_result
|
||||
|
||||
await self.run_pipeline(AfterParse.after_parse, result)
|
||||
|
||||
return result
|
||||
|
||||
async def execute(
|
||||
self,
|
||||
proposal: OneShotAgentActionProposal,
|
||||
user_feedback: str = "",
|
||||
) -> ActionResult:
|
||||
tool = proposal.use_tool
|
||||
|
||||
# Get commands
|
||||
self.commands = await self.run_pipeline(CommandProvider.get_commands)
|
||||
self._remove_disabled_commands()
|
||||
|
||||
try:
|
||||
return_value = await self._execute_tool(tool)
|
||||
|
||||
result = ActionSuccessResult(outputs=return_value)
|
||||
except AgentTerminated:
|
||||
raise
|
||||
except AgentException as e:
|
||||
result = ActionErrorResult.from_exception(e)
|
||||
logger.warning(f"{tool} raised an error: {e}")
|
||||
sentry_sdk.capture_exception(e)
|
||||
|
||||
result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
|
||||
if result_tlength > self.send_token_limit // 3:
|
||||
result = ActionErrorResult(
|
||||
reason=f"Command {tool.name} returned too much output. "
|
||||
"Do not execute this command again with the same arguments."
|
||||
)
|
||||
|
||||
await self.run_pipeline(AfterExecute.after_execute, result)
|
||||
|
||||
logger.debug("\n".join(self.trace))
|
||||
|
||||
return result
|
||||
|
||||
async def do_not_execute(
|
||||
self, denied_proposal: OneShotAgentActionProposal, user_feedback: str
|
||||
) -> ActionResult:
|
||||
result = ActionInterruptedByHuman(feedback=user_feedback)
|
||||
|
||||
await self.run_pipeline(AfterExecute.after_execute, result)
|
||||
|
||||
logger.debug("\n".join(self.trace))
|
||||
|
||||
return result
|
||||
|
||||
async def _execute_tool(self, tool_call: AssistantFunctionCall) -> Any:
|
||||
"""Execute the command and return the result
|
||||
|
||||
Args:
|
||||
tool_call (AssistantFunctionCall): The tool call to execute
|
||||
|
||||
Returns:
|
||||
str: The execution result
|
||||
"""
|
||||
# Execute a native command with the same name or alias, if it exists
|
||||
command = self._get_command(tool_call.name)
|
||||
try:
|
||||
result = command(**tool_call.arguments)
|
||||
if inspect.isawaitable(result):
|
||||
return await result
|
||||
return result
|
||||
except AgentException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise CommandExecutionError(str(e))
|
||||
|
||||
def _get_command(self, command_name: str) -> Command:
|
||||
for command in reversed(self.commands):
|
||||
if command_name in command.names:
|
||||
return command
|
||||
|
||||
raise UnknownCommandError(
|
||||
f"Cannot execute command '{command_name}': unknown command."
|
||||
)
|
||||
|
||||
def _remove_disabled_commands(self) -> None:
|
||||
self.commands = [
|
||||
command
|
||||
for command in self.commands
|
||||
if not any(
|
||||
name in self.app_config.disabled_commands for name in command.names
|
||||
)
|
||||
]
|
||||
|
||||
def find_obscured_commands(self) -> list[Command]:
|
||||
seen_names = set()
|
||||
obscured_commands = []
|
||||
for command in reversed(self.commands):
|
||||
# If all of the command's names have been seen, it's obscured
|
||||
if seen_names.issuperset(command.names):
|
||||
obscured_commands.append(command)
|
||||
else:
|
||||
seen_names.update(command.names)
|
||||
return list(reversed(obscured_commands))
|
||||
46
autogpt/autogpt/agents/agent_manager.py
Normal file
46
autogpt/autogpt/agents/agent_manager.py
Normal file
@@ -0,0 +1,46 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
|
||||
from forge.file_storage.base import FileStorage
|
||||
|
||||
from autogpt.agents.agent import AgentSettings
|
||||
|
||||
|
||||
class AgentManager:
    """Manages persisted agent state files under the `agents/` storage subroot."""

    def __init__(self, file_storage: FileStorage):
        # All agent data lives in the "agents" subdirectory of the given storage.
        self.file_manager = file_storage.clone_with_subroot("agents")

    @staticmethod
    def generate_id(agent_name: str) -> str:
        """Generate a unique ID for an agent given agent name."""
        unique_id = str(uuid.uuid4())[:8]
        return f"{agent_name}-{unique_id}"

    def list_agents(self) -> list[str]:
        """Return all agent directories within storage."""
        agent_dirs: list[str] = []
        for file_path in self.file_manager.list_files():
            # An agent dir is recognized by a state.json directly inside it.
            if len(file_path.parts) == 2 and file_path.name == "state.json":
                agent_dirs.append(file_path.parent.name)
        return agent_dirs

    def get_agent_dir(self, agent_id: str) -> Path:
        """Return the directory of the agent with the given ID.

        Raises:
            FileNotFoundError: if no agent with that ID exists in storage.
        """
        assert len(agent_id) > 0
        if not self.file_manager.exists(agent_id):
            raise FileNotFoundError(f"No agent with ID '{agent_id}'")
        return self.file_manager.root / agent_id

    def load_agent_state(self, agent_id: str) -> AgentSettings:
        """Load the state of the agent with the given ID.

        Raises:
            FileNotFoundError: if the agent has no state.json file.
        """
        state_file_path = Path(agent_id) / "state.json"
        if not self.file_manager.exists(state_file_path):
            raise FileNotFoundError(f"Agent with ID '{agent_id}' has no state.json")

        state = self.file_manager.read_file(state_file_path)
        # Pydantic v2 API; `parse_raw` is the deprecated/removed v1 spelling,
        # and this codebase uses v2 elsewhere (`model_validate`, `model_copy`).
        return AgentSettings.model_validate_json(state)
|
||||
281
autogpt/autogpt/agents/prompt_strategies/one_shot.py
Normal file
281
autogpt/autogpt/agents/prompt_strategies/one_shot.py
Normal file
@@ -0,0 +1,281 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import platform
|
||||
import re
|
||||
from logging import Logger
|
||||
|
||||
import distro
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.json.parsing import extract_dict_from_json
|
||||
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
|
||||
from forge.llm.prompting.utils import format_numbered_list
|
||||
from forge.llm.providers.schema import (
|
||||
AssistantChatMessage,
|
||||
ChatMessage,
|
||||
CompletionModelFunction,
|
||||
)
|
||||
from forge.models.action import ActionProposal
|
||||
from forge.models.config import SystemConfiguration, UserConfigurable
|
||||
from forge.models.json_schema import JSONSchema
|
||||
from forge.models.utils import ModelWithSummary
|
||||
from forge.utils.exceptions import InvalidAgentResponseError
|
||||
from pydantic import Field
|
||||
|
||||
_RESPONSE_INTERFACE_NAME = "AssistantResponse"
|
||||
|
||||
|
||||
class AssistantThoughts(ModelWithSummary):
    """Structured 'thinking' section of the assistant's response.

    The `description` of each field is surfaced to the LLM via the generated
    JSON schema, so these strings are part of the prompt, not mere docs.
    """

    observations: str = Field(
        description="Relevant observations from your last action (if any)"
    )
    text: str = Field(description="Thoughts")
    reasoning: str = Field(description="Reasoning behind the thoughts")
    self_criticism: str = Field(description="Constructive self-criticism")
    plan: list[str] = Field(description="Short list that conveys the long-term plan")
    speak: str = Field(description="Summary of thoughts, to say to user")

    def summary(self) -> str:
        # The free-form `text` field doubles as the one-line summary.
        return self.text
|
||||
|
||||
|
||||
class OneShotAgentActionProposal(ActionProposal):
    """Action proposal whose `thoughts` are narrowed to `AssistantThoughts`."""

    thoughts: AssistantThoughts  # type: ignore
|
||||
|
||||
|
||||
class OneShotAgentPromptConfiguration(SystemConfiguration):
    """User-configurable templates for the one-shot prompt strategy."""

    # Skeleton of the system prompt body; the placeholders are filled in by
    # `OneShotAgentPromptStrategy.build_system_prompt`.
    DEFAULT_BODY_TEMPLATE: str = (
        "## Constraints\n"
        "You operate within the following constraints:\n"
        "{constraints}\n"
        "\n"
        "## Resources\n"
        "You can leverage access to the following resources:\n"
        "{resources}\n"
        "\n"
        "## Commands\n"
        "These are the ONLY commands you can use."
        " Any action you perform must be possible through one of these commands:\n"
        "{commands}\n"
        "\n"
        "## Best practices\n"
        "{best_practices}"
    )

    # Final user message of every prompt; asks the model for exactly one command.
    DEFAULT_CHOOSE_ACTION_INSTRUCTION: str = (
        "Determine exactly one command to use next based on the given goals "
        "and the progress you have made so far, "
        "and respond using the JSON schema specified previously:"
    )

    body_template: str = UserConfigurable(default=DEFAULT_BODY_TEMPLATE)
    choose_action_instruction: str = UserConfigurable(
        default=DEFAULT_CHOOSE_ACTION_INSTRUCTION
    )
    # When True, commands are passed via the provider's native function-calling
    # API instead of being requested as part of the JSON response.
    use_functions_api: bool = UserConfigurable(default=False)

    #########
    # State #
    #########
    # progress_summaries: dict[tuple[int, int], str] = Field(
    #     default_factory=lambda: {(0, 0): ""}
    # )
|
||||
|
||||
|
||||
class OneShotAgentPromptStrategy(PromptStrategy):
    """Prompt strategy that asks the LLM for exactly one next action per call."""

    default_configuration: OneShotAgentPromptConfiguration = (
        OneShotAgentPromptConfiguration()
    )

    def __init__(
        self,
        configuration: OneShotAgentPromptConfiguration,
        logger: Logger,
    ):
        self.config = configuration
        # JSON schema of the expected response object; rendered into the
        # system prompt as a TypeScript-style interface.
        self.response_schema = JSONSchema.from_dict(
            OneShotAgentActionProposal.model_json_schema()
        )
        self.logger = logger

    @property
    def llm_classification(self) -> LanguageModelClassification:
        return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

    def build_prompt(
        self,
        *,
        messages: list[ChatMessage],
        task: str,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
        **extras,
    ) -> ChatPrompt:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. The `task` (in triple quotes), followed by the given `messages`
        3. `choose_action_instruction` (the cycle instruction)
        """
        system_prompt, response_prefill = self.build_system_prompt(
            ai_profile=ai_profile,
            ai_directives=ai_directives,
            commands=commands,
            include_os_info=include_os_info,
        )

        final_instruction_msg = ChatMessage.user(self.config.choose_action_instruction)

        return ChatPrompt(
            messages=[
                ChatMessage.system(system_prompt),
                ChatMessage.user(f'"""{task}"""'),
                *messages,
                final_instruction_msg,
            ],
            prefill_response=response_prefill,
            functions=commands if self.config.use_functions_api else [],
        )

    def build_system_prompt(
        self,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
    ) -> tuple[str, str]:
        """
        Builds the system prompt.

        Returns:
            str: The system prompt body
            str: The desired start for the LLM's response; used to steer the output
        """
        response_fmt_instruction, response_prefill = self.response_format_instruction(
            self.config.use_functions_api
        )
        system_prompt_parts = (
            self._generate_intro_prompt(ai_profile)
            + (self._generate_os_info() if include_os_info else [])
            + [
                self.config.body_template.format(
                    constraints=format_numbered_list(ai_directives.constraints),
                    resources=format_numbered_list(ai_directives.resources),
                    commands=self._generate_commands_list(commands),
                    best_practices=format_numbered_list(ai_directives.best_practices),
                )
            ]
            + [
                "## Your Task\n"
                "The user will specify a task for you to execute, in triple quotes,"
                " in the next message. Your job is to complete the task while following"
                " your directives as given above, and terminate when your task is done."
            ]
            + ["## RESPONSE FORMAT\n" + response_fmt_instruction]
        )

        # Join non-empty parts together into paragraph format
        return (
            "\n\n".join(filter(None, system_prompt_parts)).strip("\n"),
            response_prefill,
        )

    def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
        """Render the response-format section of the system prompt.

        Returns:
            str: Instruction text containing the response JSON interface.
            str: A prefill (`{"<first_key>":`) used to steer the model's output.
        """
        response_schema = self.response_schema.model_copy(deep=True)
        assert response_schema.properties
        # When the provider's function-calling API carries the tool call,
        # it must not also appear in the JSON response body.
        if use_functions_api and "use_tool" in response_schema.properties:
            del response_schema.properties["use_tool"]

        # Unindent for performance
        response_format = re.sub(
            r"\n\s+",
            "\n",
            response_schema.to_typescript_object_interface(_RESPONSE_INTERFACE_NAME),
        )
        response_prefill = f'{{\n    "{list(response_schema.properties.keys())[0]}":'

        return (
            (
                "YOU MUST ALWAYS RESPOND WITH A JSON OBJECT OF THE FOLLOWING TYPE:\n"
                f"{response_format}"
                + ("\n\nYOU MUST ALSO INVOKE A TOOL!" if use_functions_api else "")
            ),
            response_prefill,
        )

    def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]:
        """Generates the introduction part of the prompt.

        Returns:
            list[str]: A list of strings forming the introduction part of the prompt.
        """
        return [
            f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.",
            "Your decisions must always be made independently without seeking "
            "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",
        ]

    def _generate_os_info(self) -> list[str]:
        """Generates the OS information part of the prompt.

        Returns:
            list[str]: The OS information part of the prompt.
        """
        os_name = platform.system()
        # `distro` gives a friendlier name than `platform` on Linux.
        os_info = (
            platform.platform(terse=True)
            if os_name != "Linux"
            else distro.name(pretty=True)
        )
        return [f"The OS you are running on is: {os_info}"]

    def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str:
        """Lists the commands available to the agent.

        Params:
            commands: The command signatures to render.

        Returns:
            str: A string containing a numbered list of commands.
        """
        try:
            return format_numbered_list([cmd.fmt_line() for cmd in commands])
        except AttributeError:
            self.logger.warning(f"Formatting commands failed. {commands}")
            raise

    def parse_response_content(
        self,
        response: AssistantChatMessage,
    ) -> OneShotAgentActionProposal:
        """Parse the LLM's raw reply into an `OneShotAgentActionProposal`.

        Raises:
            InvalidAgentResponseError: if the reply has no text content, or
                (with the functions API) no tool call.
        """
        if not response.content:
            raise InvalidAgentResponseError("Assistant response has no text content")

        self.logger.debug(
            "LLM response content:"
            + (
                f"\n{response.content}"
                if "\n" in response.content
                else f" '{response.content}'"
            )
        )
        assistant_reply_dict = extract_dict_from_json(response.content)
        self.logger.debug(
            "Parsing object extracted from LLM response:\n"
            f"{json.dumps(assistant_reply_dict, indent=4)}"
        )
        if self.config.use_functions_api:
            if not response.tool_calls:
                raise InvalidAgentResponseError("Assistant did not use a tool")
            assistant_reply_dict["use_tool"] = response.tool_calls[0].function

        parsed_response = OneShotAgentActionProposal.model_validate(
            assistant_reply_dict
        )
        # Pydantic v2: `copy()` is the deprecated v1 spelling; use `model_copy`
        # for consistency with the rest of this module.
        parsed_response.raw_message = response.model_copy()
        return parsed_response
|
||||
6
autogpt/autogpt/app/__init__.py
Normal file
6
autogpt/autogpt/app/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
from dotenv import load_dotenv

# Load the user's .env file into environment variables at package import time.
# `override=True` means .env values win over pre-existing environment variables.
load_dotenv(verbose=True, override=True)

# Keep the package namespace clean: the helper is not part of the public API.
del load_dotenv
|
||||
479
autogpt/autogpt/app/agent_protocol_server.py
Normal file
479
autogpt/autogpt/app/agent_protocol_server.py
Normal file
@@ -0,0 +1,479 @@
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
from collections import defaultdict
|
||||
from io import BytesIO
|
||||
from uuid import uuid4
|
||||
|
||||
import orjson
|
||||
from fastapi import APIRouter, FastAPI, UploadFile
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import RedirectResponse, StreamingResponse
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from forge.agent_protocol.api_router import base_router
|
||||
from forge.agent_protocol.database import AgentDB
|
||||
from forge.agent_protocol.middlewares import AgentMiddleware
|
||||
from forge.agent_protocol.models import (
|
||||
Artifact,
|
||||
Step,
|
||||
StepRequestBody,
|
||||
Task,
|
||||
TaskArtifactsListResponse,
|
||||
TaskListResponse,
|
||||
TaskRequestBody,
|
||||
TaskStepsListResponse,
|
||||
)
|
||||
from forge.file_storage import FileStorage
|
||||
from forge.llm.providers import ModelProviderBudget, MultiProvider
|
||||
from forge.models.action import ActionErrorResult, ActionSuccessResult
|
||||
from forge.utils.const import ASK_COMMAND, FINISH_COMMAND
|
||||
from forge.utils.exceptions import AgentFinished, NotFoundError
|
||||
from hypercorn.asyncio import serve as hypercorn_serve
|
||||
from hypercorn.config import Config as HypercornConfig
|
||||
from sentry_sdk import set_user
|
||||
|
||||
from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
|
||||
from autogpt.agents.agent_manager import AgentManager
|
||||
from autogpt.app.config import AppConfig
|
||||
from autogpt.app.utils import is_port_free
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentProtocolServer:
    """HTTP server exposing AutoGPT agents through the Agent Protocol REST API.

    Each AP task gets its own agent instance (persisted via AgentManager) and
    its own LLM budget, tracked in `_task_budgets`.
    """

    # Per-task LLM budget, keyed by task_id; shared by all steps of that task.
    _task_budgets: dict[str, ModelProviderBudget]

    def __init__(
        self,
        app_config: AppConfig,
        database: AgentDB,
        file_storage: FileStorage,
        llm_provider: MultiProvider,
    ):
        self.app_config = app_config
        self.db = database
        self.file_storage = file_storage
        self.llm_provider = llm_provider
        self.agent_manager = AgentManager(file_storage)
        # defaultdict: a fresh zero budget is created on first access per task.
        self._task_budgets = defaultdict(ModelProviderBudget)

    async def start(self, port: int = 8000, router: APIRouter = base_router):
        """Start the agent server."""
        logger.debug("Starting the agent server...")
        if not is_port_free(port):
            logger.error(f"Port {port} is already in use.")
            logger.info(
                "You can specify a port by either setting the AP_SERVER_PORT "
                "environment variable or defining AP_SERVER_PORT in the .env file."
            )
            return

        config = HypercornConfig()
        config.bind = [f"localhost:{port}"]
        app = FastAPI(
            title="AutoGPT Server",
            description="Forked from AutoGPT Forge; "
            "Modified version of The Agent Protocol.",
            version="v0.4",
        )

        # Configure CORS middleware
        default_origins = [f"http://localhost:{port}"]  # Default only local access
        configured_origins = [
            origin
            for origin in os.getenv("AP_SERVER_CORS_ALLOWED_ORIGINS", "").split(",")
            if origin  # Empty list if not configured
        ]
        origins = configured_origins or default_origins

        app.add_middleware(
            CORSMiddleware,
            allow_origins=origins,
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        app.include_router(router, prefix="/ap/v1")
        script_dir = os.path.dirname(os.path.realpath(__file__))
        frontend_path = (
            pathlib.Path(script_dir).joinpath("../../../frontend/build/web").resolve()
        )

        if os.path.exists(frontend_path):
            app.mount("/app", StaticFiles(directory=frontend_path), name="app")

            @app.get("/", include_in_schema=False)
            async def root():
                # Redirect the bare root URL to the bundled web frontend.
                return RedirectResponse(url="/app/index.html", status_code=307)

        else:
            logger.warning(
                f"Frontend not found. {frontend_path} does not exist. "
                "The frontend will not be available."
            )

        # Used to access the methods on this class from API route handlers
        app.add_middleware(AgentMiddleware, agent=self)

        config.loglevel = "ERROR"
        # NOTE(review): this overrides the earlier localhost-only bind and
        # exposes the server on all interfaces — confirm this is intended.
        config.bind = [f"0.0.0.0:{port}"]

        logger.info(f"AutoGPT server starting on http://localhost:{port}")
        await hypercorn_serve(app, config)  # type: ignore

    async def create_task(self, task_request: TaskRequestBody) -> Task:
        """
        Create a task for the agent.
        """
        # Tag outgoing telemetry with the requesting user, if provided.
        if user_id := (task_request.additional_input or {}).get("user_id"):
            set_user({"id": user_id})

        task = await self.db.create_task(
            input=task_request.input,
            additional_input=task_request.additional_input,
        )
        # TODO: re-evaluate performance benefit of task-oriented profiles
        # logger.debug(f"Creating agent for task: '{task.input}'")
        # task_agent = await generate_agent_for_task(
        task_agent = create_agent(
            agent_id=task_agent_id(task.task_id),
            task=task.input,
            app_config=self.app_config,
            file_storage=self.file_storage,
            llm_provider=self._get_task_llm_provider(task),
        )
        # Persist the fresh agent so later steps can restore it by ID.
        await task_agent.file_manager.save_state()

        return task

    async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
        """
        List all tasks that the agent has created.
        """
        logger.debug("Listing all tasks...")
        tasks, pagination = await self.db.list_tasks(page, pageSize)
        response = TaskListResponse(tasks=tasks, pagination=pagination)
        return response

    async def get_task(self, task_id: str) -> Task:
        """
        Get a task by ID.
        """
        logger.debug(f"Getting task with ID: {task_id}...")
        task = await self.db.get_task(task_id)
        return task

    async def list_steps(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskStepsListResponse:
        """
        List the IDs of all steps that the task has created.
        """
        logger.debug(f"Listing all steps created by task with ID: {task_id}...")
        steps, pagination = await self.db.list_steps(task_id, page, pageSize)
        response = TaskStepsListResponse(steps=steps, pagination=pagination)
        return response

    async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
        """Create a step for the task.

        One call executes the previously proposed action (if approved) and then
        asks the agent to propose the next action; both are recorded in the
        returned Step.
        """
        logger.debug(f"Creating a step for task with ID: {task_id}...")

        # Restore Agent instance
        task = await self.get_task(task_id)
        agent = configure_agent_with_state(
            state=self.agent_manager.load_agent_state(task_agent_id(task_id)),
            app_config=self.app_config,
            file_storage=self.file_storage,
            llm_provider=self._get_task_llm_provider(task),
        )

        if user_id := (task.additional_input or {}).get("user_id"):
            set_user({"id": user_id})

        # According to the Agent Protocol spec, the first execute_step request contains
        # the same task input as the parent create_task request.
        # To prevent this from interfering with the agent's process, we ignore the input
        # of this first step request, and just generate the first step proposal.
        is_init_step = not bool(agent.event_history)
        last_proposal, tool_result = None, None
        execute_approved = False

        # HACK: only for compatibility with AGBenchmark
        if step_request.input == "y":
            step_request.input = ""

        user_input = step_request.input if not is_init_step else ""

        # A pending (unexecuted) proposal from the previous cycle?
        if (
            not is_init_step
            and agent.event_history.current_episode
            and not agent.event_history.current_episode.result
        ):
            last_proposal = agent.event_history.current_episode.action
            # Empty user input means implicit approval of the proposal.
            execute_approved = not user_input

            logger.debug(
                f"Agent proposed command {last_proposal.use_tool}."
                f" User input/feedback: {repr(user_input)}"
            )

        # Save step request
        step = await self.db.create_step(
            task_id=task_id,
            input=step_request,
            is_last=(
                last_proposal is not None
                and last_proposal.use_tool.name == FINISH_COMMAND
                and execute_approved
            ),
        )
        agent.llm_provider = self._get_task_llm_provider(task, step.step_id)

        # Execute previously proposed action
        if last_proposal:
            # Register written files as AP Artifacts on this step.
            agent.file_manager.workspace.on_write_file = (
                lambda path: self._on_agent_write_file(
                    task=task, step=step, relative_path=path
                )
            )

            if last_proposal.use_tool.name == ASK_COMMAND:
                # The user's input *is* the result of the ask command.
                tool_result = ActionSuccessResult(outputs=user_input)
                agent.event_history.register_result(tool_result)
            elif execute_approved:
                step = await self.db.update_step(
                    task_id=task_id,
                    step_id=step.step_id,
                    status="running",
                )

                try:
                    # Execute previously proposed action
                    tool_result = await agent.execute(last_proposal)
                except AgentFinished:
                    # Task complete: report cost, persist state, and finish.
                    additional_output = {}
                    task_total_cost = agent.llm_provider.get_incurred_cost()
                    if task_total_cost > 0:
                        additional_output["task_total_cost"] = task_total_cost
                        logger.info(
                            f"Total LLM cost for task {task_id}: "
                            f"${round(task_total_cost, 2)}"
                        )

                    step = await self.db.update_step(
                        task_id=task_id,
                        step_id=step.step_id,
                        output=last_proposal.use_tool.arguments["reason"],
                        additional_output=additional_output,
                    )
                    await agent.file_manager.save_state()
                    return step
            else:
                # Non-empty user input on a pending proposal = rejection with feedback.
                assert user_input
                tool_result = await agent.do_not_execute(last_proposal, user_input)

        # Propose next action
        try:
            assistant_response = await agent.propose_action()
            next_tool_to_use = assistant_response.use_tool
            logger.debug(f"AI output: {assistant_response.thoughts}")
        except Exception as e:
            step = await self.db.update_step(
                task_id=task_id,
                step_id=step.step_id,
                status="completed",
                output=f"An error occurred while proposing the next action: {e}",
            )
            return step

        # Format step output
        output = (
            (
                f"`{last_proposal.use_tool}` returned:"
                + ("\n\n" if "\n" in str(tool_result) else " ")
                + f"{tool_result}\n\n"
            )
            if last_proposal and last_proposal.use_tool.name != ASK_COMMAND
            else ""
        )
        output += f"{assistant_response.thoughts.speak}\n\n"
        output += (
            f"Next Command: {next_tool_to_use}"
            if next_tool_to_use.name != ASK_COMMAND
            else next_tool_to_use.arguments["question"]
        )

        additional_output = {
            **(
                {
                    "last_action": {
                        "name": last_proposal.use_tool.name,
                        "args": last_proposal.use_tool.arguments,
                        "result": (
                            ""
                            if tool_result is None
                            else (
                                orjson.loads(tool_result.model_dump_json())
                                if not isinstance(tool_result, ActionErrorResult)
                                else {
                                    "error": str(tool_result.error),
                                    "reason": tool_result.reason,
                                }
                            )
                        ),
                    },
                }
                if last_proposal and tool_result
                else {}
            ),
            **assistant_response.model_dump(),
        }

        task_cumulative_cost = agent.llm_provider.get_incurred_cost()
        if task_cumulative_cost > 0:
            additional_output["task_cumulative_cost"] = task_cumulative_cost
            logger.debug(
                f"Running total LLM cost for task {task_id}: "
                f"${round(task_cumulative_cost, 3)}"
            )

        step = await self.db.update_step(
            task_id=task_id,
            step_id=step.step_id,
            status="completed",
            output=output,
            additional_output=additional_output,
        )

        await agent.file_manager.save_state()
        return step

    async def _on_agent_write_file(
        self, task: Task, step: Step, relative_path: pathlib.Path
    ) -> None:
        """
        Creates an Artifact for the written file, or updates the Artifact if it exists.
        """
        if relative_path.is_absolute():
            raise ValueError(f"File path '{relative_path}' is not relative")
        for a in task.artifacts or []:
            if a.relative_path == str(relative_path):
                logger.debug(f"Updating Artifact after writing to existing file: {a}")
                if not a.agent_created:
                    await self.db.update_artifact(a.artifact_id, agent_created=True)
                break
        else:
            # No existing artifact matched (for-else): create a new one.
            logger.debug(f"Creating Artifact for new file '{relative_path}'")
            await self.db.create_artifact(
                task_id=step.task_id,
                step_id=step.step_id,
                file_name=relative_path.parts[-1],
                agent_created=True,
                relative_path=str(relative_path),
            )

    async def get_step(self, task_id: str, step_id: str) -> Step:
        """
        Get a step by ID.
        """
        step = await self.db.get_step(task_id, step_id)
        return step

    async def list_artifacts(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskArtifactsListResponse:
        """
        List the artifacts that the task has created.
        """
        artifacts, pagination = await self.db.list_artifacts(task_id, page, pageSize)
        return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)

    async def create_artifact(
        self, task_id: str, file: UploadFile, relative_path: str
    ) -> Artifact:
        """
        Create an artifact for the task.
        """
        file_name = file.filename or str(uuid4())
        data = b""
        # Read the upload in 1 MiB chunks to avoid one huge read call.
        while contents := file.file.read(1024 * 1024):
            data += contents
        # Check if relative path ends with filename
        if relative_path.endswith(file_name):
            file_path = relative_path
        else:
            file_path = os.path.join(relative_path, file_name)

        workspace = self._get_task_agent_file_workspace(task_id)
        await workspace.write_file(file_path, data)

        artifact = await self.db.create_artifact(
            task_id=task_id,
            file_name=file_name,
            relative_path=relative_path,
            agent_created=False,
        )
        return artifact

    async def get_artifact(self, task_id: str, artifact_id: str) -> StreamingResponse:
        """
        Download a task artifact by ID.

        Raises:
            NotFoundError: if the artifact record does not exist.
            FileNotFoundError: if the artifact file is missing from storage.
        """
        try:
            workspace = self._get_task_agent_file_workspace(task_id)
            artifact = await self.db.get_artifact(artifact_id)
            if artifact.file_name not in artifact.relative_path:
                file_path = os.path.join(artifact.relative_path, artifact.file_name)
            else:
                file_path = artifact.relative_path
            retrieved_artifact = workspace.read_file(file_path, binary=True)
        # NOTE(review): both handlers below merely re-raise, so this
        # try/except is currently a no-op — possibly leftover from mapping
        # FileNotFoundError to NotFoundError; confirm intent.
        except NotFoundError:
            raise
        except FileNotFoundError:
            raise

        return StreamingResponse(
            BytesIO(retrieved_artifact),
            media_type="application/octet-stream",
            headers={
                "Content-Disposition": f'attachment; filename="{artifact.file_name}"'
            },
        )

    def _get_task_agent_file_workspace(self, task_id: str | int) -> FileStorage:
        """Return the file workspace of the agent that belongs to `task_id`."""
        agent_id = task_agent_id(task_id)
        return self.file_storage.clone_with_subroot(f"agents/{agent_id}/workspace")

    def _get_task_llm_provider(self, task: Task, step_id: str = "") -> MultiProvider:
        """
        Configures the LLM provider with headers to link outgoing requests to the task.
        """
        task_llm_budget = self._task_budgets[task.task_id]

        # NOTE(review): reaches into the provider's private `_configuration`,
        # `_settings` and `_budget` attributes; relies on MultiProvider internals.
        task_llm_provider_config = self.llm_provider._configuration.model_copy(
            deep=True
        )
        _extra_request_headers = task_llm_provider_config.extra_request_headers
        _extra_request_headers["AP-TaskID"] = task.task_id
        if step_id:
            _extra_request_headers["AP-StepID"] = step_id
        if task.additional_input and (user_id := task.additional_input.get("user_id")):
            _extra_request_headers["AutoGPT-UserID"] = user_id

        settings = self.llm_provider._settings.model_copy()
        settings.budget = task_llm_budget
        settings.configuration = task_llm_provider_config
        task_llm_provider = self.llm_provider.__class__(
            settings=settings,
            logger=logger.getChild(
                f"Task-{task.task_id}_{self.llm_provider.__class__.__name__}"
            ),
        )
        # Keep a reference to the provider's live budget for cost tracking.
        self._task_budgets[task.task_id] = task_llm_provider._budget  # type: ignore

        return task_llm_provider
|
||||
|
||||
|
||||
def task_agent_id(task_id: str | int) -> str:
|
||||
return f"AutoGPT-{task_id}"
|
||||
216
autogpt/autogpt/app/cli.py
Normal file
216
autogpt/autogpt/app/cli.py
Normal file
@@ -0,0 +1,216 @@
|
||||
"""Main script for the autogpt package."""
|
||||
from logging import _nameToLevel as logLevelMap
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import click
|
||||
from forge.logging.config import LogFormatName
|
||||
|
||||
from .telemetry import setup_telemetry
|
||||
|
||||
|
||||
# NOTE: no docstring on purpose — click would surface it as the group's help text.
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx: click.Context):
    # Root command group; set up telemetry before any subcommand runs.
    setup_telemetry()

    # Invoke `run` by default
    if ctx.invoked_subcommand is None:
        ctx.invoke(run)
|
||||
|
||||
|
||||
@cli.command()
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="Specifies whether to suppress the output of latest news on startup.",
)
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-name",
    type=str,
    help="AI name override",
)
@click.option(
    "--ai-role",
    type=str,
    help="AI role override",
)
@click.option(
    "--constraint",
    type=str,
    multiple=True,
    help=(
        "Add or override AI constraints to include in the prompt;"
        " may be used multiple times to pass multiple constraints"
    ),
)
@click.option(
    "--resource",
    type=str,
    multiple=True,
    help=(
        "Add or override AI resources to include in the prompt;"
        " may be used multiple times to pass multiple resources"
    ),
)
@click.option(
    "--best-practice",
    type=str,
    multiple=True,
    help=(
        "Add or override AI best practices to include in the prompt;"
        " may be used multiple times to pass multiple best practices"
    ),
)
@click.option(
    "--override-directives",
    is_flag=True,
    help=(
        "If specified, --constraint, --resource and --best-practice will override"
        " the AI's directives instead of being appended to them"
    ),
)
@click.option(
    "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
)
@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
@click.option(
    "--log-format",
    help=(
        "Choose a log format; defaults to 'simple'."
        " Also implies --log-file-format, unless it is specified explicitly."
        " Using the 'structured_google_cloud' format disables log file output."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--log-file-format",
    help=(
        "Override the format used for the log file output."
        " Defaults to the application's global --log-format."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--component-config-file",
    help="Path to a json configuration file",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True, path_type=Path),
)
# Every option above maps onto one keyword argument of run_auto_gpt() below.
def run(
    continuous: bool,
    continuous_limit: Optional[int],
    speak: bool,
    install_plugin_deps: bool,
    skip_news: bool,
    skip_reprompt: bool,
    ai_name: Optional[str],
    ai_role: Optional[str],
    resource: tuple[str],
    constraint: tuple[str],
    best_practice: tuple[str],
    override_directives: bool,
    debug: bool,
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
    component_config_file: Optional[Path],
) -> None:
    """
    Sets up and runs an agent, based on the task specified by the user, or resumes an
    existing agent.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt

    # Multi-value options arrive as tuples from click; forward them as lists.
    run_auto_gpt(
        continuous=continuous,
        continuous_limit=continuous_limit,
        skip_reprompt=skip_reprompt,
        speak=speak,
        debug=debug,
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        skip_news=skip_news,
        install_plugin_deps=install_plugin_deps,
        override_ai_name=ai_name,
        override_ai_role=ai_role,
        resources=list(resource),
        constraints=list(constraint),
        best_practices=list(best_practice),
        override_directives=override_directives,
        component_config_file=component_config_file,
    )
|
||||
|
||||
|
||||
@cli.command()
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
)
@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
@click.option(
    "--log-format",
    help=(
        "Choose a log format; defaults to 'simple'."
        " Also implies --log-file-format, unless it is specified explicitly."
        " Using the 'structured_google_cloud' format disables log file output."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--log-file-format",
    help=(
        "Override the format used for the log file output."
        " Defaults to the application's global --log-format."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
def serve(
    install_plugin_deps: bool,
    debug: bool,
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
) -> None:
    """
    Starts an Agent Protocol compliant AutoGPT server, which creates a custom agent for
    every task.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt_server

    run_auto_gpt_server(
        debug=debug,
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        install_plugin_deps=install_plugin_deps,
    )
|
||||
|
||||
|
||||
# Module entry point: run the `cli` click group (defaults to the `run` command).
if __name__ == "__main__":
    cli()
|
||||
221
autogpt/autogpt/app/config.py
Normal file
221
autogpt/autogpt/app/config.py
Normal file
@@ -0,0 +1,221 @@
|
||||
"""Configuration class to store the state of bools for different scripts access."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
|
||||
import forge
|
||||
from forge.config.base import BaseConfig
|
||||
from forge.llm.providers import CHAT_MODELS, ModelName
|
||||
from forge.llm.providers.openai import OpenAICredentials, OpenAIModelName
|
||||
from forge.logging.config import LoggingConfig
|
||||
from forge.models.config import Configurable, UserConfigurable
|
||||
from pydantic import SecretStr, ValidationInfo, field_validator
|
||||
|
||||
logger = logging.getLogger(__name__)

# Repository/package root, derived from the installed `forge` package location.
PROJECT_ROOT = Path(forge.__file__).parent.parent
# Default (relative) path of the Azure OpenAI configuration file.
AZURE_CONFIG_FILE = Path("azure.yaml")

# Convenience aliases for the canonical OpenAI model tiers.
GPT_4_MODEL = OpenAIModelName.GPT4
GPT_3_MODEL = OpenAIModelName.GPT3
|
||||
|
||||
|
||||
class AppConfig(BaseConfig):
    # Application-wide configuration for AutoGPT. Fields declared with
    # UserConfigurable can be overridden via the named environment variable.
    # (Comment used instead of a docstring: pydantic surfaces class docstrings
    # as the model's schema description.)
    name: str = "Auto-GPT configuration"
    description: str = "Default configuration for the Auto-GPT application."

    ########################
    # Application Settings #
    ########################
    project_root: Path = PROJECT_ROOT
    app_data_dir: Path = project_root / "data"
    skip_news: bool = False
    skip_reprompt: bool = False
    # Key the user types to authorize a proposed command.
    authorise_key: str = UserConfigurable(default="y", from_env="AUTHORISE_COMMAND_KEY")
    # Key the user types to exit instead of authorizing.
    exit_key: str = UserConfigurable(default="n", from_env="EXIT_KEY")
    noninteractive_mode: bool = False
    logging: LoggingConfig = LoggingConfig()
    # Optional JSON file with per-component configuration overrides.
    component_config_file: Optional[Path] = UserConfigurable(
        default=None, from_env="COMPONENT_CONFIG_FILE"
    )

    ##########################
    # Agent Control Settings #
    ##########################
    # Model configuration
    fast_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT3,
        from_env="FAST_LLM",
    )
    smart_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT4_TURBO,
        from_env="SMART_LLM",
    )
    temperature: float = UserConfigurable(default=0, from_env="TEMPERATURE")
    # Only the exact string "True" in the env var enables tool calling.
    openai_functions: bool = UserConfigurable(
        default=False, from_env=lambda: os.getenv("OPENAI_FUNCTIONS", "False") == "True"
    )
    embedding_model: str = UserConfigurable(
        default="text-embedding-3-small", from_env="EMBEDDING_MODEL"
    )

    # Run loop configuration
    continuous_mode: bool = False
    continuous_limit: int = 0

    ############
    # Commands #
    ############
    # General
    # Comma-separated env var, e.g. DISABLED_COMMANDS="web_search,execute_code".
    disabled_commands: list[str] = UserConfigurable(
        default_factory=list,
        from_env=lambda: _safe_split(os.getenv("DISABLED_COMMANDS")),
    )

    # File ops
    restrict_to_workspace: bool = UserConfigurable(
        default=True,
        from_env=lambda: os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True",
    )

    ###############
    # Credentials #
    ###############
    # OpenAI
    openai_credentials: Optional[OpenAICredentials] = None
    azure_config_file: Optional[Path] = UserConfigurable(
        default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE"
    )

    @field_validator("openai_functions")
    def validate_openai_functions(cls, value: bool, info: ValidationInfo):
        # Tool calling may only be enabled if the configured smart LLM supports it.
        if value:
            smart_llm = info.data["smart_llm"]
            assert CHAT_MODELS[smart_llm].has_function_call_api, (
                f"Model {smart_llm} does not support tool calling. "
                "Please disable OPENAI_FUNCTIONS or choose a suitable model."
            )
        return value
|
||||
|
||||
|
||||
class ConfigBuilder(Configurable[AppConfig]):
    # Factory for AppConfig instances populated from the environment.
    default_settings = AppConfig()

    @classmethod
    def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> AppConfig:
        """Initialize the Config class"""

        config = cls.build_agent_configuration()
        config.project_root = project_root

        # Make relative paths absolute
        for k in {
            "azure_config_file",  # TODO: move from project root
        }:
            setattr(config, k, project_root / getattr(config, k))

        # Load Azure deployment details when the OpenAI credentials target Azure.
        if (
            config.openai_credentials
            and config.openai_credentials.api_type == SecretStr("azure")
            and (config_file := config.azure_config_file)
        ):
            config.openai_credentials.load_azure_config(config_file)

        return config
|
||||
|
||||
|
||||
async def assert_config_has_required_llm_api_keys(config: AppConfig) -> None:
    """
    Check if API keys (if required) are set for the configured SMART_LLM and FAST_LLM.

    Each provider (Anthropic, Groq, OpenAI) is only checked when one of the
    configured LLMs belongs to it.

    Raises:
        ValueError: If credentials for a required provider are missing or invalid.
    """
    from forge.llm.providers.anthropic import AnthropicModelName
    from forge.llm.providers.groq import GroqModelName
    from pydantic import ValidationError

    if set((config.smart_llm, config.fast_llm)).intersection(AnthropicModelName):
        from forge.llm.providers.anthropic import AnthropicCredentials

        try:
            credentials = AnthropicCredentials.from_env()
        except ValidationError as e:
            # A ValidationError mentioning "api_key" means the key is missing.
            if "api_key" in str(e):
                logger.error(
                    "Set your Anthropic API key in .env or as an environment variable"
                )
                logger.info(
                    "For further instructions: "
                    "https://docs.agpt.co/autogpt/setup/#anthropic"
                )

            raise ValueError("Anthropic is unavailable: can't load credentials") from e

        key_pattern = r"^sk-ant-api03-[\w\-]{95}"

        # If key is set, but it looks invalid
        if not re.search(key_pattern, credentials.api_key.get_secret_value()):
            logger.warning(
                "Possibly invalid Anthropic API key! "
                f"Configured Anthropic API key does not match pattern '{key_pattern}'. "
                "If this is a valid key, please report this warning to the maintainers."
            )

    if set((config.smart_llm, config.fast_llm)).intersection(GroqModelName):
        from forge.llm.providers.groq import GroqProvider
        from groq import AuthenticationError

        try:
            # Listing available models doubles as a live credentials check.
            groq = GroqProvider()
            await groq.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error("Set your Groq API key in .env or as an environment variable")
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The Groq API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: invalid API key") from e

    if set((config.smart_llm, config.fast_llm)).intersection(OpenAIModelName):
        from forge.llm.providers.openai import OpenAIProvider
        from openai import AuthenticationError

        try:
            # Listing available models doubles as a live credentials check.
            openai = OpenAIProvider()
            await openai.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error(
                "Set your OpenAI API key in .env or as an environment variable"
            )
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The OpenAI API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: invalid API key") from e
|
||||
|
||||
|
||||
def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
|
||||
"""Split a string by a separator. Return an empty list if the string is None."""
|
||||
if s is None:
|
||||
return []
|
||||
return s.split(sep)
|
||||
83
autogpt/autogpt/app/configurator.py
Normal file
83
autogpt/autogpt/app/configurator.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""Configurator module."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Literal, Optional
|
||||
|
||||
import click
|
||||
from forge.llm.providers import ModelName, MultiProvider
|
||||
|
||||
from autogpt.app.config import GPT_3_MODEL, AppConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def apply_overrides_to_config(
    config: AppConfig,
    continuous: bool = False,
    continuous_limit: Optional[int] = None,
    skip_reprompt: bool = False,
    skip_news: bool = False,
) -> None:
    """Updates the config object with the given arguments.

    Args:
        config (AppConfig): The config object to update.
        continuous (bool): Whether to run in continuous mode.
        continuous_limit (int): The number of times to run in continuous mode.
        skip_reprompt (bool): Whether to skip the re-prompting messages on start.
        skip_news (bool): Whether to suppress the output of latest news on startup.

    Raises:
        click.UsageError: If --continuous-limit is used without --continuous.
    """
    config.continuous_mode = False

    if continuous:
        logger.warning(
            "Continuous mode is not recommended. It is potentially dangerous and may"
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        config.continuous_mode = True

        if continuous_limit:
            config.continuous_limit = continuous_limit

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")

    # Check availability of configured LLMs; fallback to other LLM if unavailable
    config.fast_llm, config.smart_llm = await check_models(
        (config.fast_llm, "fast_llm"), (config.smart_llm, "smart_llm")
    )

    if skip_reprompt:
        config.skip_reprompt = True

    if skip_news:
        config.skip_news = True
|
||||
|
||||
|
||||
async def check_models(
    *models: tuple[ModelName, Literal["smart_llm", "fast_llm"]]
) -> tuple[ModelName, ...]:
    """Check if model is available for use. If not, return gpt-3.5-turbo."""
    multi_provider = MultiProvider()
    available_models = await multi_provider.get_available_chat_models()

    def _resolve(model: ModelName, model_type: str) -> ModelName:
        # Keep the configured model only if the provider reports it available.
        # `==` (not set membership) deliberately matches the original equality
        # semantics between configured names and reported model names.
        for m in available_models:
            if model == m.name:
                return model
        logger.warning(
            f"You don't have access to {model}. "
            f"Setting {model_type} to {GPT_3_MODEL}."
        )
        return GPT_3_MODEL

    return tuple(_resolve(model, model_type) for model, model_type in models)
|
||||
19
autogpt/autogpt/app/input.py
Normal file
19
autogpt/autogpt/app/input.py
Normal file
@@ -0,0 +1,19 @@
|
||||
import logging
|
||||
|
||||
import click
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def clean_input(prompt: str = ""):
    """Prompt the user for a line of keyboard input.

    Returns the entered text; just pressing Enter yields "". Ctrl+C exits the
    whole process with status 0.
    """
    try:
        # Ask for input; default when just pressing Enter is an empty string.
        logger.debug("Asking user via keyboard...")

        return click.prompt(
            text=prompt, prompt_suffix=" ", default="", show_default=False
        )
    except KeyboardInterrupt:
        logger.info("You interrupted AutoGPT")
        logger.info("Quitting...")
        exit(0)
|
||||
774
autogpt/autogpt/app/main.py
Normal file
774
autogpt/autogpt/app/main.py
Normal file
@@ -0,0 +1,774 @@
|
||||
"""
|
||||
The application entry point. Can be invoked by a CLI or any other front end application.
|
||||
"""
|
||||
|
||||
import enum
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from types import FrameType
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from colorama import Fore, Style
|
||||
from forge.agent_protocol.database import AgentDB
|
||||
from forge.components.code_executor.code_executor import (
|
||||
is_docker_available,
|
||||
we_are_running_in_a_docker_container,
|
||||
)
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.file_storage import FileStorageBackendName, get_storage
|
||||
from forge.llm.providers import MultiProvider
|
||||
from forge.logging.config import configure_logging
|
||||
from forge.logging.utils import print_attribute, speak
|
||||
from forge.models.action import ActionInterruptedByHuman, ActionProposal
|
||||
from forge.models.utils import ModelWithSummary
|
||||
from forge.utils.const import FINISH_COMMAND
|
||||
from forge.utils.exceptions import AgentTerminated, InvalidAgentResponseError
|
||||
|
||||
from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
|
||||
from autogpt.agents.agent_manager import AgentManager
|
||||
from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
|
||||
from autogpt.app.config import (
|
||||
AppConfig,
|
||||
ConfigBuilder,
|
||||
assert_config_has_required_llm_api_keys,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agents.agent import Agent
|
||||
|
||||
from .configurator import apply_overrides_to_config
|
||||
from .input import clean_input
|
||||
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
|
||||
from .spinner import Spinner
|
||||
from .utils import (
|
||||
coroutine,
|
||||
get_legal_warning,
|
||||
markdown_to_ansi_style,
|
||||
print_git_branch_info,
|
||||
print_motd,
|
||||
print_python_version_info,
|
||||
)
|
||||
|
||||
|
||||
@coroutine
async def run_auto_gpt(
    continuous: bool = False,
    continuous_limit: Optional[int] = None,
    skip_reprompt: bool = False,
    speak: bool = False,
    debug: bool = False,
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
    skip_news: bool = False,
    install_plugin_deps: bool = False,
    override_ai_name: Optional[str] = None,
    override_ai_role: Optional[str] = None,
    resources: Optional[list[str]] = None,
    constraints: Optional[list[str]] = None,
    best_practices: Optional[list[str]] = None,
    override_directives: bool = False,
    component_config_file: Optional[Path] = None,
):
    """Set up and run an agent interactively, or resume an existing one.

    Builds config/storage/logging, lets the user pick or create an agent,
    then drives `run_interaction_loop` until the agent terminates, at which
    point its state is saved.
    """
    # Set up configuration
    config = ConfigBuilder.build_config_from_env()
    # Storage
    local = config.file_storage_backend == FileStorageBackendName.LOCAL
    restrict_to_root = not local or config.restrict_to_workspace
    file_storage = get_storage(
        config.file_storage_backend,
        root_path=Path("data"),
        restrict_to_root=restrict_to_root,
    )
    file_storage.initialize()

    # Set up logging module
    if speak:
        config.tts_config.speak_mode = True
    configure_logging(
        debug=debug,
        level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        config=config.logging,
        tts_config=config.tts_config,
    )

    await assert_config_has_required_llm_api_keys(config)

    await apply_overrides_to_config(
        config=config,
        continuous=continuous,
        continuous_limit=continuous_limit,
        skip_reprompt=skip_reprompt,
        skip_news=skip_news,
    )

    llm_provider = _configure_llm_provider(config)

    logger = logging.getLogger(__name__)

    if config.continuous_mode:
        for line in get_legal_warning().split("\n"):
            logger.warning(
                extra={
                    "title": "LEGAL:",
                    "title_color": Fore.RED,
                    "preserve_color": True,
                },
                msg=markdown_to_ansi_style(line),
            )

    if not config.skip_news:
        print_motd(logger)
        print_git_branch_info(logger)
        print_python_version_info(logger)
        print_attribute("Smart LLM", config.smart_llm)
        print_attribute("Fast LLM", config.fast_llm)
        if config.continuous_mode:
            print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
            if continuous_limit:
                print_attribute("Continuous Limit", config.continuous_limit)
        if config.tts_config.speak_mode:
            print_attribute("Speak Mode", "ENABLED")
        if we_are_running_in_a_docker_container() or is_docker_available():
            print_attribute("Code Execution", "ENABLED")
        else:
            print_attribute(
                "Code Execution",
                "DISABLED (Docker unavailable)",
                title_color=Fore.YELLOW,
            )

    # Let user choose an existing agent to run
    agent_manager = AgentManager(file_storage)
    existing_agents = agent_manager.list_agents()
    load_existing_agent = ""
    if existing_agents:
        print(
            "Existing agents\n---------------\n"
            + "\n".join(f"{i} - {id}" for i, id in enumerate(existing_agents, 1))
        )
        load_existing_agent = clean_input(
            "Enter the number or name of the agent to run,"
            " or hit enter to create a new one:",
        )
        # A purely numeric answer in range selects the agent by list position.
        if re.match(r"^\d+$", load_existing_agent.strip()) and 0 < int(
            load_existing_agent
        ) <= len(existing_agents):
            load_existing_agent = existing_agents[int(load_existing_agent) - 1]

    if load_existing_agent != "" and load_existing_agent not in existing_agents:
        logger.info(
            f"Unknown agent '{load_existing_agent}', "
            f"creating a new one instead.",
            extra={"color": Fore.YELLOW},
        )
        load_existing_agent = ""

    # Either load existing or set up new agent state
    agent = None
    agent_state = None

    ############################
    # Resume an Existing Agent #
    ############################
    if load_existing_agent:
        agent_state = None
        while True:
            answer = clean_input("Resume? [Y/n]")
            if answer == "" or answer.lower() == "y":
                agent_state = agent_manager.load_agent_state(load_existing_agent)
                break
            elif answer.lower() == "n":
                break

        if agent_state:
            agent = configure_agent_with_state(
                state=agent_state,
                app_config=config,
                file_storage=file_storage,
                llm_provider=llm_provider,
            )
            apply_overrides_to_ai_settings(
                ai_profile=agent.state.ai_profile,
                directives=agent.state.directives,
                override_name=override_ai_name,
                override_role=override_ai_role,
                resources=resources,
                constraints=constraints,
                best_practices=best_practices,
                replace_directives=override_directives,
            )

            if (
                (current_episode := agent.event_history.current_episode)
                and current_episode.action.use_tool.name == FINISH_COMMAND
                and not current_episode.result
            ):
                # Agent was resumed after `finish` -> rewrite result of `finish` action
                finish_reason = current_episode.action.use_tool.arguments["reason"]
                print(f"Agent previously self-terminated; reason: '{finish_reason}'")
                new_assignment = clean_input(
                    "Please give a follow-up question or assignment:"
                )
                agent.event_history.register_result(
                    ActionInterruptedByHuman(feedback=new_assignment)
                )

            # If any of these are specified as arguments,
            # assume the user doesn't want to revise them
            if not any(
                [
                    override_ai_name,
                    override_ai_role,
                    resources,
                    constraints,
                    best_practices,
                ]
            ):
                ai_profile, ai_directives = await interactively_revise_ai_settings(
                    ai_profile=agent.state.ai_profile,
                    directives=agent.state.directives,
                    app_config=config,
                )
            else:
                logger.info("AI config overrides specified through CLI; skipping revision")

    ######################
    # Set up a new Agent #
    ######################
    if not agent:
        task = ""
        while task.strip() == "":
            task = clean_input(
                "Enter the task that you want AutoGPT to execute,"
                " with as much detail as possible:",
            )

        ai_profile = AIProfile()
        additional_ai_directives = AIDirectives()
        apply_overrides_to_ai_settings(
            ai_profile=ai_profile,
            directives=additional_ai_directives,
            override_name=override_ai_name,
            override_role=override_ai_role,
            resources=resources,
            constraints=constraints,
            best_practices=best_practices,
            replace_directives=override_directives,
        )

        # If any of these are specified as arguments,
        # assume the user doesn't want to revise them
        if not any(
            [
                override_ai_name,
                override_ai_role,
                resources,
                constraints,
                best_practices,
            ]
        ):
            (
                ai_profile,
                additional_ai_directives,
            ) = await interactively_revise_ai_settings(
                ai_profile=ai_profile,
                directives=additional_ai_directives,
                app_config=config,
            )
        else:
            logger.info("AI config overrides specified through CLI; skipping revision")

        agent = create_agent(
            agent_id=agent_manager.generate_id(ai_profile.ai_name),
            task=task,
            ai_profile=ai_profile,
            directives=additional_ai_directives,
            app_config=config,
            file_storage=file_storage,
            llm_provider=llm_provider,
        )

        file_manager = agent.file_manager

        if file_manager and not agent.config.allow_fs_access:
            logger.info(
                f"{Fore.YELLOW}"
                "NOTE: All files/directories created by this agent can be found "
                f"inside its workspace at:{Fore.RESET} {file_manager.workspace.root}",
                extra={"preserve_color": True},
            )

    # TODO: re-evaluate performance benefit of task-oriented profiles
    # # Concurrently generate a custom profile for the agent and apply it once done
    # def update_agent_directives(
    #     task: asyncio.Task[tuple[AIProfile, AIDirectives]]
    # ):
    #     logger.debug(f"Updating AIProfile: {task.result()[0]}")
    #     logger.debug(f"Adding AIDirectives: {task.result()[1]}")
    #     agent.state.ai_profile = task.result()[0]
    #     agent.state.directives = agent.state.directives + task.result()[1]

    # asyncio.create_task(
    #     generate_agent_profile_for_task(
    #         task, app_config=config, llm_provider=llm_provider
    #     )
    # ).add_done_callback(update_agent_directives)

    # Load component configuration from file
    if _config_file := component_config_file or config.component_config_file:
        try:
            logger.info(f"Loading component configuration from {_config_file}")
            agent.load_component_configs(_config_file.read_text())
        except Exception as e:
            logger.error(f"Could not load component configuration: {e}")

    #################
    # Run the Agent #
    #################
    try:
        await run_interaction_loop(agent)
    except AgentTerminated:
        agent_id = agent.state.agent_id
        logger.info(f"Saving state of {agent_id}...")

        # Allow user to Save As other ID
        save_as_id = clean_input(
            f"Press enter to save as '{agent_id}',"
            " or enter a different ID to save to:",
        )
        # TODO: allow many-to-one relations of agents and workspaces
        # NOTE(review): an empty input yields "" (not None) since "".isspace()
        # is False — confirm this falls back to the current ID downstream.
        await agent.file_manager.save_state(
            save_as_id.strip() if not save_as_id.isspace() else None
        )
|
||||
|
||||
|
||||
@coroutine
async def run_auto_gpt_server(
    debug: bool = False,
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
    install_plugin_deps: bool = False,
):
    """Set up and start an Agent Protocol compliant AutoGPT server.

    Blocks until the server stops, then logs the total session cost.
    """
    # NOTE(review): install_plugin_deps is accepted but not used in this
    # entry point — confirm whether plugin installation was meant to happen here.
    from .agent_protocol_server import AgentProtocolServer

    config = ConfigBuilder.build_config_from_env()
    # Storage
    local = config.file_storage_backend == FileStorageBackendName.LOCAL
    restrict_to_root = not local or config.restrict_to_workspace
    file_storage = get_storage(
        config.file_storage_backend,
        root_path=Path("data"),
        restrict_to_root=restrict_to_root,
    )
    file_storage.initialize()

    # Set up logging module
    configure_logging(
        debug=debug,
        level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        config=config.logging,
        tts_config=config.tts_config,
    )

    await assert_config_has_required_llm_api_keys(config)

    await apply_overrides_to_config(
        config=config,
    )

    llm_provider = _configure_llm_provider(config)

    # Set up & start server
    database = AgentDB(
        database_string=os.getenv("AP_SERVER_DB_URL", "sqlite:///data/ap_server.db"),
        debug_enabled=debug,
    )
    port: int = int(os.getenv("AP_SERVER_PORT", default=8000))
    server = AgentProtocolServer(
        app_config=config,
        database=database,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )
    await server.start(port=port)

    # Reached only after the server shuts down.
    logging.getLogger().info(
        f"Total OpenAI session cost: "
        f"${round(sum(b.total_cost for b in server._task_budgets.values()), 2)}"
    )
|
||||
|
||||
|
||||
def _configure_llm_provider(config: AppConfig) -> MultiProvider:
    """Build a MultiProvider, verifying providers exist for the configured LLMs.

    Args:
        config: App config supplying `smart_llm` and `fast_llm` model names.

    Returns:
        The MultiProvider instance.

    Raises:
        Whatever `get_model_provider` raises when no provider is available
        for one of the configured models.
    """
    multi_provider = MultiProvider()
    for model in [config.smart_llm, config.fast_llm]:
        # Ensure model providers for configured LLMs are available
        multi_provider.get_model_provider(model)
    return multi_provider
|
||||
|
||||
|
||||
def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | float:
|
||||
# Translate from the continuous_mode/continuous_limit config
|
||||
# to a cycle_budget (maximum number of cycles to run without checking in with the
|
||||
# user) and a count of cycles_remaining before we check in..
|
||||
if continuous_mode:
|
||||
cycle_budget = continuous_limit if continuous_limit else math.inf
|
||||
else:
|
||||
cycle_budget = 1
|
||||
|
||||
return cycle_budget
|
||||
|
||||
|
||||
class UserFeedback(str, enum.Enum):
    """Enum for user feedback."""

    # Proceed with executing the proposed command.
    AUTHORIZE = "GENERATE NEXT COMMAND JSON"
    # Terminate the session.
    EXIT = "EXIT"
    # Free-form textual feedback for the agent.
    TEXT = "TEXT"
|
||||
|
||||
|
||||
async def run_interaction_loop(
    agent: "Agent",
) -> None:
    """Run the main interaction loop for the agent.

    Repeatedly: have the agent propose an action, show it to the user,
    collect authorization/feedback when the cycle budget runs out, then
    execute (or reject) the action. SIGINT is handled so the first Ctrl+C
    stops gracefully and the second quits immediately.

    Args:
        agent: The agent to run the interaction loop for.

    Returns:
        None
    """
    # These contain both application config and agent config, so grab them here.
    app_config = agent.app_config
    ai_profile = agent.state.ai_profile
    logger = logging.getLogger(__name__)

    cycle_budget = cycles_remaining = _get_cycle_budget(
        app_config.continuous_mode, app_config.continuous_limit
    )
    spinner = Spinner(
        "Thinking...", plain_output=app_config.logging.plain_console_output
    )
    # Set to an AgentTerminated instance when a graceful shutdown is pending.
    stop_reason = None

    def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
        # SIGINT handler. First interrupt requests a graceful stop (or drops
        # to interactive mode when running continuously); a second interrupt
        # while a stop is pending exits immediately.
        nonlocal cycle_budget, cycles_remaining, spinner, stop_reason
        if stop_reason:
            logger.error("Quitting immediately...")
            sys.exit()
        if cycles_remaining in [0, 1]:
            logger.warning("Interrupt signal received: shutting down gracefully.")
            logger.warning(
                "Press Ctrl+C again if you want to stop AutoGPT immediately."
            )
            stop_reason = AgentTerminated("Interrupt signal received")
        else:
            # Pause the spinner while logging so output isn't garbled.
            restart_spinner = spinner.running
            if spinner.running:
                spinner.stop()

            logger.error(
                "Interrupt signal received: stopping continuous command execution."
            )
            # Drop to interactive mode: user gets prompted on the next cycle.
            cycles_remaining = 1
            if restart_spinner:
                spinner.start()

    def handle_stop_signal() -> None:
        # Raise a pending graceful-stop request at a safe checkpoint.
        if stop_reason:
            raise stop_reason

    # Set up an interrupt signal for the agent.
    signal.signal(signal.SIGINT, graceful_agent_interrupt)

    #########################
    # Application Main Loop #
    #########################

    # Keep track of consecutive failures of the agent
    consecutive_failures = 0

    while cycles_remaining > 0:
        logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")

        ########
        # Plan #
        ########
        handle_stop_signal()
        # Have the agent determine the next action to take.
        # Re-use the current episode's proposal if it hasn't been executed yet.
        if not (_ep := agent.event_history.current_episode) or _ep.result:
            with spinner:
                try:
                    action_proposal = await agent.propose_action()
                except InvalidAgentResponseError as e:
                    logger.warning(f"The agent's thoughts could not be parsed: {e}")
                    consecutive_failures += 1
                    if consecutive_failures >= 3:
                        logger.error(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row. Terminating..."
                        )
                        raise AgentTerminated(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row."
                        )
                    continue
        else:
            action_proposal = _ep.action

        consecutive_failures = 0

        ###############
        # Update User #
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(
            ai_profile,
            action_proposal,
            speak_mode=app_config.tts_config.speak_mode,
        )

        ##################
        # Get user input #
        ##################
        handle_stop_signal()
        if cycles_remaining == 1:  # Last cycle
            feedback_type, feedback, new_cycles_remaining = await get_user_feedback(
                app_config,
                ai_profile,
            )

            if feedback_type == UserFeedback.AUTHORIZE:
                if new_cycles_remaining is not None:
                    # Case 1: User is altering the cycle budget.
                    if cycle_budget > 1:
                        cycle_budget = new_cycles_remaining + 1
                    # Case 2: User is running iteratively and
                    # has initiated a one-time continuous cycle
                    cycles_remaining = new_cycles_remaining + 1
                else:
                    # Case 1: Continuous iteration was interrupted -> resume
                    if cycle_budget > 1:
                        logger.info(
                            f"The cycle budget is {cycle_budget}.",
                            extra={
                                "title": "RESUMING CONTINUOUS EXECUTION",
                                "title_color": Fore.MAGENTA,
                            },
                        )
                    # Case 2: The agent used up its cycle budget -> reset
                    cycles_remaining = cycle_budget + 1
                logger.info(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    extra={"color": Fore.MAGENTA},
                )
            elif feedback_type == UserFeedback.EXIT:
                logger.warning("Exiting...")
                exit()
            else:  # user_feedback == UserFeedback.TEXT
                pass
        else:
            feedback = ""
            # First log new-line so user can differentiate sections better in console
            print()
            if cycles_remaining != math.inf:
                # Print authorized commands left value
                print_attribute(
                    "AUTHORIZED_COMMANDS_LEFT", cycles_remaining, title_color=Fore.CYAN
                )

        ###################
        # Execute Command #
        ###################
        # Decrement the cycle counter first to reduce the likelihood of a SIGINT
        # happening during command execution, setting the cycles remaining to 1,
        # and then having the decrement set it to 0, exiting the application.
        if not feedback:
            cycles_remaining -= 1

        if not action_proposal.use_tool:
            continue

        handle_stop_signal()

        # Textual feedback means the user rejected the action; relay the
        # feedback to the agent instead of executing.
        if not feedback:
            result = await agent.execute(action_proposal)
        else:
            result = await agent.do_not_execute(action_proposal, feedback)

        if result.status == "success":
            logger.info(result, extra={"title": "SYSTEM:", "title_color": Fore.YELLOW})
        elif result.status == "error":
            logger.warning(
                f"Command {action_proposal.use_tool.name} returned an error: "
                f"{result.error or result.reason}"
            )
|
||||
|
||||
|
||||
def update_user(
    ai_profile: AIProfile,
    action_proposal: "ActionProposal",
    speak_mode: bool = False,
) -> None:
    """Prints the assistant's thoughts and the next command to the user.

    Args:
        ai_profile: The AI's personality/profile; supplies the display name.
        action_proposal: The proposed action, including thoughts and the
            tool call (`use_tool`) to announce.
        speak_mode: Whether to also announce the proposed command via TTS.
    """
    logger = logging.getLogger(__name__)

    print_assistant_thoughts(
        ai_name=ai_profile.ai_name,
        thoughts=action_proposal.thoughts,
        speak_mode=speak_mode,
    )

    if speak_mode:
        speak(f"I want to execute {action_proposal.use_tool.name}")

    # First log new-line so user can differentiate sections better in console
    print()
    # Strip ESC characters so the tool name can't inject terminal escapes.
    safe_tool_name = remove_ansi_escape(action_proposal.use_tool.name)
    logger.info(
        f"COMMAND = {Fore.CYAN}{safe_tool_name}{Style.RESET_ALL} "
        f"ARGUMENTS = {Fore.CYAN}{action_proposal.use_tool.arguments}{Style.RESET_ALL}",
        extra={
            "title": "NEXT ACTION:",
            "title_color": Fore.CYAN,
            "preserve_color": True,
        },
    )
|
||||
|
||||
|
||||
async def get_user_feedback(
    config: AppConfig,
    ai_profile: AIProfile,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

    Prompts repeatedly until the input parses as one of:
      * the authorise key          -> (AUTHORIZE, "", None)
      * "<authorise key> -N"       -> (AUTHORIZE, "", N)
      * the exit key or "exit"     -> (EXIT, "", None)
      * any other non-empty input  -> (TEXT, <input>, None)

    Args:
        config: The program's configuration.
        ai_profile: The AI's configuration.

    Returns:
        A tuple of the user's feedback, the user's input, and the number of
        cycles remaining if the user has initiated a continuous cycle.
    """
    logger = logging.getLogger(__name__)

    # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
    # Get key press: Prompt the user to press enter to continue or escape
    # to exit
    logger.info(
        f"Enter '{config.authorise_key}' to authorise command, "
        f"'{config.authorise_key} -N' to run N continuous commands, "
        f"'{config.exit_key}' to exit program, or enter feedback for "
        f"{ai_profile.ai_name}..."
    )

    user_feedback = None
    user_input = ""
    new_cycles_remaining = None

    while user_feedback is None:
        # Get input from user
        console_input = clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)

        # Parse user input
        if console_input.lower().strip() == config.authorise_key:
            user_feedback = UserFeedback.AUTHORIZE
        elif console_input.lower().strip() == "":
            logger.warning("Invalid input format.")
        elif console_input.lower().startswith(f"{config.authorise_key} -"):
            try:
                # BUGFIX: parse the cycle count *before* setting the feedback
                # type. Previously AUTHORIZE was assigned first, so an
                # unparsable count (e.g. "y -x") logged the warning below but
                # still exited the loop authorised with no cycle count; now
                # the user is re-prompted.
                new_cycles_remaining = abs(int(console_input.split(" ")[1]))
                user_feedback = UserFeedback.AUTHORIZE
            except ValueError:
                logger.warning(
                    f"Invalid input format. "
                    f"Please enter '{config.authorise_key} -N'"
                    " where N is the number of continuous tasks."
                )
        elif console_input.lower() in [config.exit_key, "exit"]:
            user_feedback = UserFeedback.EXIT
        else:
            user_feedback = UserFeedback.TEXT
            user_input = console_input

    return user_feedback, user_input, new_cycles_remaining
|
||||
|
||||
|
||||
def print_assistant_thoughts(
    ai_name: str,
    thoughts: str | ModelWithSummary | AssistantThoughts,
    speak_mode: bool = False,
) -> None:
    """Print the agent's thoughts, with extra detail for AssistantThoughts.

    Args:
        ai_name: Display name of the AI, used in the section title.
        thoughts: A plain string, a ModelWithSummary (its summary() is
            shown), or full AssistantThoughts (reasoning, plan and
            self-criticism are shown too).
        speak_mode: When True, read the "speak" text aloud via TTS instead
            of printing it.
    """
    logger = logging.getLogger(__name__)

    # Pick the richest text form available for the given thoughts type.
    thoughts_text = remove_ansi_escape(
        thoughts.text
        if isinstance(thoughts, AssistantThoughts)
        else thoughts.summary()
        if isinstance(thoughts, ModelWithSummary)
        else thoughts
    )
    print_attribute(
        f"{ai_name.upper()} THOUGHTS", thoughts_text, title_color=Fore.YELLOW
    )

    if isinstance(thoughts, AssistantThoughts):
        print_attribute(
            "REASONING", remove_ansi_escape(thoughts.reasoning), title_color=Fore.YELLOW
        )
        if assistant_thoughts_plan := remove_ansi_escape(
            "\n".join(f"- {p}" for p in thoughts.plan)
        ):
            print_attribute("PLAN", "", title_color=Fore.YELLOW)
            # If it's a list, join it into a string
            # NOTE(review): assistant_thoughts_plan is always a str here
            # (built by the join above), so these two branches look like
            # dead defensive code — confirm before removing.
            if isinstance(assistant_thoughts_plan, list):
                assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
            elif isinstance(assistant_thoughts_plan, dict):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split("\n")
            for line in lines:
                line = line.lstrip("- ")
                logger.info(
                    line.strip(), extra={"title": "- ", "title_color": Fore.GREEN}
                )
        print_attribute(
            "CRITICISM",
            remove_ansi_escape(thoughts.self_criticism),
            title_color=Fore.YELLOW,
        )

        # Speak the assistant's thoughts
        if assistant_thoughts_speak := remove_ansi_escape(thoughts.speak):
            if speak_mode:
                speak(assistant_thoughts_speak)
            else:
                print_attribute(
                    "SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW
                )
    else:
        # NOTE(review): for non-AssistantThoughts this speaks regardless of
        # speak_mode — confirm whether this should be gated on speak_mode.
        speak(thoughts_text)
|
||||
|
||||
|
||||
def remove_ansi_escape(s: str) -> str:
    """Strip ANSI escape sequences (colors, cursor movement) from `s`.

    The previous implementation only deleted the ESC byte itself, which left
    the remainder of each sequence (e.g. "[36m") as visible garbage in logs.

    Args:
        s: The string to sanitize.

    Returns:
        `s` with complete ANSI escape sequences removed.
    """
    import re

    # CSI sequences like "\x1B[36m" plus single-character (Fe) escapes.
    ansi_pattern = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
    return ansi_pattern.sub("", s)
|
||||
203
autogpt/autogpt/app/setup.py
Normal file
203
autogpt/autogpt/app/setup.py
Normal file
@@ -0,0 +1,203 @@
|
||||
"""Set up the AI and its goals"""
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.logging.utils import print_attribute
|
||||
|
||||
from autogpt.app.config import AppConfig
|
||||
|
||||
from .input import clean_input
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def apply_overrides_to_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    override_name: Optional[str] = "",
    override_role: Optional[str] = "",
    replace_directives: bool = False,
    resources: Optional[list[str]] = None,
    constraints: Optional[list[str]] = None,
    best_practices: Optional[list[str]] = None,
):
    """Apply CLI/config overrides onto an AI profile and its directives.

    A non-empty name/role replaces the profile's value in place. Directive
    lists are replaced wholesale when `replace_directives` is set, appended
    otherwise; empty/None overrides never touch the existing values.
    """
    if override_name:
        ai_profile.ai_name = override_name
    if override_role:
        ai_profile.ai_role = override_role

    directive_overrides = (
        ("resources", resources),
        ("constraints", constraints),
        ("best_practices", best_practices),
    )
    for attr_name, override_values in directive_overrides:
        if not override_values:
            continue
        if replace_directives:
            setattr(directives, attr_name, override_values)
        else:
            # extend() mutates the existing list, matching `+=` semantics.
            getattr(directives, attr_name).extend(override_values)
|
||||
|
||||
|
||||
async def interactively_revise_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: AppConfig,
):
    """Interactively revise the AI settings.

    Loops: show the current settings, ask for confirmation, and if declined
    let the user edit the name, role, and each directive list (keep, replace,
    remove with '-', or append new entries). Mutates the arguments in place.

    Args:
        ai_profile (AIConfig): The current AI profile.
        directives (AIDirectives): The current AI directives.
        app_config (Config): The application configuration.

    Returns:
        A (ai_profile, directives) tuple — the same (mutated) objects that
        were passed in.
    """
    logger = logging.getLogger("revise_ai_profile")

    revised = False

    while True:
        # Print the current AI configuration
        print_ai_settings(
            title="Current AI Settings" if not revised else "Revised AI Settings",
            ai_profile=ai_profile,
            directives=directives,
            logger=logger,
        )

        # Empty input defaults to the authorise key, i.e. "accept".
        if (
            clean_input("Continue with these settings? [Y/n]").lower()
            or app_config.authorise_key
        ) == app_config.authorise_key:
            break

        # Ask for revised ai_profile
        ai_profile.ai_name = (
            clean_input("Enter AI name (or press enter to keep current):")
            or ai_profile.ai_name
        )
        ai_profile.ai_role = (
            clean_input("Enter new AI role (or press enter to keep current):")
            or ai_profile.ai_role
        )

        # Revise constraints
        i = 0
        while i < len(directives.constraints):
            constraint = directives.constraints[i]
            print_attribute(f"Constraint {i+1}:", f'"{constraint}"')
            new_constraint = (
                clean_input(
                    f"Enter new constraint {i+1}"
                    " (press enter to keep current, or '-' to remove):",
                )
                or constraint
            )

            if new_constraint == "-":
                # Remove without advancing i; the next item shifts into slot i.
                directives.constraints.remove(constraint)
                continue
            elif new_constraint:
                directives.constraints[i] = new_constraint

            i += 1

        # Add new constraints
        while True:
            new_constraint = clean_input(
                "Press enter to finish, or enter a constraint to add:",
            )
            if not new_constraint:
                break
            directives.constraints.append(new_constraint)

        # Revise resources
        i = 0
        while i < len(directives.resources):
            resource = directives.resources[i]
            print_attribute(f"Resource {i+1}:", f'"{resource}"')
            new_resource = (
                clean_input(
                    f"Enter new resource {i+1}"
                    " (press enter to keep current, or '-' to remove):",
                )
                or resource
            )
            if new_resource == "-":
                directives.resources.remove(resource)
                continue
            elif new_resource:
                directives.resources[i] = new_resource

            i += 1

        # Add new resources
        while True:
            new_resource = clean_input(
                "Press enter to finish, or enter a resource to add:",
            )
            if not new_resource:
                break
            directives.resources.append(new_resource)

        # Revise best practices
        i = 0
        while i < len(directives.best_practices):
            best_practice = directives.best_practices[i]
            print_attribute(f"Best Practice {i+1}:", f'"{best_practice}"')
            new_best_practice = (
                clean_input(
                    f"Enter new best practice {i+1}"
                    " (press enter to keep current, or '-' to remove):",
                )
                or best_practice
            )
            if new_best_practice == "-":
                directives.best_practices.remove(best_practice)
                continue
            elif new_best_practice:
                directives.best_practices[i] = new_best_practice

            i += 1

        # Add new best practices
        while True:
            new_best_practice = clean_input(
                "Press enter to finish, or add a best practice to add:",
            )
            if not new_best_practice:
                break
            directives.best_practices.append(new_best_practice)

        revised = True

    return ai_profile, directives
|
||||
|
||||
|
||||
def print_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    logger: logging.Logger,
    title: str = "AI Settings",
):
    """Pretty-print the AI profile and its directive lists via the logger.

    Shows a titled header, the name/role, then each directive section with
    "(none)" as a placeholder when the corresponding list is empty.
    """
    print_attribute(title, "")
    print_attribute("-" * len(title), "")
    print_attribute("Name :", ai_profile.ai_name)
    print_attribute("Role :", ai_profile.ai_role)

    sections = (
        ("Constraints:", directives.constraints),
        ("Resources:", directives.resources),
        ("Best practices:", directives.best_practices),
    )
    for label, entries in sections:
        print_attribute(label, "" if entries else "(none)")
        for entry in entries:
            logger.info(f"- {entry}")
|
||||
70
autogpt/autogpt/app/spinner.py
Normal file
70
autogpt/autogpt/app/spinner.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""A simple spinner module"""
|
||||
import itertools
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
|
||||
class Spinner:
    """A simple spinner class"""

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        plain_output: bool = False,
    ) -> None:
        """Initialize the spinner class

        Args:
            message (str): The message to display.
            delay (float): The delay between each spinner update.
            plain_output (bool): Whether to display the spinner or not.
        """
        self.plain_output = plain_output
        self.spinner = itertools.cycle(["-", "/", "|", "\\"])
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None

    def spin(self) -> None:
        """Worker-thread loop: redraw frames until `running` is cleared."""
        if self.plain_output:
            # Static mode: emit the message once, no animation loop.
            self.print_message()
            return
        while self.running:
            self.print_message()
            time.sleep(self.delay)

    def print_message(self):
        """Erase the current console line, then draw the next frame."""
        blank = " " * (len(self.message) + 2)
        sys.stdout.write(f"\r{blank}\r")
        sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
        sys.stdout.flush()

    def start(self):
        """Begin animating on a daemon-less background thread."""
        self.running = True
        self.spinner_thread = threading.Thread(target=self.spin)
        self.spinner_thread.start()

    def stop(self):
        """Stop the animation, join the worker thread, and clear the line."""
        self.running = False
        if self.spinner_thread is not None:
            self.spinner_thread.join()
        blank = " " * (len(self.message) + 2)
        sys.stdout.write(f"\r{blank}\r")
        sys.stdout.flush()

    def __enter__(self):
        """Start the spinner"""
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
        """Stop the spinner

        Args:
            exc_type (Exception): The exception type.
            exc_value (Exception): The exception value.
            exc_traceback (Exception): The exception traceback.
        """
        self.stop()
|
||||
64
autogpt/autogpt/app/telemetry.py
Normal file
64
autogpt/autogpt/app/telemetry.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import os
|
||||
|
||||
import click
|
||||
from colorama import Fore, Style
|
||||
|
||||
from .utils import (
|
||||
env_file_exists,
|
||||
get_git_user_email,
|
||||
set_env_config_value,
|
||||
vcs_state_diverges_from_master,
|
||||
)
|
||||
|
||||
|
||||
def setup_telemetry() -> None:
    """Prompt for telemetry opt-in (once) and initialize Sentry if opted in.

    If TELEMETRY_OPT_IN is unset and a .env file exists, asks the user and
    persists their answer to .env so they are not asked again. Then enables
    Sentry whenever TELEMETRY_OPT_IN is "true" (case-insensitive).
    """
    if os.getenv("TELEMETRY_OPT_IN") is None:
        # If no .env file is present, don't bother asking to enable telemetry,
        # to prevent repeated asking in non-persistent environments.
        if not env_file_exists():
            return

        allow_telemetry = click.prompt(
            f"""
{Style.BRIGHT}❓ Do you want to enable telemetry? ❓{Style.NORMAL}
This means AutoGPT will send diagnostic data to the core development team when something
goes wrong, and will help us to diagnose and fix problems earlier and faster. It also
allows us to collect basic performance data, which helps us find bottlenecks and other
things that slow down the application.

By entering 'yes', you confirm that you have read and agree to our Privacy Policy,
which is available here:
https://www.notion.so/auto-gpt/Privacy-Policy-ab11c9c20dbd4de1a15dcffe84d77984

Please enter 'yes' or 'no'""",
            type=bool,
        )
        # Persist the choice so the prompt is shown only once per install.
        set_env_config_value("TELEMETRY_OPT_IN", "true" if allow_telemetry else "false")
        click.echo(
            f"❤️ Thank you! Telemetry is {Fore.GREEN}enabled{Fore.RESET}."
            if allow_telemetry
            else f"👍 Telemetry is {Fore.RED}disabled{Fore.RESET}."
        )
        click.echo(
            "💡 If you ever change your mind, you can change 'TELEMETRY_OPT_IN' in .env"
        )
        click.echo()

    if os.getenv("TELEMETRY_OPT_IN", "").lower() == "true":
        _setup_sentry()
|
||||
|
||||
|
||||
def _setup_sentry() -> None:
    """Initialize the Sentry SDK for error reporting and tracing.

    Environment defaults to "production" unless the local checkout diverges
    from the upstream master branch (then "dev"); TELEMETRY_ENVIRONMENT
    overrides either.
    """
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://dc266f2f7a2381194d1c0fa36dff67d8@o4505260022104064.ingest.sentry.io/4506739844710400",  # noqa
        enable_tracing=True,
        environment=os.getenv(
            "TELEMETRY_ENVIRONMENT",
            "production" if not vcs_state_diverges_from_master() else "dev",
        ),
    )

    # Allow Sentry to distinguish between users
    sentry_sdk.set_user({"email": get_git_user_email(), "ip_address": "{{auto}}"})
|
||||
247
autogpt/autogpt/app/utils.py
Normal file
247
autogpt/autogpt/app/utils.py
Normal file
@@ -0,0 +1,247 @@
|
||||
import asyncio
|
||||
import contextlib
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Coroutine, ParamSpec, TypeVar, cast
|
||||
|
||||
import requests
|
||||
from colorama import Fore, Style
|
||||
from git import InvalidGitRepositoryError, Repo
|
||||
|
||||
P = ParamSpec("P")
|
||||
T = TypeVar("T")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_bulletin_from_web():
    """Fetch the latest bulletin from the AutoGPT master branch.

    Returns:
        The bulletin text, or "" on a network error or non-200 response.
    """
    bulletin_url = (
        "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT"
        "/master/autogpt/BULLETIN.md"
    )
    try:
        response = requests.get(bulletin_url)
    except requests.exceptions.RequestException:
        # Best-effort fetch: any network failure yields an empty bulletin.
        return ""
    return response.text if response.status_code == 200 else ""
|
||||
|
||||
|
||||
def get_current_git_branch() -> str:
    """Name of the active git branch, or "" when not inside a git repo."""
    try:
        repository = Repo(search_parent_directories=True)
        return repository.active_branch.name
    except InvalidGitRepositoryError:
        return ""
|
||||
|
||||
|
||||
def vcs_state_diverges_from_master() -> bool:
    """
    Returns whether a git repo is present and contains changes that are not in `master`.

    Only Python files under autogpt/autogpt/ are considered. "Diverges" means:
    uncommitted changes to those paths, commits ahead of the original
    Significant-Gravitas remote's master, or no original remote configured at
    all. Returns False when there is no git repo (clean download assumed).
    """
    paths_we_care_about = "autogpt/autogpt/**/*.py"
    try:
        repo = Repo(search_parent_directories=True)

        # Check for uncommitted changes in the specified path
        uncommitted_changes = repo.index.diff(None, paths=paths_we_care_about)
        if uncommitted_changes:
            return True

        # Find OG AutoGPT remote
        for remote in repo.remotes:
            if remote.url.endswith(
                tuple(
                    # All permutations of old/new repo name and HTTP(S)/Git URLs
                    f"{prefix}{path}"
                    for prefix in ("://github.com/", "git@github.com:")
                    for path in (
                        f"Significant-Gravitas/{n}.git" for n in ("AutoGPT", "Auto-GPT")
                    )
                )
            ):
                og_remote = remote
                break
        else:
            # Original AutoGPT remote is not configured: assume local codebase diverges
            return True

        master_branch = og_remote.refs.master
        # iter_commits yields commits reachable from master but not HEAD is
        # NOT what this checks — "HEAD..master" lists commits in master that
        # HEAD lacks being empty means HEAD contains master; any yielded
        # commit means HEAD is ahead for the watched paths.
        with contextlib.suppress(StopIteration):
            next(repo.iter_commits(f"HEAD..{master_branch}", paths=paths_we_care_about))
            # Local repo is one or more commits ahead of OG AutoGPT master branch
            return True

        # Relevant part of the codebase is on master
        return False
    except InvalidGitRepositoryError:
        # No git repo present: assume codebase is a clean download
        return False
|
||||
|
||||
|
||||
def get_git_user_email() -> str:
    """The configured git user.email, or "" outside a git repository."""
    try:
        config_reader = Repo(search_parent_directories=True).config_reader()
        return cast(str, config_reader.get_value("user", "email", default=""))
    except InvalidGitRepositoryError:
        return ""
|
||||
|
||||
|
||||
def get_latest_bulletin() -> tuple[str, bool]:
    """Return the bulletin text to display and whether it contains fresh news.

    Reads the cached bulletin from data/CURRENT_BULLETIN.md (if present),
    fetches the latest one from the web, and persists the web version when
    it differs from the cache.

    Returns:
        A (header + bulletin text, is_new_news) tuple; is_new_news is True
        when a non-empty web bulletin differs from the cached one.
    """
    bulletin_path = Path("data/CURRENT_BULLETIN.md")
    current_bulletin = ""
    if bulletin_path.exists():
        # Context-managed read via pathlib; the previous open(...).read()
        # leaked the file handle.
        current_bulletin = bulletin_path.read_text(encoding="utf-8")
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to AutoGPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest AutoGPT News and feature updates!\n"
            "If you don't wish to see this message, you "
            "can run AutoGPT with the *--skip-news* flag.\n"
        )

    if new_bulletin and is_new_news:
        # Persist the fresh bulletin (handle closed automatically).
        bulletin_path.write_text(new_bulletin, encoding="utf-8")
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news
|
||||
|
||||
|
||||
def markdown_to_ansi_style(markdown: str):
    """Convert a small markdown subset (headings, *emphasis*) to ANSI styling.

    Line by line: "# " headings become bright, any "#+ " heading becomes cyan
    with its marker stripped, and *text* spans on non-top-level lines become
    bright. Every line is terminated with a style reset.
    """
    styled_lines: list[str] = []
    for line in markdown.split("\n"):
        prefix_style = ""

        if line.startswith("# "):
            # Top-level heading: bold the whole line; emphasis left as-is.
            prefix_style += Style.BRIGHT
        else:
            # *text* / **text** -> bright span (negative lookarounds keep
            # '**' delimiters from being split mid-match).
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            # Any heading level: cyan text with the marker removed.
            prefix_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        styled_lines.append(f"{prefix_style}{line}{Style.RESET_ALL}")
    return "\n".join(styled_lines)
|
||||
|
||||
|
||||
def get_legal_warning() -> str:
    """Return the static markdown disclaimer/indemnification text shown to users."""
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.

## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.

## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.

## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.

## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
""" # noqa: E501
    return legal_text
|
||||
|
||||
|
||||
def print_motd(logger: logging.Logger):
    """Log the message-of-the-day bulletin; pause for Enter if it is new.

    Args:
        logger: Logger that receives each bulletin line under a "NEWS:" title.
    """
    motd, is_new_motd = get_latest_bulletin()
    if motd:
        motd = markdown_to_ansi_style(motd)
        for motd_line in motd.split("\n"):
            logger.info(
                extra={
                    "title": "NEWS:",
                    "title_color": Fore.GREEN,
                    "preserve_color": True,
                },
                msg=motd_line,
            )
        # Blocking prompt so a fresh bulletin is actually noticed.
        if is_new_motd:
            input(
                Fore.MAGENTA
                + Style.BRIGHT
                + "NEWS: Bulletin was updated! Press Enter to continue..."
                + Style.RESET_ALL
            )
|
||||
|
||||
|
||||
def print_git_branch_info(logger: logging.Logger):
    """Warn when running from a git branch other than `master`."""
    branch = get_current_git_branch()
    if not branch or branch == "master":
        # Either not a git checkout or on the supported branch: stay quiet.
        return
    logger.warning(
        f"You are running on `{branch}` branch"
        " - this is not a supported branch."
    )
|
||||
|
||||
|
||||
def print_python_version_info(logger: logging.Logger):
    """Log an error when the interpreter is older than Python 3.10."""
    minimum_version = (3, 10)
    if sys.version_info >= minimum_version:
        return
    logger.error(
        "WARNING: You are running on an older version of Python. "
        "Some people have observed problems with certain "
        "parts of AutoGPT with this version. "
        "Please consider upgrading to Python 3.10 or higher.",
    )
|
||||
|
||||
|
||||
ENV_FILE_PATH = Path(__file__).parent.parent.parent / ".env"
|
||||
|
||||
|
||||
def env_file_exists() -> bool:
    """Whether a .env file exists at the project root (see ENV_FILE_PATH)."""
    return ENV_FILE_PATH.is_file()
|
||||
|
||||
|
||||
def set_env_config_value(key: str, value: str) -> None:
    """Sets the specified env variable and updates it in .env as well"""
    # Update the running process first so callers see the value immediately.
    os.environ[key] = value

    # Rewrite .env in place: any existing (possibly commented-out) line for
    # `key` is replaced; otherwise the assignment is appended.
    with ENV_FILE_PATH.open("r+") as file:
        lines = file.readlines()
        file.seek(0)
        key_already_in_file = False
        for line in lines:
            # Matches "KEY=..." and the commented form "# KEY=...".
            # NOTE(review): if the key occurs on multiple lines, each match is
            # rewritten, leaving duplicates — confirm whether that's intended.
            if re.match(rf"^(?:# )?{key}=.*$", line):
                file.write(f"{key}={value}\n")
                key_already_in_file = True
            else:
                file.write(line)

        if not key_already_in_file:
            file.write(f"{key}={value}\n")

        # Drop any leftover bytes when the rewrite is shorter than the original.
        file.truncate()
|
||||
|
||||
|
||||
def is_port_free(port: int, host: str = "127.0.0.1"):
    """Report whether `port` on `host` can currently be bound.

    Returns True when a fresh TCP socket binds successfully (port free),
    False when the bind raises OSError (port likely in use).
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind((host, port))
    except OSError:
        return False
    else:
        return True
    finally:
        probe.close()
|
||||
|
||||
|
||||
def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]:
    """Wrap an async function so it can be invoked synchronously.

    The returned wrapper drives the coroutine to completion with
    `asyncio.run()` and hands back its result.
    """

    @functools.wraps(f)
    def sync_wrapper(*args: P.args, **kwargs: P.kwargs):
        coro = f(*args, **kwargs)
        return asyncio.run(coro)

    return sync_wrapper
|
||||
7
autogpt/azure.yaml.template
Normal file
7
autogpt/azure.yaml.template
Normal file
@@ -0,0 +1,7 @@
|
||||
# Azure OpenAI configuration template — copy to azure.yaml and replace the
# placeholder values with your own endpoint, API version and deployment IDs.
azure_api_type: azure
azure_api_version: api-version-for-azure
azure_endpoint: your-azure-openai-endpoint
# Maps OpenAI model names to the corresponding Azure deployment IDs.
azure_model_map:
  gpt-3.5-turbo-0125: gpt35-deployment-id-for-azure
  gpt-4-turbo-preview: gpt4-deployment-id-for-azure
  text-embedding-3-small: embedding-deployment-id-for-azure
|
||||
18
autogpt/codecov.yml
Normal file
18
autogpt/codecov.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
coverage:
  status:
    project:
      default:
        target: auto         # compare coverage against the base commit
        threshold: 1%        # tolerate up to a 1% drop before flagging
        informational: true  # report only; never fail the status check
    patch:
      default:
        target: 80%          # new/changed lines must be 80% covered

## Please add this section once you've separated your coverage uploads for unit and integration tests
#
# flags:
#   unit-tests:
#     carryforward: true
#   integration-tests:
#     carryforward: true
|
||||
53
autogpt/docker-compose.yml
Normal file
53
autogpt/docker-compose.yml
Normal file
@@ -0,0 +1,53 @@
|
||||
# To boot the app run the following:
# docker compose run auto-gpt
# NOTE: Version 3.9 requires at least Docker Compose version 2 and Docker Engine version 20.10.13!

version: "3.9"

services:
  auto-gpt:
    build:
      context: ../
      dockerfile: Dockerfile.autogpt
    env_file:
      - .env
    ports:
      - "8000:8000"
    volumes:
      - ./:/app/autogpt/
      - ./docker-compose.yml:/app/docker-compose.yml:ro
      # - ./Dockerfile:/app/Dockerfile:ro
    profiles: ["exclude-from-up"]

  # Only for TESTING purposes. Run with: docker compose run --build --rm autogpt-test
  autogpt-test:
    build:
      context: ../
      dockerfile: Dockerfile.autogpt
    env_file:
      - .env
    environment:
      S3_ENDPOINT_URL: http://minio:9000
      AWS_ACCESS_KEY_ID: minio
      AWS_SECRET_ACCESS_KEY: minio123
    entrypoint: ["poetry", "run"]
    command: ["pytest", "-v"]
    volumes:
      - ./autogpt:/app/autogpt/autogpt
      - ./tests:/app/autogpt/tests
    depends_on:
      - minio
    profiles: ["exclude-from-up"]

  minio:
    image: minio/minio
    environment:
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
    ports:
      # Quoted to sidestep YAML implicit-typing surprises with port mappings,
      # matching the quoted "8000:8000" mapping above.
      - "9000:9000"
    volumes:
      - minio-data:/data
    command: server /data
    profiles: ["exclude-from-up"]

volumes:
  minio-data:
|
||||
2
autogpt/hooks/post-checkout
Normal file
2
autogpt/hooks/post-checkout
Normal file
@@ -0,0 +1,2 @@
|
||||
#!/bin/sh
# Git hook: keep submodules in sync with the checked-out ref after every checkout.
git submodule update --init --remote --recursive
|
||||
4
autogpt/hooks/post-rewrite
Normal file
4
autogpt/hooks/post-rewrite
Normal file
@@ -0,0 +1,4 @@
|
||||
#!/bin/sh
# Git hook: after a history rewrite, refresh submodules — but only for rebases
# ($1 is the command that triggered the rewrite: "rebase" or "amend").
case "$1" in
rebase) git submodule update --init --recursive ;;
esac
|
||||
BIN
autogpt/plugin.png
Normal file
BIN
autogpt/plugin.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 33 KiB |
0
autogpt/plugins/.keep
Normal file
0
autogpt/plugins/.keep
Normal file
6761
autogpt/poetry.lock
generated
Normal file
6761
autogpt/poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user