Mirror of https://github.com/FoxxMD/context-mod.git, synced 2026-01-14 07:57:57 -05:00
Compare commits
480 Commits
@@ -4,3 +4,5 @@ Dockerfile
.gitignore
.git
src/logs
/docs
.github
.github/workflows/dockerhub.yml (vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
name: Publish Docker image to Dockerhub

on:
  push:
    branches:
      - 'master'
      - 'edge'
    tags:
      - '*.*.*'
    # don't trigger if just updating docs
    paths-ignore:
      - '**.md'

jobs:
  push_to_registry:
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
    steps:
      - name: Check out the repo
        uses: actions/checkout@v2

      - name: Log in to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v3
        with:
          images: foxxmd/context-mod
          # generate Docker tags based on the following events/attributes
          tags: |
            type=raw,value=latest,enable=${{ endsWith(github.ref, 'master') }}
            type=ref,event=branch,enable=${{ !endsWith(github.ref, 'master') }}
            type=semver,pattern={{version}}
          flavor: |
            latest=false

      - name: Build and push Docker image
        uses: docker/build-push-action@v2
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.idea/redditcontextbot.iml (generated, 2 lines changed)
@@ -5,7 +5,9 @@
      <excludeFolder url="file://$MODULE_DIR$/temp" />
      <excludeFolder url="file://$MODULE_DIR$/.tmp" />
      <excludeFolder url="file://$MODULE_DIR$/tmp" />
      <excludeFolder url="file://$MODULE_DIR$/src/logs" />
    </content>
    <content url="file://$MODULE_DIR$/node_modules" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
Dockerfile (12 lines changed)
@@ -1,8 +1,10 @@
FROM node:16-alpine3.12
FROM node:16-alpine3.14

ENV TZ=Etc/GMT

RUN apk update
# vips required to run sharp library for image comparison
RUN echo "http://dl-4.alpinelinux.org/alpine/v3.14/community" >> /etc/apk/repositories \
    && apk --update add vips

RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

@@ -24,4 +26,8 @@ RUN mkdir -p $log_dir
VOLUME $log_dir
ENV LOG_DIR=$log_dir

CMD [ "node", "src/index.js" ]
ARG webPort=8085
ENV PORT=$webPort
EXPOSE $PORT

CMD [ "node", "src/index.js", "run" ]
README.md (271 lines changed)
@@ -1,10 +1,8 @@
# reddit-context-bot

[](https://github.com/FoxxMD/reddit-context-bot/releases)
[](https://github.com/FoxxMD/context-mod/releases)
[](https://opensource.org/licenses/MIT)
[](https://hub.docker.com/r/foxxmd/reddit-context-bot)
[](https://hub.docker.com/r/foxxmd/context-mod)

**Context Bot** is an event-based, [reddit](https://reddit.com) moderation bot built on top of [snoowrap](https://github.com/not-an-aardvark/snoowrap) and written in [typescript](https://www.typescriptlang.org/).
**Context Mod** (CM) is an event-based, [reddit](https://reddit.com) moderation bot built on top of [snoowrap](https://github.com/not-an-aardvark/snoowrap) and written in [typescript](https://www.typescriptlang.org/).

It is designed to help fill in the gaps for [automoderator](https://www.reddit.com/wiki/automoderator/full-documentation) in regard to more complex behavior with a focus on **user-history based moderation.**
@@ -17,24 +15,36 @@ An example of the above that Context Bot can do now:

Some feature highlights:
* Simple rule-action behavior can be combined to create any level of complexity in behavior
* One instance can handle managing many subreddits (as many as it has moderator permissions in!)
* Per-subreddit configuration is handled by JSON stored in the subreddit wiki
* Any text-based actions (comment, submission, message, etc...) can be configured via a wiki page or raw text in JSON
* All text-based actions support [mustache](https://mustache.github.io) templating
* Server/client architecture
  * Default/no configuration runs "All In One" behavior
  * Additional configuration allows web interface to connect to multiple servers
  * Each server instance can run multiple reddit accounts as bots
* **Per-subreddit configuration** is handled by JSON stored in the subreddit wiki
* Any text-based actions (comment, submission, message, usernotes, ban, etc...) can be configured via a wiki page or raw text in JSON and support [mustache](https://mustache.github.io) [templating](/docs/actionTemplating.md)
* History-based rules support multiple "valid window" types -- [ISO 8601 Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations), [Day.js Durations](https://day.js.org/docs/en/durations/creating), and submission/comment count limits.
* All rules support skipping behavior based on author criteria -- name, css flair/text, and moderator status
* Docker container support *(coming soon...)*
* Support Activity skipping based on:
  * Author criteria (name, css flair/text, age, karma, moderator status, and [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes))
  * Activity state (removed, locked, distinguished, etc.)
* Rules and Actions support named references (write once, reference anywhere)
* [**Image Comparisons**](/docs/imageComparison.md) via fingerprinting and/or pixel differences
* Global/subreddit-level **API caching**
* Support for [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes) as criteria or Actions (writing notes)
* Docker container support
* Event notification via Discord
* **Web interface** for monitoring, administration, and oauth bot authentication

# Table of Contents

* [How It Works](#how-it-works)
* [Installation](#installation)
* [Configuration](#configuration)
* [Usage](#usage)
* [Getting Started](#getting-started)
* [Configuration And Documentation](#configuration-and-documentation)
* [Web UI and Screenshots](#web-ui-and-screenshots)

### How It Works

Context Bot's configuration is made up of an array of **Checks**. Each **Check** consists of:
Each subreddit using the RCB bot configures its behavior via their own wiki page.

When a monitored **Event** (new comment/submission, new modqueue item, etc.) is detected the bot runs through a list of **Checks** to determine what to do with the **Activity** from that Event. Each **Check** consists of:

#### Kind
@@ -42,228 +52,87 @@ Is this check for a submission or comment?

#### Rules

A list of **Rule** objects to run against the activity. If **any** Rule object is triggered by the activity then the Check runs its **Actions**
A list of **Rule** objects to run against the **Activity**. Triggered Rules can cause the whole Check to trigger and run its **Actions**

#### Actions

A list of **Action** objects that describe what the bot should do with the activity or author of the activity. The bot will run **all** Actions in this list.
A list of **Action** objects that describe what the bot should do with the **Activity** or **Author** of the activity (comment, remove, approve, etc.). The bot will run **all** Actions in this list.

___

The **Checks** for a subreddit are split up into **Submission Checks** and **Comment Checks** based on their **kind**. Each list of checks is run independently based on when events happen (submission or comment).

When an event occurs all Checks of that type are run in the order they were listed in the configuration. When one check is triggered (an action is performed) the remaining checks will not be run.
When an Event occurs all Checks of that type are run in the order they were listed in the configuration. When one check is triggered (an Action is performed) the remaining checks will not be run.

## Installation
___

[Learn more about the RCB lifecycle and core concepts in the docs.](/docs#how-it-works)

### Locally
## Getting Started

Clone this repository somewhere and then install from the working directory
#### Operators

```bash
git clone https://github.com/FoxxMD/reddit-context-bot.git .
cd reddit-context-bot
npm install
```
This guide is for users who want to **run their own bot on a ContextMod instance.**

### [Docker](https://hub.docker.com/r/foxxmd/reddit-context-bot)
See the [Operator's Getting Started Guide](/docs/gettingStartedOperator.md)

```
foxxmd/reddit-context-bot:latest
```
#### Moderators

Adding [**environmental variables**](#usage) to your `docker run` command will pass them through to the app EX:
```
docker run -e "CLIENT_ID=myId" ... foxxmd/reddit-context-bot
```
This guide is for **reddit moderators** who want to configure an existing CM bot to run on their subreddit.

## Configuration
See the [Moderator's Getting Started Guide](/docs/gettingStartedMod.md)

Context Bot's [configuration schema](/src/Schema/App.json) conforms to [JSON Schema](https://json-schema.org/) Draft 7.
## Configuration and Documentation

I suggest using [Atlassian JSON Schema Viewer](https://json-schema.app/start) ([direct link](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fmaster%2Fsrc%2FSchema%2FApp.json)) so you can view all documentation while also interactively writing and validating your config! From there you can drill down into any object, see its requirements, view an example JSON document, and live-edit your configuration on the right-hand side.
Context Bot's configuration can be written in JSON, [JSON5](https://json5.org/) or YAML. Its schema conforms to [JSON Schema Draft 7](https://json-schema.org/). Additionally, many **operator** settings can be passed via command line or environmental variables.

### Action Templating
* For **operators** (running the bot instance) see the [Operator Configuration](/docs/operatorConfiguration.md) guide
* For **moderators** consult the [app schema and examples folder](/docs/#configuration-and-usage)

Actions that can submit text (Report, Comment) will have their `content` values run through a [Mustache Template](https://mustache.github.io/). This means you can insert data generated by Rules into your text before the Action is performed.
[**Check the full docs for in-depth explanations of all concepts and examples**](/docs)

See here for a [cheatsheet](https://gist.github.com/FoxxMD/d365707cf99fdb526a504b8b833a5b78) and [here](https://www.tsmean.com/articles/mustache/the-ultimate-mustache-tutorial/) for a more thorough tutorial.
## Web UI and Screenshots

All Actions with `content` have access to this data:
### Dashboard

```json5
{
  item: {
    kind: 'string', // the type of item (comment/submission)
    author: 'string', // name of the item author (reddit user)
    permalink: 'string', // a url to the item
    url: 'string', // if the item is a Submission then its URL (external for link type submission, reddit link for self-posts)
    title: 'string', // if the item is a Submission, then the title of the Submission
  },
  rules: {
    // contains all rules that were run and are accessible using the name, lowercased, with all spaces/dashes/underscores removed
  }
}
CM comes equipped with a dashboard designed for use by both moderators and bot operators.

```
* Authentication via Reddit OAuth -- only accessible if you are the bot operator or a moderator of a subreddit the bot moderates
* Connect to multiple ContextMod instances (specified in configuration)
* Monitor API usage/rates
* Monitoring and administration **per subreddit:**
  * Start/stop/pause various bot components
  * View statistics on bot usage (# of events, checks run, actions performed) and cache usage
  * View various parts of your subreddit's configuration and manually update configuration
  * View **real-time logs** of what the bot is doing on your subreddit
  * **Run bot on any permalink**

The properties of `rules` are accessible using the name, lower-cased, with all spaces/dashes/underscores removed. If no name is given, `kind` is used as the `name`. Example:

```
"rules": [
  {
    "name": "My Custom-Recent Activity Rule", // mycustomrecentactivityrule
    "kind": "recentActivity"
  },
  {
    // name = repeatsubmission
    "kind": "repeatSubmission",
  }
]
```
### Bot Setup/Authentication

**To see what data is available for individual Rules [consult the schema](#configuration) for each Rule.**
A bot oauth helper allows operators to define oauth credentials/permissions and then generate unique, one-time invite links that allow moderators to authenticate their own bots without operator assistance. [Learn more about using the oauth helper.](docs/botAuthentication.md#cm-oauth-helper-recommended)

#### Quick Templating Tutorial
Operator view/invite link generation:

<details>

As a quick example for how you will most likely be using templating -- wrapping a variable in curly brackets, `{{variable}}`, will cause the variable value to be rendered instead of the brackets:
```
myVariable = 50;
myOtherVariable = "a text fragment"
template = "This is my template, the variable is {{myVariable}}, my other variable is {{myOtherVariable}}, and that's it!";
Moderator view/invite and authorization:

console.log(Mustache.render(template, {myVariable, myOtherVariable}));
// will render...
"This is my template, the variable is 50, my other variable is a text fragment, and that's it!";
```

**Note: When accessing an object or its properties you must use dot notation**
```
const item = {
  aProperty: 'something',
  anotherObject: {
    bProperty: 'something else'
  }
}
const content = "My content will render the property {{item.aProperty}} like this, and another nested property {{item.anotherObject.bProperty}} like this."
```
</details>
### Configuration Editor

### Example Config
A built-in editor using [monaco-editor](https://microsoft.github.io/monaco-editor/) makes editing configurations easy:

Below is a configuration fulfilling the example given at the start of this readme:
* Automatic JSON syntax validation and formatting
* Automatic Schema (subreddit or operator) validation
* All properties are annotated via hover popups
* Unauthenticated view via `yourdomain.com/config`
* Authenticated view loads subreddit configurations by simple link found on the subreddit dashboard
* Switch schemas to edit either subreddit or operator configurations

<details>
<summary>Click to expand configuration</summary>

```json
{
  "checks": [
    {
      "name": "repeatSpam",
      "kind": "submission",
      "rules": [
        {
          "kind": "repeatSubmission",
          "gapAllowance": 2,
          "threshold": 10
        }
      ],
      "actions": [
        {
          "kind": "remove"
        },
        {
          "kind": "comment",
          "content": "Thank you for your submission but we do not allow mass crossposting. Your submission has been removed",
          "distinguish": true
        }
      ]
    },
    {
      "name": "selfPromoActivity",
      "kind": "submission",
      "rules": [
        {
          "kind": "recentActivity",
          "thresholds": [
            {
              "subreddits": [
                "YouTubeSubscribeBoost",
                "AdvertiseYourVideos"
              ]
            }
          ]
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "User posted link {{rules.recentactivity.totalCount}} times in {{rules.recentactivity.subCount}} SP subs: {{rules.recentactivity.summary}}"
        }
      ]
    }
  ]
}
```
</details>
## Usage

`npm run start [list,of,subreddits] [...--options]`

CLI options take precedence over environmental variables

| CLI | Environmental Variable | Required | Description |
|------------------|------------------------|----------|-------------|
| [First Argument] | | No | Comma-delimited list of subreddits to run on if you don't want to run all the account has access to. |
| --clientId | CLIENT_ID | **Yes** | Your reddit application client id |
| --clientSecret | CLIENT_SECRET | **Yes** | Your reddit application client secret |
| --accessToken | ACCESS_TOKEN | **Yes** | A valid access token retrieved from completing the oauth flow for a user with your application. |
| --refreshToken | REFRESH_TOKEN | **Yes** | A valid refresh token retrieved from completing the oauth flow for a user with your application. |
| --logDir | LOG_DIR | No | The absolute path to where logs should be stored. Use `false` to turn off log files. Defaults to `CWD/logs` |
| --logLevel | LOG_LEVEL | No | The minimum level to log at. Uses [Winston Log Levels](https://github.com/winstonjs/winston#logging-levels). Defaults to `info` |
| --wikiConfig | WIKI_CONFIG | No | The location of the bot configuration in the subreddit wiki. Defaults to `botconfig/contextbot` |

### Reddit App??

To use this bot you must do two things:
* Create a reddit application
* Authenticate that application to act as a user (login to the application with an account)

#### Create Application

Visit [your reddit preferences](https://www.reddit.com/prefs/apps) and at the bottom of the page go through the **create an(other) app** process.
* Choose **script**
* For redirect uri use https://not-an-aardvark.github.io/reddit-oauth-helper/
* Write down your **Client ID** and **Client Secret** somewhere

#### Authenticate an Account

Visit https://not-an-aardvark.github.io/reddit-oauth-helper/
* Input your **Client ID** and **Client Secret** in the text boxes with those names.
* Choose scopes. **It is very important you check everything on this list or Context Bot will not work correctly**
  * edit
  * flair
  * history
  * identity
  * modcontributors
  * modflair
  * modposts
  * modself
  * mysubreddits
  * read
  * report
  * submit
  * wikiread
* Click **Generate tokens**, you will get a popup asking you to approve access (or login) -- **the account you approve access with is the account that Bot will control.**
* After approving, an **Access Token** and **Refresh Token** will be shown at the bottom of the page. Write these down.

You should now have all the information you need to start the bot.

## License
app.json (new file, 43 lines)
@@ -0,0 +1,43 @@
{
  "name": "Reddit Context Bot",
  "description": "An event-based, reddit moderation bot built on top of snoowrap and written in typescript",
  "repository": "https://github.com/FoxxMD/context-mod",
  "stack": "container",
  "env": {
    "CLIENT_ID": {
      "description": "Client ID for your Reddit application",
      "value": "",
      "required": true
    },
    "CLIENT_SECRET": {
      "description": "Client Secret for your Reddit application",
      "value": "",
      "required": true
    },
    "REFRESH_TOKEN": {
      "description": "Refresh token retrieved from authenticating an account with your Reddit Application",
      "value": "",
      "required": false
    },
    "ACCESS_TOKEN": {
      "description": "Access token retrieved from authenticating an account with your Reddit Application",
      "value": "",
      "required": false
    },
    "REDIRECT_URI": {
      "description": "Redirect URI you specified when creating your Reddit Application. Required if you want to use the web interface. In the provided example replace 'your-heroku-app-name' with the name of your HEROKU app.",
      "value": "https://your-heroku-app-name.herokuapp.com/callback",
      "required": false
    },
    "OPERATOR": {
      "description": "Your reddit username WITHOUT any prefixes EXAMPLE /u/FoxxMD => FoxxMD. Specified user will be recognized as an admin.",
      "value": "",
      "required": false
    },
    "WIKI_CONFIG": {
      "description": "Relative url to contextbot wiki page EX https://reddit.com/r/subreddit/wiki/<path>",
      "value": "botconfig/contextbot",
      "required": false
    }
  }
}
cliff.toml (new file, 67 lines)
@@ -0,0 +1,67 @@
# configuration file for git-cliff (0.1.0)

[changelog]
# changelog header
header = """
# Changelog
All notable changes to this project will be documented in this file.\n
"""
# template for the changelog body
# https://tera.netlify.app/docs/#introduction
body = """
{% if version %}\
## [{{ version | replace(from="v", to="") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% else %}\
## [unreleased]
{% endif %}\
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group | upper_first }}
{% for commit in commits
| filter(attribute="scope")
| sort(attribute="scope") %}
- *({{commit.scope}})* {{ commit.message | upper_first }}
{%- if commit.breaking %}
{% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
{%- endif -%}
{%- endfor -%}
{%- for commit in commits %}
{%- if commit.scope -%}
{% else -%}
- *(No Category)* {{ commit.message | upper_first }}
{% if commit.breaking -%}
{% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
{% endif -%}
{% endif -%}
{% endfor -%}
{% endfor %}
"""
# remove the leading and trailing whitespaces from the template
trim = true
# changelog footer
footer = """
<!-- generated by git-cliff -->
"""

[git]
# allow only conventional commits
# https://www.conventionalcommits.org
conventional_commits = true
# regex for parsing and grouping commits
commit_parsers = [
    { message = "^feat", group = "Features"},
    { message = "^fix", group = "Bug Fixes"},
    { message = "^doc", group = "Documentation"},
    { message = "^perf", group = "Performance"},
    { message = "^refactor", group = "Refactor"},
    { message = "^style", group = "Styling"},
    { message = "^test", group = "Testing"},
    { message = "^chore\\(release\\): prepare for", skip = true},
    { message = "^chore", group = "Miscellaneous Tasks"},
    { body = ".*security", group = "Security"},
]
# filter out the commits that are not matched by commit parsers
filter_commits = false
# glob pattern for matching git tags
tag_pattern = "[0-9]*"
# regex for skipping tags
skip_tags = "v0.1.0-beta.1"
docs/README.md (new file, 344 lines)
@@ -0,0 +1,344 @@
# Documentation

# Table of Contents

* [Getting Started](#getting-started)
* [How It Works](#how-it-works)
* [Concepts](#concepts)
  * [Check](#checks)
  * [Rule](#rule)
    * [Examples](#available-rules)
  * [Rule Set](#rule-set)
    * [Examples](#rule-set-examples)
  * [Action](#action)
    * [Examples](#available-actions)
  * [Filters](#filters)
* [Configuration and Usage](#configuration-and-usage)
* [Common Resources](#common-resources)
  * [Activities `window`](#activities-window)
  * [Comparisons](#thresholds-and-comparisons)
  * [Activity Templating](/docs/actionTemplating.md)
  * [Image Comparisons](#image-comparisons)
* [Best Practices](#best-practices)
  * [Named Rules](#named-rules)
  * [Rule Order](#rule-order)
  * [Caching](#caching)
* FAQ

## Getting Started

Review **at least** the **How It Works** and **Concepts** below, then:

* For **Operators** (running a bot instance) refer to the [**Operator Getting Started**](/docs/gettingStartedOperator.md) guide
* For **Moderators** (configuring an existing bot for your subreddit) refer to the [**Moderator Getting Started**](/docs/gettingStartedMod.md) guide

## How It Works

Where possible Context Mod (CM) uses the same terminology as, and emulates the behavior of, **automoderator**, so if you are familiar with that then much of this may seem familiar to you.

CM's lifecycle looks like this:

#### 1) A new event in your subreddit is received by CM

The events CM watches for are configured by you. These can be new modqueue/unmoderated items, submissions, or comments.

#### 2) CM sequentially processes each Check in your configuration

A **Check** is a set of:

* One or more **Rules** that define what conditions should **trigger** this Check
* One or more **Actions** that define what the bot should do once the Check is **triggered**

#### 3) Each Check is processed, *in order*, until a Check is triggered

Once a Check is **triggered** no more Checks will be processed. This means all subsequent Checks in your configuration (in the order you listed them) are basically skipped.

#### 4) All Actions from that Check are executed

After all Actions are executed CM returns to waiting for the next Event.

## Concepts

Core, high-level concepts regarding how CM works.

### Checks

A **Check** is the main logical unit of behavior for the bot. It is equivalent to "if X then Y". A Check is composed of:

* One or more **Rules** that are tested against an **Activity**
* One or more **Actions** that are performed when the **Rules** are satisfied

The bot's configuration can be made up of one or more **Checks** that are processed **in the order they are listed in the configuration.**

Once a Check is **triggered** (its Rules are satisfied and Actions performed) all subsequent Checks are skipped.

Some other important concepts regarding Checks:

* All Checks have a **kind** (defined in the configuration) that determines if they should run on **Submissions** or **Comments**
* Checks have a **condition** property that determines when they are considered **triggered**
  * If the **condition** is `AND` then ALL of their **Rules** must be **triggered** for the Check to be **triggered**
  * If the **condition** is `OR` then if ANY **Rule** is **triggered** the Check is **triggered**

Examples of different types of Checks can be found in the [subreddit-ready examples.](/docs/examples/subredditReady)
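To make the pieces above concrete, here is a minimal illustrative sketch of a single Check, using only the `recentActivity` rule and `remove` action kinds that appear in the README example config; any property not shown there should be verified against the schema:

```json5
{
  "checks": [
    {
      "name": "promoRemoval",          // illustrative name
      "kind": "submission",            // run this Check on new submissions
      "condition": "OR",               // triggered if ANY rule below triggers
      "rules": [
        {
          "kind": "recentActivity",
          "thresholds": [
            { "subreddits": ["YouTubeSubscribeBoost"] }
          ]
        }
      ],
      "actions": [
        { "kind": "remove" }
      ]
    }
  ]
}
```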
### Rule

A **Rule** is some set of **criteria** (conditions) that are tested against an Activity (comment/submission), a User, or a User's history. A Rule is considered **triggered** when the **criteria** for that rule are found to be **true** for whatever is being tested against.

There are generally three main properties for a Rule:

* **Criteria** -- The conditions/values you want to test for.
* **Activities Window** -- If applicable, the range of activities that the **criteria** will be tested against.
* **Rule-specific options** -- Any number of options that modify how the **criteria** are tested.

CM has different **Rules** that can test against different types of behavior and aspects of a User, their history, and the Activity (submission/comment) being checked.
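A sketch of how those three properties typically appear on a single Rule, borrowing the Recent Activity rule from the README example; the placement of `window` directly on the rule is an assumption made for illustration:

```json5
{
  "kind": "recentActivity",            // which Rule to run
  "thresholds": [                      // criteria: look for activity in these subreddits
    { "subreddits": ["YouTubeSubscribeBoost", "AdvertiseYourVideos"] }
  ],
  "window": "90 days"                  // activities window: how much author history to check
}
```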
#### Available Rules
Find detailed descriptions of all the Rules, with examples, below:

* [Attribution](/docs/examples/attribution)
* [Recent Activity](/docs/examples/recentActivity)
* [Repeat Activity](/docs/examples/repeatActivity)
* [History](/docs/examples/history)
* [Author](/docs/examples/author)
* [Regex](/docs/examples/regex)

### Rule Set

A **Rule Set** is a "grouped" set of `Rules` with a **trigger condition** specified.

Rule Sets can be used interchangeably with other **Rules** and **Rule Sets** in the `rules` list of a **Check**.

They allow you to create more complex trigger behavior by combining multiple rules into one "parent rule".

A Rule Set consists of:

* **condition** -- Under what condition should the Rule Set be considered triggered?
  * `AND` -- ALL Rules in the Rule Set must **trigger** in order for the Rule Set to **trigger.**
  * `OR` -- ANY Rule in the Rule Set that is **triggered** will trigger the whole Rule Set.
* **rules** -- The **Rules** for the Rule Set.

Example
```json5
{
  "condition": "AND",
  "rules": [
    // all the rules go here
  ]
}
```
#### Rule Set Examples

* [**Detailed Example**](/docs/examples/advancedConcepts/ruleSets.json5)

### Action

An **Action** is some action the bot can take against the checked Activity (comment/submission) or Author of the Activity. CM has Actions for most things a normal reddit user or moderator can do.

#### Available Actions

* Remove (Comment/Submission)
* Flair (Submission)
* Ban (User)
* Approve (Comment/Submission)
* Comment (Reply to Comment/Submission)
* Lock (Comment/Submission)
* Report (Comment/Submission)
* [UserNote](/docs/examples/userNotes) (User, when /r/Toolbox is used)

For detailed explanation and options of what individual Actions can do [see the links in the `actions` property in the schema.](https://json-schema.app/view/%23/%23%2Fdefinitions%2FSubmissionCheckJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json)
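As a rough illustration, an `actions` list combining two of the kinds above, mirroring the README example config (other Action kinds and their options are described in the schema):

```json5
"actions": [
  {
    "kind": "remove"                  // remove the checked Activity
  },
  {
    "kind": "comment",                // reply to the Activity
    "content": "Removed: please review the subreddit rules.",
    "distinguish": true               // distinguish the reply as a mod comment
  }
]
```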
### Filters

**Checks, Rules, and Actions** all have two additional (optional) criteria "tests". These tests behave differently than rule/check triggers in that:

* When they **pass** the thing being tested continues to process as usual
* When they **fail** the thing being tested **is skipped, not failed.**

For **Checks** and **Actions** skipping means that the thing is not processed. The Action is not run, the Check is not triggered.

In the context of **Rules** (in a Check) skipping means the Rule does not get run BUT it does not fail. The Check will continue processing as if the Rule did not exist. However, if ALL Rules in a Check are skipped then the Check does "fail" (is not triggered).

#### Available Filters

##### Item Filter (`itemIs`)

This filter will test against the **state of the Activity currently being run.** Some criteria available to test against IE "Is the activity...":

* removed
* nsfw
* locked
* stickied
* deleted
* etc...

The `itemIs` filter is made up of an array (list) of `State` criteria objects. **All** criteria in the array must pass for this filter to pass.

There are two different State criteria depending on what type of Activity is being tested:

* Submission -- [SubmissionState](https://json-schema.app/view/%23/%23%2Fdefinitions%2FSubmissionCheckJson/%23%2Fdefinitions%2FSubmissionState?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json)
* Comment -- [CommentState](https://json-schema.app/view/%23/%23%2Fdefinitions%2FCommentCheckJson/%23%2Fdefinitions%2FCommentState?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json)

##### Author Filter (`authorIs`)

This filter will test against the **Author of the Activity currently being run.** Some criteria available to test against:

* account age
* comment, link, and total karma
* subreddit flair text/css
* name
* User Notes
* verified
* etc...

The `authorIs` filter is made up of two (optional) lists of [`AuthorCriteria`](https://json-schema.app/view/%23/%23%2Fdefinitions%2FSubmissionCheckJson/%23%2Fdefinitions%2FAuthorOptions/%23%2Fdefinitions%2FAuthorCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) criteria objects that define how the test behaves:

* `include` list -- If **any** `AuthorCriteria` from this list passes then the `authorIs` test passes
* `exclude` list -- If **any** `AuthorCriteria` from this list **does not pass** then the `authorIs` test passes. **Note:** This property is ignored if `include` is also present IE you cannot use both properties at the same time.

Refer to the [app schema for `AuthorCriteria`](https://json-schema.app/view/%23/%23%2Fdefinitions%2FSubmissionCheckJson/%23%2Fdefinitions%2FAuthorOptions/%23%2Fdefinitions%2FAuthorCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for all available properties to test against.

Some examples of using `authorIs` can be found in the [Author examples.](/docs/examples/author)
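A hedged sketch of how both filters could sit on a Check, using only criteria named above; the exact property names inside the state and author criteria objects are assumptions and should be checked against the linked schema:

```json5
{
  "kind": "submission",
  "itemIs": [
    {
      "removed": false,               // only process submissions that are not already removed
      "locked": false
    }
  ],
  "authorIs": {
    "exclude": [
      { "name": ["AutoModerator"] }   // skip the Check when the author matches this criteria
    ]
  },
  "rules": [ /* ... */ ],
  "actions": [ /* ... */ ]
}
```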
## Configuration And Usage

* For **Operator/Bot maintainers** see **[Operator Configuration](/docs/operatorConfiguration.md)**
  * [CLI Usage](docs/operatorConfiguration.md#cli-usage)
* For **Moderators**
  * Refer to the [examples folder](/docs/examples) or the [subreddit-ready examples](/docs/examples/subredditReady)
  * as well as the [schema editor](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) which has
    * fully annotated configuration data/structure
    * generated examples in json/yaml
    * built-in editor that automatically validates your config

## Common Resources

Technical information on recurring, common data/patterns used in CM.

### Activities `window`

Most **Rules** must define the **range of Activities (submissions and/or comments)** that will be used to check the criteria of the Rule. This range is defined wherever you see a `window` property in configuration.

Refer to the [Activities Window](/docs/activitiesWindow.md) documentation for a technical explanation with examples.

### Thresholds and Comparisons

Most rules/filters have criteria that require you to define a specific condition to test against. This can be anything from repeats of activities to account age.

In all of these scenarios the condition is defined using a subset of [comparison operators](https://www.codecademy.com/articles/fwd-js-comparison-logical) (very similar to how automoderator does things).

Available operators:

* `<` -- **less than** => `5 < 6` => 5 is less than 6
* `>` -- **greater than** => `6 > 5` => 6 is greater than 5
* `<=` -- **less than or equal to** => `5 <= 5` => 5 is less than **or equal to** 5
* `>=` -- **greater than or equal to** => `5 >= 5` => 5 is greater than **or equal to** 5

In the context of a rule/filter comparison you provide the comparison **omitting** the value that is being tested. An example...

The RepeatActivity rule has a `threshold` comparison to test against the number of repeat activities it finds:

* You want the rule to trigger if it finds **4 or more repeat activities**
* The rule would be configured like this: `"threshold": ">= 4"`

Essentially what this is telling the rule is `threshold: "x >= 4"` where `x` is the largest number of repeat activities it finds.
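Putting that together, a sketch of a Repeat Activity rule configured with a comparison string; `gapAllowance` and the `window` value are borrowed from examples elsewhere in this repository and their exact semantics should be confirmed in the schema:

```json5
{
  "kind": "repeatActivity",
  "threshold": ">= 4",       // trigger when 4 or more repeats are found
  "gapAllowance": 2,         // allowed gaps between repeats (see schema for exact meaning)
  "window": "3 days"         // look at the author's last 3 days of activity
}
```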
#### Other Comparison Types

Other than comparing numeric values, there are two other kinds of values that can be compared (depending on the criteria):

##### Percentages

Some criteria accept an optional **percentage** to compare against:

```
"threshold": "> 20%"
```

Refer to the individual rule/criteria schema to see what this percentage is comparing against.

##### Durations

Some criteria accept an optional **duration** to compare against:

```
"threshold": "< 1 month"
```

The duration value compares a time range from **now** to `duration value` time in the past.

Refer to [duration values in activity window documentation](/docs/activitiesWindow.md#duration-values) as well as the individual rule/criteria schema to see what this duration is comparing against.

### Image Comparisons

ContextMod implements two methods for comparing **image content**: perceptual hashing and pixel-to-pixel comparisons. These comparisons can be used to filter Activities in some Rules.

See [image comparison documentation](/docs/imageComparison.md) for a full reference.

## Best Practices

### Named Rules

All **Rules** in a subreddit's configuration can be assigned a **name** that can then be referenced from any other Check.

Create general-use rules so they can be reused and de-clutter your configuration. Additionally, CM will automatically cache the result of a rule so there is a performance and api usage benefit to re-using Rules.

See [ruleNameReuse.json5](/docs/examples/advancedConcepts/ruleNameReuse.json5) for a detailed configuration with annotations.
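A rough sketch of the idea; note that referencing a named Rule by its name as a plain string in another Check's `rules` list is assumed here, so treat ruleNameReuse.json5 as the authoritative form:

```json5
{
  "checks": [
    {
      "name": "reportPromo",
      "kind": "submission",
      "rules": [
        {
          "name": "freeKarmaActivity",   // defined once, with a name...
          "kind": "recentActivity",
          "thresholds": [{ "subreddits": ["FreeKarma4U"] }]
        }
      ],
      "actions": [{ "kind": "report", "content": "Recent activity in free-karma subs" }]
    },
    {
      "name": "removePromo",
      "kind": "submission",
      "rules": ["freeKarmaActivity"],    // ...then referenced by name (assumed syntax)
      "actions": [{ "kind": "remove" }]
    }
  ]
}
```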
### Check Order

Checks are run in the order they appear in your configuration, so you should place your highest requirement/most severe action checks at the top and lowest requirement/moderate action checks at the bottom.

This is so that, if an Activity warrants a more serious reaction, that Check is triggered first rather than a lower-requirement check with less severe actions being triggered and causing all subsequent Checks to be skipped. For example:

* Attribution >50% AND Repeat Activity 8x AND Recent Activity in 2 subs => remove submission + ban
* Attribution >20% AND Repeat Activity 4x AND Recent Activity in 5 subs => remove submission + flair user restricted
* Attribution >20% AND Repeat Activity 2x => remove submission
* Attribution >20% AND History comments <30% => remove submission
* Attribution >15% => report
* Repeat Activity 2x => report
* Recent Activity in 3 subs => report
* Author not vetted => flair new user submission

### Rule Order

The ordering of your Rules within a Check/RuleSet can have an impact on Check performance (speed) as well as API usage.

Consider these three rules:

* Rule A -- Recent Activity => 3 subreddits => last 15 submissions
* Rule B -- Repeat Activity => last 3 days
* Rule C -- Attribution => >10% => last 90 days or 300 submissions

The first two rules are lightweight in their requirements -- Rule A can be completed in 1 API call, Rule B potentially completed in 1 API call.

However, depending on how active the Author is, Rule C will take *at least* 3 API calls just to get all activities (Reddit limit 100 items per call).

If the Check is using the `AND` condition for its rules (default) then if either Rule A or Rule B fails, Rule C will never run. This means 3 API calls are never made, plus the time spent waiting for each to return is saved.

**It is therefore advantageous to list your lightweight Rules first in each Check.**

### Caching

ContextMod implements caching functionality for:

* author history (`window` criteria in rules)
* `authorIs` results
* `content` that uses wiki pages (on Comment/Report/Ban Actions)
* and User Notes

All of these use api requests so caching them reduces api usage.

Cached results can be re-used if the criteria in the configuration are identical to a previously cached result. So...

* author history cache results are re-used if **`window` criteria on a Rule is identical to the `window` on another Rule** IE always use **7 Days** or always use **50 Items** for absolute counts.
* `authorIs` results are re-used if the criteria are identical to another `authorIs` elsewhere in the configuration.
* etc...

Re-use will result in fewer API calls and faster Check times.

PROTIP: You can monitor the re-use of cache in the `Cache` section of your subreddit on the web interface. See the tooltips in that section for a better breakdown of cache statistics.

## FAQ

TODO
docs/actionTemplating.md (new file, 72 lines)
@@ -0,0 +1,72 @@
Actions that can submit text (Report, Comment) will have their `content` values run through a [Mustache Template](https://mustache.github.io/). This means you can insert data generated by Rules into your text before the Action is performed.

See here for a [cheatsheet](https://gist.github.com/FoxxMD/d365707cf99fdb526a504b8b833a5b78) and [here](https://www.tsmean.com/articles/mustache/the-ultimate-mustache-tutorial/) for a more thorough tutorial.

All Actions with `content` have access to this data:

```json5
{
  item: {
    kind: 'string', // the type of item (comment/submission)
    author: 'string', // name of the item author (reddit user)
    permalink: 'string', // a url to the item
    url: 'string', // if the item is a Submission then its URL (external for link type submission, reddit link for self-posts)
    title: 'string', // if the item is a Submission, then the title of the Submission
    botLink: 'string' // a link to the bot's FAQ
  },
  rules: {
    // contains all rules that were run and are accessible using the name, lowercased, with all spaces/dashes/underscores removed
  }
}
```

The properties of `rules` are accessible using the name, lower-cased, with all spaces/dashes/underscores removed. If no name is given, `kind` is used as the `name`. Example:

```
"rules": [
  {
    "name": "My Custom-Recent Activity Rule", // mycustomrecentactivityrule
    "kind": "recentActivity"
  },
  {
    // name = repeatactivity
    "kind": "repeatActivity",
  }
]
```
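For instance, a Report Action's `content` could reference that data; a sketch using the `item` properties and the rule name shown above (the data each individual Rule actually exposes is defined in its schema):

```json5
{
  "kind": "report",
  "content": "{{item.author}} triggered {{rules.mycustomrecentactivityrule.summary}} -- see {{item.permalink}}"
}
```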
**To see what data is available for individual Rules [consult the schema](#configuration) for each Rule.**

#### Quick Templating Tutorial

As a quick example for how you will most likely be using templating -- wrapping a variable in curly brackets, `{{variable}}`, will cause the variable value to be rendered instead of the brackets:

```
const Mustache = require('mustache');

const myVariable = 50;
const myOtherVariable = "a text fragment";
const template = "This is my template, the variable is {{myVariable}}, my other variable is {{myOtherVariable}}, and that's it!";

console.log(Mustache.render(template, {myVariable, myOtherVariable}));
// will render...
"This is my template, the variable is 50, my other variable is a text fragment, and that's it!";
```

**Note: When accessing an object or its properties you must use dot notation**

```
const item = {
  aProperty: 'something',
  anotherObject: {
    bProperty: 'something else'
  }
}
const content = "My content will render the property {{item.aProperty}} like this, and another nested property {{item.anotherObject.bProperty}} like this."
```
docs/activitiesWindow.md (new file, 217 lines)

# Activity Window

Most **Rules** have a `window` property somewhere within their configuration. This property defines the range of **Activities** (submissions and/or comments) that should be retrieved for checking the criteria of the Rule.

As an example: if you want to run a **Recent Activity Rule** to check whether a user has had activity in /r/mealtimevideos, you also need to define what range of activities you want to look at from that user's history.

## `window` property overview (tldr)

The value of `window` can be any of these types:

* `number` count of activities
* `string` [duration](#duration-string-recommended) or [iso 8601](#an-iso-8601-duration-string)
* [duration `object`](#duration-object)
* [ActivityWindowCriteria `object`](#activitywindowcriteria)

Examples of all of the above:

<details>

```
// count, last 100 activities
{
  "window": 100
}

// duration string, last 10 days
{
  "window": "10 days"
}

// duration object, last 2 months and 5 days
{
  "window": {
    "months": 2,
    "days": 5,
  }
}

// iso 8601 string, last 15 minutes
{
  "window": "PT15M"
}

// ActivityWindowCriteria, last 100 activities or 6 weeks of activities (whichever is found first)
{
  "window": {
    "count": 100,
    "duration": "6 weeks"
  }
}
```
</details>

## Types of Ranges

There are two types of values that can be used when defining a range:

### Count

This is the **number** of activities you want to retrieve. It's straightforward -- if you want to look at the last 100 activities for a user you can use `100` as the value.

### Duration

A **duration of time** between which all activities will be retrieved. This is a **relative value** -- the actual range is calculated by **subtracting the duration from the time the rule is run.**

For example:

* Today is **July 15th**
* You define a duration of **10 days**

Then the range of activities to be retrieved will be between **July 5th and July 15th** (10 days).
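Conceptually the cutoff for a duration window is computed with [Dayjs](https://day.js.org) subtraction; a minimal sketch (not the bot's actual code):

```
const dayjs = require('dayjs');

// a "10 days" window run on July 15th => cutoff is July 5th
const cutoff = dayjs().subtract(10, 'days');
// an activity is inside the window if it was created after the cutoff
const inWindow = (activityCreatedAt) => dayjs(activityCreatedAt).isAfter(cutoff);
```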
#### Duration Values

The value used to define the duration can be **any of these three types**:

##### Duration String (recommended)

A string consisting of:

* A [Dayjs unit of time](https://day.js.org/docs/en/durations/creating#list-of-all-available-units)
* The value of that unit of time

Examples:

* `9 days`
* `14 hours`
* `80 seconds`

You can ensure your string is valid by testing it [here.](https://regexr.com/61em3)

##### Duration Object

If you need to specify multiple units of time for your duration you can instead provide a [Dayjs duration **object**](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) consisting of Dayjs unit-values.

Example:

```json
{
  "days": 4,
  "hours": 6,
  "minutes": 20
}
```

##### An ISO 8601 duration string

If you're a real nerd you can also use a [standard duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) string.

Examples:

* `PT15M` (15 minutes)

Ensure your string is valid by testing it [here.](https://regexr.com/61em9)
## ActivityWindowCriteria

This is an object that lets you specify more granular conditions for your range.

The full object looks like this:

```json
{
  "count": 100,
  "duration": "10 days",
  "satisfyOn": "any",
  "subreddits": {
    "include": ["mealtimevideos","pooptimevideos"],
    "exclude": ["videos"]
  }
}
```

### Specifying Range

You may use **one or both range properties.**

If both range properties are specified then the value of `satisfyOn` determines how the final range is calculated.

#### Using `"satisfyOn": "any"` (default)

If **any**, then Activities will be retrieved until one of the range properties is met, **whichever occurs first.**

Example:

```json
{
  "count": 80,
  "duration": "90 days",
  "satisfyOn": "any"
}
```

Activities are retrieved in chunks of 100 (or `count`, whichever is smaller):

* If 90 days of activities returns only 40 activities => returns 40 activities
* If 80 activities covers only 20 days of range => returns 80 activities

#### Using `"satisfyOn": "all"`

If **all**, then both ranges must be satisfied. Effectively, whichever range produces the most Activities will be the one that is used.

Example:

```json
{
  "count": 100,
  "duration": "90 days",
  "satisfyOn": "all"
}
```

Activities are retrieved in chunks of 100 (or `count`, whichever is smaller):

* If 90 days of activities yields only 40 activities
  * continue retrieving results until 100 activities are found
  * so the final range is more than 90 days of activities
* If 100 activities covers only 20 days
  * continue retrieving results until 90 days of range is covered
  * so the final result is more than 100 activities
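To make the difference between `any` and `all` concrete, here is a rough sketch of the retrieval loop (illustrative pseudologic only; `fetchChunk` is a hypothetical helper and the real implementation differs):

```
// keep fetching chunks of the author's history until the window is satisfied
async function getWindow({count, cutoff, satisfyOn = 'any'}, fetchChunk) {
  const activities = [];
  while (true) {
    const chunk = await fetchChunk(Math.min(100, count));
    if (chunk.length === 0) { break; } // no more history available
    activities.push(...chunk);
    const countMet = activities.length >= count;
    // oldest activity fetched so far is older than the duration cutoff
    const durationMet = activities[activities.length - 1].createdAt < cutoff;
    // "any" stops at whichever condition is met first, "all" keeps going until both are met
    if (satisfyOn === 'any' ? (countMet || durationMet) : (countMet && durationMet)) { break; }
  }
  return activities;
}
```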
### Filtering Activities

You may filter retrieved Activities using an array of subreddits.

**Note:** Activities are filtered **before** the range check is made, so you will always end up with the specified range (though it may require more API calls if many activities are filtered out).

#### Include

Use **include** to specify which subreddits should be included in results.

Example where only activities from /r/mealtimevideos and /r/modsupport will be returned:

```json
{
  "count": 100,
  "duration": "90 days",
  "satisfyOn": "any",
  "subreddits": {
    "include": ["mealtimevideos","modsupport"]
  }
}
```

#### Exclude

Use **exclude** to specify which subreddits should NOT be in the results.

Example where activities from /r/mealtimevideos and /r/modsupport will not be returned in results:

```json
{
  "count": 100,
  "duration": "90 days",
  "satisfyOn": "any",
  "subreddits": {
    "exclude": ["mealtimevideos","modsupport"]
  }
}
```

**Note:** `exclude` will be ignored if `include` is also present.
docs/botAuthentication.md (new file, 109 lines)

**Note:** This is for **bot operators.** If you are a subreddit moderator check out the **[Getting Started Guide](/docs/gettingStartedMod.md)**

Before you can start using your bot on reddit there are a few steps you must take:

* Create your bot account, IE the reddit account that will be the "bot"
* Create a Reddit application
* Authenticate your bot account with the application

At the end of this process you will have this info:

* clientId
* clientSecret
* refreshToken
* accessToken
* redirectUri

**Note:** If you already have this information you can skip this guide **but make sure your redirect uri is correct if you plan on using the web interface.**

# Table Of Contents

* [Creating an Application](#create-application)
* [Authenticate Your Bot](#authenticate-your-bot-account)
  * [Using CM OAuth Helper](#cm-oauth-helper-recommended)
  * [Using Aardvark OAuth Helper](#aardvark-oauth-helper)
* [Provide Credentials to CM](#provide-credentials-to-cm)

# Create Application

Visit [your reddit preferences](https://www.reddit.com/prefs/apps) and at the bottom of the page go through the **create an(other) app** process:

* Give it a **name**
* Choose **web app**
* If you know what you will use for the **redirect uri** go ahead and use it, otherwise use **http://localhost:8085/callback**

Click **create app**.

Then write down your **Client ID, Client Secret, and Redirect Uri** somewhere (or keep this webpage open).

# Authenticate Your Bot Account

There are **two ways** you can authenticate your bot account. Using the CM OAuth helper is recommended.

## CM OAuth Helper (Recommended)

This method uses CM's built-in oauth flow. It is recommended because it ensures your bot is authenticated with the correct oauth permissions.

### Start CM with Client ID/Secret and Operator

Start the application and provide these in your configuration:

* **Client ID**
* **Client Secret**
* **Redirect URI**
* **Operator**

It is important that you define **Operator** because the auth route is **protected** -- you must log in to the application in order to access the route.

Refer to the [operator config guide](/docs/operatorConfiguration.md) if you need help with this.

Examples:

* CLI - `node src/index.js --clientId=myId --clientSecret=mySecret --redirectUri="http://localhost:8085/callback" --operator=FoxxMD`
* Docker - `docker run -e "CLIENT_ID=myId" -e "CLIENT_SECRET=mySecret" -e "OPERATOR=FoxxMD" -e "REDIRECT_URI=http://localhost:8085/callback" foxxmd/context-mod`

### Create An Auth Invite

Then open the CM web interface (default is [http://localhost:8085](http://localhost:8085)) and log in.

After logging in you should be automatically redirected to the auth page. If you are not, visit [http://localhost:8085/auth/helper](http://localhost:8085/auth/helper)

Follow the directions in the helper to create an **auth invite link.** Open this link and then follow the directions to authenticate your bot. At the end of the process you will receive an **Access Token** and **Refresh Token**.

## Aardvark OAuth Helper

This method should only be used if you cannot use the [CM OAuth Helper method](#cm-oauth-helper-recommended) because you cannot access the CM web interface.

* Visit [https://not-an-aardvark.github.io/reddit-oauth-helper/](https://not-an-aardvark.github.io/reddit-oauth-helper/) and follow the instructions given.
  * **Note:** You will need to update your **redirect uri.**
* Input your **Client ID** and **Client Secret** in the text boxes with those names.
* Choose scopes. **It is very important you check everything on this list or CM may not work correctly:**
  * edit
  * flair
  * history
  * identity
  * modcontributors
  * modflair
  * modposts
  * modself
  * mysubreddits
  * read
  * report
  * submit
  * wikiread
  * wikiedit (if you are using Toolbox User Notes)
* Click **Generate tokens**. You will get a popup asking you to approve access (or login) -- **the account you approve access with is the account that the Bot will control.**
* After approving, an **Access Token** and **Refresh Token** will be shown at the bottom of the page. Save these to use with CM.

# Provide Credentials to CM

At the end of whichever step you chose you should now have this information saved somewhere:

* clientId
* clientSecret
* refreshToken
* accessToken
* redirectUri

This is all the information you need to run your bot with CM.

Using these credentials, follow the [operator config guide](/docs/operatorConfiguration.md) to finish setting up your CM instance.
docs/examples/README.md (new file, 27 lines)

# Examples

This directory contains examples of valid, ready-to-go configurations for Context Mod for the purpose of:

* showcasing what the bot can do
* providing best practices for writing your configuration
* providing generally useful configurations **that can be used immediately** or as a jumping-off point for your own configuration

### Examples Overview

* Rules
  * [Attribution](/docs/examples/attribution)
  * [Recent Activity](/docs/examples/recentActivity)
  * [Repeat Activity](/docs/examples/repeatActivity)
  * [History](/docs/examples/history)
  * [Author](/docs/examples/author)
  * [Regex](/docs/examples/regex)
  * [Toolbox User Notes](/docs/examples/userNotes)
* [Advanced Concepts](/docs/examples/advancedConcepts)
  * [Rule Sets](/docs/examples/advancedConcepts/ruleSets.json5)
  * [Named Rules](/docs/examples/advancedConcepts/ruleNameReuse.json5)
  * [Check Ordering](/docs/examples/advancedConcepts)
* [Subreddit-ready examples](/docs/examples/subredditReady)

PROTIP: You can edit/build on examples by using the [schema editor.](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json)
docs/examples/advancedConcepts/README.md (new file, 56 lines)

### Named Rules

See [ruleNameReuse.json5](/docs/examples/advancedConcepts/ruleNameReuse.json5)

### Check Order

Checks are run in the order they appear in your configuration. You should therefore place your highest-requirement/most severe action Checks at the top and your lowest-requirement/most moderate action Checks at the bottom.

This way, if an Activity warrants a more serious reaction, that Check is triggered first, rather than a lower-requirement Check with less severe actions triggering and causing all subsequent Checks to be skipped. For example (a structural sketch of this ordering follows the list):

* Attribution >50% AND Repeat Activity 8x AND Recent Activity in 2 subs => remove submission + ban
* Attribution >20% AND Repeat Activity 4x AND Recent Activity in 5 subs => remove submission + flair user restricted
* Attribution >20% AND Repeat Activity 2x => remove submission
* Attribution >20% AND History comments <30% => remove submission
* Attribution >15% => report
* Repeat Activity 2x => report
* Recent Activity in 3 subs => report
* Author not vetted => flair new user submission
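A structural sketch of that ordering (check names and contents are illustrative; rules omitted for brevity):

```json5
{
  "checks": [
    {
      // most severe first: highest requirements, harshest actions
      "name": "Severe - remove",
      "kind": "submission",
      "rules": [ /* high-requirement rules */ ],
      "actions": [{"kind": "remove"}, {"kind": "comment", "content": "Your submission was removed because..."}]
    },
    {
      // least severe last: lowest requirements, mildest actions
      "name": "Moderate - report only",
      "kind": "submission",
      "rules": [ /* low-requirement rules */ ],
      "actions": [{"kind": "report", "content": "Needs review"}]
    }
  ]
}
```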
### Rule Sets

The `rules` array on a `Check` can contain both `Rule` objects and `RuleSet` objects.

A **Rule Set** is a "nested" set of `Rule` objects with a passing condition specified. These allow you to create more complex trigger behavior by combining multiple rules. A minimal sketch of the shape is shown below.

See **[ruleSets.json5](/docs/examples/advancedConcepts/ruleSets.json5)** for a complete example, and consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRuleSetJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json).
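A minimal sketch of the shape (rule bodies omitted; see the linked example for real rules):

```json5
"rules": [
  // a plain Rule
  { "name": "someRule", "kind": "attribution", /* ... */ },
  // a Rule Set: triggers only if its nested rules pass the condition
  {
    "condition": "OR",
    "rules": [
      { "name": "ruleA", "kind": "recentActivity", /* ... */ },
      { "name": "ruleB", "kind": "history", /* ... */ }
    ]
  }
]
```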
### Rule Order

The ordering of your Rules within a Check/RuleSet can have an impact on Check performance (speed) as well as API usage.

Consider these three rules:

* Rule A -- Recent Activity => 3 subreddits => last 15 submissions
* Rule B -- Repeat Activity => last 3 days
* Rule C -- Attribution => >10% => last 90 days or 300 submissions

The first two rules are lightweight in their requirements -- Rule A can be completed in 1 API call and Rule B can potentially be completed in 1 API call.

However, depending on how active the Author is, Rule C will take *at least* 3 API calls just to get all activities (Reddit limits results to 100 items per call).

If the Check uses the `AND` condition for its rules (the default), then if either Rule A or Rule B fails, Rule C will never run. That means 3 API calls are never made, plus the time spent waiting for each to return is saved.

**It is therefore advantageous to list your lightweight Rules first in each Check.**

### API Caching

Context Mod implements some basic caching functionality for **Author Activities** and wiki pages (on Comment/Report Actions).

**Author Activities** are cached for a subreddit-configurable amount of time (10 seconds by default). A cached activity set can be re-used if the **window on a Rule is identical to the window on another Rule**.

This means that when possible you should re-use window values -- IE if you want to check an Author's Activities for a time range, try to always use **7 Days**, or always use **50 Items** for absolute counts. An example of this re-use is sketched below.

Re-use will result in fewer API calls and faster Check times.
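For example, these two rules use the exact same `window` value, so the author activities fetched for the first can be re-used by the second (a sketch; other rule properties omitted):

```json5
"rules": [
  { "name": "freekarma", "kind": "recentActivity", "window": "7 days", /* ... */ },
  // identical window => re-uses the cached activity set fetched for "freekarma"
  { "name": "memes", "kind": "recentActivity", "window": "7 days", /* ... */ }
]
```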
docs/examples/advancedConcepts/ruleNameReuse.json5 (new file, 75 lines)

{
  "checks": [
    {
      "name": "Auto Remove SP Karma",
      "description": "Remove submission because author has self-promo >10% and posted in karma subs recently",
      "kind": "submission",
      "rules": [
        // named rules can be referenced at any point in the configuration (where they occur does not matter)
        // and can be used in any Check
        // Note: rules do not transfer between subreddit configurations
        "freekarmasub",
        {
          "name": "attr10all",
          "kind": "attribution",
          "criteria": [
            {
              "threshold": "> 10%",
              "window": "90 days"
            },
            {
              "threshold": "> 10%",
              "window": 100
            }
          ],
        }
      ],
      "actions": [
        {
          "kind": "remove"
        },
        {
          "kind": "comment",
          "content": "Your submission was removed because you are over reddit's threshold for self-promotion and recently posted this content in a karma sub"
        }
      ]
    },
    {
      "name": "Free Karma On Submission Alert",
      "description": "Check if author has posted this submission in 'freekarma' subreddits",
      "kind": "submission",
      "rules": [
        {
          // rules can be re-used throughout a configuration by referencing them by name
          //
          // The rule name itself can only contain spaces, hyphens and underscores
          // The value used to reference it will have all of these removed, and lower-cased
          //
          // so to reference this rule use the value 'freekarmasub'
          "name": "Free_Karma-SUB",
          "kind": "recentActivity",
          "lookAt": "submissions",
          "useSubmissionAsReference": true,
          "thresholds": [
            {
              "threshold": ">= 1",
              "subreddits": [
                "DeFreeKarma",
                "FreeKarma4U",
                "FreeKarma4You",
                "upvote"
              ]
            }
          ],
          "window": "7 days"
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "Submission posted {{rules.freekarmasub.totalCount}} times in karma {{rules.freekarmasub.subCount}} subs over {{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}"
        }
      ]
    },
  ]
}
docs/examples/advancedConcepts/ruleSets.json5 (new file, 84 lines)

{
  "checks": [
    {
      "name": "Self Promo All or low comment",
      "description": "SP >10% of all activities or >10% of submissions with low comment engagement",
      "kind": "submission",
      "rules": [
        {
          // this attribution rule is looking at all activities
          //
          // we want this one rule to trigger the check because >10% of all activity (submissions AND comments) is a good requirement
          "name": "attr10all",
          "kind": "attribution",
          "criteria": [
            {
              "threshold": "> 10%",
              "window": "90 days"
            },
            {
              "threshold": "> 10%",
              "window": 100
            }
          ],
        },
        {
          // this is a **Rule Set**
          //
          // it is made up of "nested" rules with a pass condition (AND/OR)
          // if the nested rules pass the condition then the Rule Set triggers the Check
          //
          // AND = all nested rules must be triggered to make the Rule Set trigger
          // OR = any of the nested Rules triggering will trigger the Rule Set
          "condition": "AND",
          // in this check we use an Attribution >10% on ONLY submissions, which is a lower requirement than the above attribution rule,
          // and combine it with a History rule looking for low comment engagement
          // to make a "higher" requirement Rule Set out of two low requirement Rules
          "rules": [
            {
              "name": "attr20sub",
              "kind": "attribution",
              "criteria": [
                {
                  "threshold": "> 10%",
                  "thresholdOn": "submissions",
                  "window": "90 days"
                },
                {
                  "threshold": "> 10%",
                  "thresholdOn": "submissions",
                  "window": 100
                }
              ],
              "lookAt": "media"
            },
            {
              "name": "lowOrOpComm",
              "kind": "history",
              "criteriaJoin": "OR",
              "criteria": [
                {
                  "window": "90 days",
                  "comment": "< 50%"
                },
                {
                  "window": "90 days",
                  "comment": "> 40% OP"
                }
              ]
            }
          ]
        }
      ],
      "actions": [
        {
          "kind": "remove"
        },
        {
          "kind": "comment",
          "content": "Your submission was removed because you are over reddit's threshold for self-promotion or exhibit low comment engagement"
        }
      ]
    },
  ],
}
docs/examples/attribution/README.md (new file, 14 lines)

# Attribution

The **Attribution** rule will aggregate an Author's content attributions (youtube channels, twitter accounts, website domains, etc.) and can check their totals or percentages of all Activities over a time period:

* Total # of attributions
* As a percentage of all Activity or of Submissions only
* Look at all domains or only media (youtube, vimeo, etc.)
* Include self posts (by reddit domain) or not

Consult the [schema](https://json-schema.app/view/%23/%23%2Fdefinitions%2FCheckJson/%23%2Fdefinitions%2FAttributionJSONConfig?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the rule's properties.

### Examples

* [Self Promotion as percentage of all Activities](/docs/examples/attribution/redditSelfPromoAll.json5) - Check if any of the Author's aggregated submission origins are >10% of their entire history
* [Self Promotion as percentage of Submissions](/docs/examples/attribution/redditSelfPromoSubmissionsOnly.json5) - Check if any of the Author's aggregated submission origins are >10% of their submissions
docs/examples/attribution/redditSelfPromoAll.json5 (new file, 39 lines)

{
  "checks": [
    {
      "name": "Self Promo Activities",
      "description": "Check if any of Author's aggregated submission origins are >10% of entire history",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "attr10all",
          "kind": "attribution",
          // criteria defaults to OR -- so either of these criteria will trigger the rule
          "criteria": [
            {
              // threshold can be a percent or an absolute number
              "threshold": "> 10%",
              // The default is "all" -- calculate percentage of entire history (submissions & comments)
              // "thresholdOn": "all",

              // look at last 90 days of Author's activities (comments and submissions)
              "window": "90 days"
            },
            {
              "threshold": "> 10%",
              // look at Author's last 100 activities (comments and submissions)
              "window": 100
            }
          ],
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "{{rules.attr10all.largestPercent}}% of {{rules.attr10all.activityTotal}} items over {{rules.attr10all.window}}"
        }
      ]
    }
  ]
}
docs/examples/attribution/redditSelfPromoSubmissionsOnly.json5 (new file, 40 lines)

{
  "checks": [
    {
      "name": "Self Promo Submissions",
      "description": "Check if any of Author's aggregated submission origins are >10% of their submissions",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "attr10sub",
          "kind": "attribution",
          // criteria defaults to OR -- so either of these criteria will trigger the rule
          "criteria": [
            {
              // threshold can be a percent or an absolute number
              "threshold": "> 10%",
              // calculate percentage of submissions, rather than entire history (submissions & comments)
              "thresholdOn": "submissions",

              // look at last 90 days of Author's activities (comments and submissions)
              "window": "90 days"
            },
            {
              "threshold": "> 10%",
              "thresholdOn": "submissions",
              // look at Author's last 100 activities (comments and submissions)
              "window": 100
            }
          ],
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "{{rules.attr10sub.largestPercent}}% of {{rules.attr10sub.activityTotal}} items over {{rules.attr10sub.window}}"
        }
      ]
    }
  ]
}
docs/examples/author/README.md (new file, 38 lines)

# Author

## Rule

The **Author** rule triggers if any [AuthorCriteria](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) from a list are either **included** or **excluded**, depending on which property you put them in.

**AuthorCriteria** that can be checked:

* name (u/userName)
* author's subreddit flair text
* author's subreddit flair css
* author's subreddit mod status
* [Toolbox User Notes](/docs/examples/userNotes)

The Author **Rule** is best used in conjunction with other Rules to short-circuit a Check based on who the Author is. It is easier to use a Rule to do this than to write **author filters** for every Rule (and it makes Rules more re-usable).

Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorRuleJSONConfig?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the rule's properties.

### Examples

* Basic examples
  * [Flair new user Submission](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does not have the `vet` flair then flair the Submission with `New User`
  * [Flair vetted user Submission](/docs/examples/author/flairVettedUserSubmission.json5) - If the Author does have the `vet` flair then flair the Submission with `Vetted`
* Used with other Rules
  * [Ignore vetted user](/docs/examples/author/ignoreVettedUser.json5) - Short-circuit the Check if the Author has the `vet` flair

## Filter

All **Rules** and **Checks** have an optional `authorIs` property that takes an [AuthorOptions](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorOptions?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) object.

**This property works the same as the Author Rule except that** (a minimal sketch follows this list):

* On **Rules**, if all criteria fail the Rule is **skipped.**
  * If a Rule is skipped **it does not fail or pass** and so does not affect the outcome of the Check.
  * However, if all Rules on a Check are skipped the Check will fail.
* On **Checks**, if all criteria fail the Check **fails**.
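A minimal sketch of an `authorIs` filter on a Rule (criteria borrowed from the linked example below):

```json5
{
  "name": "noobmemer",
  "kind": "recentActivity",
  // authorIs is evaluated before the rule runs and decides whether the rule
  // is run or skipped based on the author (see the skip/fail behavior above)
  "authorIs": {
    "exclude": [
      { "flairText": ["Supreme Memer"] }
    ]
  },
  // ...rest of the rule (thresholds, window, etc.)
}
```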
### Examples

* [Skip recent activity check based on author](/docs/examples/author/authorFilter.json5) - Skip a Recent Activity check for a set of subreddits if the Author of the Submission has any set of flairs.

docs/examples/author/authorFilter.json5 (new file, 69 lines)
{
  "checks": [
    {
      "name": "Karma/Meme Sub Activity",
      "description": "Report on karma sub activity or meme sub activity if user isn't a memelord",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "freekarma",
          "kind": "recentActivity",
          "lookAt": "submissions",
          "thresholds": [
            {
              "threshold": ">= 1",
              "subreddits": [
                "DeFreeKarma",
                "FreeKarma4U",
              ]
            }
          ],
          "window": "7 days"
        },
        {
          "name": "noobmemer",
          "kind": "recentActivity",
          // authors filter will be checked before a rule is run. If anything passes then the Rule is skipped -- it is not failed or triggered.
          // if *all* Rules for a Check are skipped due to authors filter then the Check will fail
          "authorIs": {
            // each property (include/exclude) can contain multiple AuthorCriteria
            // if any AuthorCriteria passes its test the Rule is skipped
            //
            // for an AuthorCriteria to pass all properties present on it must pass
            //
            // if "include" is present it will always run and exclude will be skipped
            // "include": []
            "exclude": [
              // for this to pass the Author of the Submission must not have the flair "Supreme Memer" and have the name "user1" or "user2"
              {
                "flairText": ["Supreme Memer"],
                "names": ["user1","user2"]
              },
              {
                // for this to pass the Author of the Submission must not have the flair "Decent Memer"
                "flairText": ["Decent Memer"]
              }
            ]
          },
          "lookAt": "submissions",
          "thresholds": [
            {
              "threshold": ">= 1",
              "subreddits": [
                "dankmemes",
              ]
            }
          ],
          "window": "7 days"
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "Author has posted in free karma sub, or in /r/dankmemes and does not have meme flair in this subreddit"
        }
      ]
    }
  ]
}
docs/examples/author/flairNewUserSubmission.json5 (new file, 29 lines)

{
  "checks": [
    {
      "name": "Flair New User Sub",
      "description": "Flair submission as sketchy if user does not have vet flair",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "newflair",
          "kind": "author",
          // rule will trigger if Author does not have "vet" flair text
          "exclude": [
            {
              "flairText": ["vet"]
            }
          ]
        }
      ],
      "actions": [
        {
          "kind": "flair",
          "text": "New User",
          "css": "orange"
        }
      ]
    }
  ]
}
docs/examples/author/flairVettedUserSubmission.json5 (new file, 29 lines)

{
  "checks": [
    {
      "name": "Flair Vetted User Submission",
      "description": "Flair submission as Approved if user has vet flair",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "newflair",
          "kind": "author",
          // rule will trigger if Author has "vet" flair text
          "include": [
            {
              "flairText": ["vet"]
            }
          ]
        }
      ],
      "actions": [
        {
          "kind": "flair",
          "text": "Vetted",
          "css": "green"
        }
      ]
    }
  ]
}
docs/examples/author/ignoreVettedUser.json5 (new file, 75 lines)

{
  "checks": [
    {
      "name": "non-vetted karma/meme activity",
      "description": "Report if Author has SP and has recent karma/meme sub activity and isn't vetted",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          // The Author Rule is best used in conjunction with other Rules --
          // instead of having to write an AuthorFilter for every Rule where you want to skip it based on Author criteria
          // you can write one Author Rule and make it fail on the required criteria
          // so that the check fails and Actions don't run
          "name": "nonvet",
          "kind": "author",
          "exclude": [
            {
              "flairText": ["vet"]
            }
          ]
        },
        {
          "name": "attr10",
          "kind": "attribution",
          "criteria": [
            {
              "threshold": "> 10%",
              "window": "90 days"
            },
            {
              "threshold": "> 10%",
              "window": 100
            }
          ],
        },
        {
          "name": "freekarma",
          "kind": "recentActivity",
          "lookAt": "submissions",
          "thresholds": [
            {
              "threshold": ">= 1",
              "subreddits": [
                "DeFreeKarma",
                "FreeKarma4U",
              ]
            }
          ],
          "window": "7 days"
        },
        {
          "name": "memes",
          "kind": "recentActivity",
          "lookAt": "submissions",
          "thresholds": [
            {
              "threshold": ">= 3",
              "subreddits": [
                "dankmemes",
              ]
            }
          ],
          "window": "7 days"
        }
      ],
      // will NOT run if the Author for this Submission has the flair "vet"
      "actions": [
        {
          "kind": "report",
          "content": "Author has posted in free karma or meme subs recently"
        }
      ]
    }
  ]
}
docs/examples/history/README.md (new file, 13 lines)

# History

The **History** rule can check an Author's submission/comment statistics over a time period:

* Submission total or percentage of all Activity
* Comment total or percentage of all Activity
* Comments made as OP (commented in their own Submission) total or percentage of all Comments

Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FHistoryJSONConfig?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the rule's properties.

### Examples

* [Low Comment Engagement](/docs/examples/history/lowEngagement.json5) - Check if Author is submitting much more than they comment.
* [OP Comment Engagement](/docs/examples/history/opOnlyEngagement.json5) - Check if Author is mostly engaging only in their own content
docs/examples/history/lowEngagement.json5 (new file, 30 lines)

{
  "checks": [
    {
      "name": "Low Comment Engagement",
      "description": "Check if Author is submitting much more than they comment",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "lowComm",
          "kind": "history",
          "criteria": [
            {
              // look at last 90 days of Author's activities
              "window": "90 days",
              // trigger if less than 30% of their activities in this time period are comments
              "comment": "< 30%"
            },
          ]
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "Low engagement: comments were {{rules.lowcomm.commentPercent}} of {{rules.lowcomm.activityTotal}} over {{rules.lowcomm.window}}"
        }
      ]
    }
  ]
}
docs/examples/history/opOnlyEngagement.json5 (new file, 30 lines)

{
  "checks": [
    {
      "name": "Engaging Own Content Only",
      "description": "Check if Author is mostly engaging in their own content only",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "opOnly",
          "kind": "history",
          "criteria": [
            {
              // look at last 90 days of Author's activities
              "window": "90 days",
              // trigger if more than 60% of their activities in this time period are comments as OP
              "comment": "> 60% OP"
            },
          ]
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "Selfish OP: {{rules.oponly.opPercent}} of {{rules.oponly.commentTotal}} comments over {{rules.oponly.window}} are as OP"
        }
      ]
    }
  ]
}
docs/examples/recentActivity/README.md (new file, 10 lines)

# Recent Activity

The **Recent Activity** rule can check if an Author has made any Submissions/Comments in a list of defined Subreddits.

Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRecentActivityRuleJSONConfig?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the rule's properties.

### Examples

* [Free Karma Subreddits](/docs/examples/recentActivity/freeKarma.json5) - Check if the Author has recently posted in any "free karma" subreddits
* [Submission in Free Karma Subreddits](/docs/examples/recentActivity/freeKarmaOnSubmission.json5) - Check if the Author has posted the Submission this check is running on in any "free karma" subreddits recently
docs/examples/recentActivity/freeKarma.json5 (new file, 40 lines)

{
  "checks": [
    {
      "name": "Free Karma Alert",
      "description": "Check if author has posted in 'freekarma' subreddits",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "freekarma",
          "kind": "recentActivity",
          "useSubmissionAsReference": false,
          // when `lookAt` is not present this rule will look for submissions and comments
          // lookAt: "submissions"
          // lookAt: "comments"
          "thresholds": [
            {
              // for all subreddits, if the number of activities (sub/comment) is equal to or greater than 1 then the rule is triggered
              "threshold": ">= 1",
              "subreddits": [
                "DeFreeKarma",
                "FreeKarma4U",
                "FreeKarma4You",
                "upvote"
              ]
            }
          ],
          // will look at all of the Author's activities in the last 7 days
          "window": "7 days"
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "{{rules.freekarma.totalCount}} activities in karma {{rules.freekarma.subCount}} subs over {{rules.freekarma.window}}: {{rules.freekarma.subSummary}}"
        }
      ]
    }
  ]
}
docs/examples/recentActivity/freeKarmaOnSubmission.json5 (new file, 41 lines)

{
  "checks": [
    {
      "name": "Free Karma On Submission Alert",
      "description": "Check if author has posted this submission in 'freekarma' subreddits",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "freekarmasub",
          "kind": "recentActivity",
          // rule will only look at Author's submissions in these subreddits
          "lookAt": "submissions",
          // rule will only look at Author's submissions in these subreddits that have the same content (link) as the submission this event was made on
          // In simpler terms -- rule will only check to see if the same link the author just posted is also posted in these subreddits
          "useSubmissionAsReference": true,
          "thresholds": [
            {
              // for all subreddits, if the number of activities (sub/comment) is equal to or greater than 1 then the rule is triggered
              "threshold": ">= 1",
              "subreddits": [
                "DeFreeKarma",
                "FreeKarma4U",
                "FreeKarma4You",
                "upvote"
              ]
            }
          ],
          // look at all of the Author's submissions in the last 7 days
          "window": "7 days"
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "Submission posted {{rules.freekarmasub.totalCount}} times in karma {{rules.freekarmasub.subCount}} subs over {{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}"
        }
      ]
    }
  ]
}
docs/examples/regex/README.md (new file, 20 lines)

The **Regex** rule matches on text content from a comment or submission in the same way automod uses regex. The rule, however, provides additional functionality automod does not have:

* Can set the **number** of matches that trigger the rule (`matchThreshold`)

These can then be used in conjunction with a [`window`](https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md) to match against activities from the history of the Author of the Activity being checked (including the Activity being checked). A condensed sketch follows this list:

* Can set the **number of Activities** that meet the `matchThreshold` to trigger the rule (`activityMatchThreshold`)
* Can set the **number of total matches** across all Activities to trigger the rule (`totalMatchThreshold`)
* Can set the **type of Activities** to check (`lookAt`)
* When an Activity is a Submission, can **specify which parts of the Submission to match against**, IE title, body, and/or url (`testOn`)
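A condensed sketch contrasting the two history thresholds (the linked examples below show each one individually; they are combined here only for comparison):

```json5
{
  "kind": "regex",
  "criteria": [
    {
      "regex": "/fuck|shit|damn/",
      "window": 10,
      // activityMatchThreshold => how many of the 10 activities must contain a match
      "activityMatchThreshold": "> 3",
      // totalMatchThreshold => how many matches must exist across all 10 activities combined
      "totalMatchThreshold": "> 5"
    }
  ]
}
```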
### Examples

* [Trigger if regex matches against the current activity](/docs/examples/regex/matchAnyCurrentActivity.json5)
* [Trigger if regex matches 5 times against the current activity](/docs/examples/regex/matchThresholdCurrentActivity.json5)
* [Trigger if regex matches against any part of a Submission](/docs/examples/regex/matchSubmissionParts.json5)
* [Trigger if regex matches any of Author's last 10 activities](/docs/examples/regex/matchHistoryActivity.json5)
* [Trigger if regex matches at least 3 of Author's last 10 activities](/docs/examples/regex/matchActivityThresholdHistory.json5)
* [Trigger if there are 5 regex matches in the Author's last 10 activities](/docs/examples/regex/matchTotalHistoryActivity.json5)
* [Trigger if there are 5 regex matches in the Author's last 10 comments](/docs/examples/regex/matchSubsetHistoryActivity.json5)
docs/examples/regex/matchActivityThresholdHistory.json5 (new file, 20 lines)

// goes inside
// "rules": []
{
  "name": "swear",
  "kind": "regex",
  "criteria": [
    // triggers if more than 3 activities in the last 10 match the regex
    {
      "regex": "/fuck|shit|damn/",
      // this differs from "totalMatchThreshold"
      //
      // activityMatchThreshold => # of activities from window must match regex
      // totalMatchThreshold => # of matches across all activities from window must match regex
      "activityMatchThreshold": "> 3",
      // if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
      // learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
      "window": 10,
    },
  ]
}
docs/examples/regex/matchAnyCurrentActivity.json5 (new file, 14 lines)

// goes inside
// "rules": []
{
  "name": "swear",
  "kind": "regex",
  "criteria": [
    // triggers if current activity has more than 0 matches
    {
      "regex": "/fuck|shit|damn/",
      // if "matchThreshold" is not specified it defaults to this -- default behavior is to trigger if there are any matches
      // "matchThreshold": "> 0"
    },
  ]
}
docs/examples/regex/matchHistoryActivity.json5 (new file, 15 lines)

// goes inside
// "rules": []
{
  "name": "swear",
  "kind": "regex",
  "criteria": [
    // triggers if any activity in the last 10 (including current activity) match the regex
    {
      "regex": "/fuck|shit|damn/",
      // if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
      // learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
      "window": 10,
    },
  ]
}
docs/examples/regex/matchSubmissionParts.json5 (new file, 19 lines)

// goes inside
// "rules": []
{
  "name": "swear",
  "kind": "regex",
  "criteria": [
    {
      // triggers if the current activity has more than 0 matches
      // if the activity is a submission then matches against title, body, and url
      // if "testOn" is not provided then `title, body` are the defaults
      "regex": "/fuck|shit|damn/",
      "testOn": [
        "title",
        "body",
        "url"
      ]
    },
  ]
}
docs/examples/regex/matchSubsetHistoryActivity.json5 (new file, 23 lines)

// goes inside
// "rules": []
{
  "name": "swear",
  "kind": "regex",
  "criteria": [
    // triggers if there are more than 5 regex matches in the last 10 activities (comments only)
    {
      "regex": "/fuck|shit|damn/",
      // this differs from "activityMatchThreshold"
      //
      // activityMatchThreshold => # of activities from window must match regex
      // totalMatchThreshold => # of matches across all activities from window must match regex
      "totalMatchThreshold": "> 5",
      // if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
      // learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
      "window": 10,
      // determines which activities from window to consider
      // defaults to "all" (submissions and comments)
      "lookAt": "comments",
    },
  ]
}
docs/examples/regex/matchThresholdCurrentActivity.json5 (new file, 13 lines)

// goes inside
// "rules": []
{
  "name": "swear",
  "kind": "regex",
  "criteria": [
    {
      "regex": "/fuck|shit|damn/",
      // triggers if current activity has greater than 5 matches
      "matchThreshold": "> 5"
    },
  ]
}
docs/examples/regex/matchTotalHistoryActivity.json5 (new file, 21 lines)

// goes inside
// "rules": []
{
  "name": "swear",
  "kind": "regex",
  "criteria": [
    // triggers if there are more than 5 regex matches in the last 10 activities (comments or submission)
    {
      // triggers if there are more than 5 *total matches* across the last 10 activities
      "regex": "/fuck|shit|damn/",
      // this differs from "activityMatchThreshold"
      //
      // activityMatchThreshold => # of activities from window must match regex
      // totalMatchThreshold => # of matches across all activities from window must match regex
      "totalMatchThreshold": "> 5",
      // if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
      // learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
      "window": 10,
    },
  ]
}
docs/examples/repeatActivity/README.md (new file, 49 lines)

# Repeat Activity

The **Repeat Activity** rule will check for patterns of repetition in an Author's Submission/Comment history. Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRepeatActivityJSONConfig?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the rule's properties.

## Tuning

The most critical properties for this Rule are **gapAllowance** and **lookAt**.

### `lookAt`

Determines which Activities from a User's history are checked when looking for repeats.

Can be either:

* `all` -- All of a user's submissions and comments are considered
* `submissions` -- Only a user's submissions are considered

Defaults to `all`.

### `gapAllowance`

`gapAllowance` determines how many **non-repeat Activities** are "allowed" between "in a row" submissions. `N` non-repeat activities will be thrown away during the count, which allows checking for patterns with a bit of "fuzziness".

By default `gapAllowance: 0`, so all repeats must be truly consecutive.
___
Consider the following example in a user's history:

* crossposts 2 times
* 1 comment
* crossposts 2 times
* 2 comments
* crossposts 4 times

Your goal is to remove a submission if it has been crossposted **5 times.**

With defaults for `lookAt` and `gapAllowance` this rule **would not be triggered** because no set of consecutive submissions was repeated 5 times.

With only `lookAt: "submissions"` this rule **would trigger** because all the comments would be ignored, resulting in 8 repeats.

With only `gapAllowance: 1` this rule **would not trigger** because the 2-comment non-repeat would break the "in a row" count.

With only `gapAllowance: 2` this rule **would trigger** because the 1- and 2-comment non-repeats would be thrown out, resulting in 8 repeats.

**Note:** `lookAt: "submissions"` should be used with caution because all comments are thrown away. This isn't indicative of real repeat behavior if the user is a heavy commenter. For this reason the default is `all`.
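A config sketch matching the scenario above (trigger at 5 or more crossposts of the referenced submission while tolerating up to 2 interleaved non-repeat activities); the properties are the same ones used in the linked examples and the `window` value is illustrative:

```json5
{
  "name": "xpostspam",
  "kind": "repeatActivity",
  "useSubmissionAsReference": true,
  "gapAllowance": 2,
  "threshold": ">= 5",
  "window": "7 days"
}
```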
## Examples

* [Crosspost Spamming](/docs/examples/repeatActivity/crosspostSpamming.json5) - Check if an Author is spamming their Submissions across multiple subreddits
* [Burst-posting](/docs/examples/repeatActivity/burstPosting.json5) - Check if Author is crossposting their Submissions in short bursts
docs/examples/repeatActivity/burstPosting.json5 (new file, 30 lines)

{
  "checks": [
    {
      "name": "Burstpost Spam",
      "description": "Check if Author is crossposting in short bursts",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "burstpost",
          "kind": "repeatActivity",
          // will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
          "useSubmissionAsReference": true,
          // the number of non-repeat activities (submissions or comments) to ignore between repeat submissions
          "gapAllowance": 3,
          // if the Author has posted this Submission 6 times, ignoring 3 non-repeat activities between each repeat, then this rule will trigger
          "threshold": ">= 6",
          // look at all of the Author's submissions in the last 7 days
          "window": "7 days"
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "Author has burst-posted this link {{rules.burstpost.largestRepeat}} times over {{rules.burstpost.window}}"
        }
      ]
    }
  ]
}
docs/examples/repeatActivity/crosspostSpamming.json5 (new file, 28 lines)

{
  "checks": [
    {
      "name": "Crosspost Spam",
      "description": "Check if Author is spamming Submissions across subreddits",
      // check will run on a new submission in your subreddit and look at the Author of that submission
      "kind": "submission",
      "rules": [
        {
          "name": "xpostspam",
          "kind": "repeatActivity",
          // will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
          "useSubmissionAsReference": true,
          // if the Author has posted this Submission 5 times consecutively then this rule will trigger
          "threshold": ">= 5",
          // look at all of the Author's submissions in the last 7 days
          "window": "7 days"
        }
      ],
      "actions": [
        {
          "kind": "report",
          "content": "Author has posted this link {{rules.xpostspam.largestRepeat}} times over {{rules.xpostspam.window}}"
        }
      ]
    }
  ]
}
docs/examples/subredditReady/README.md (new file, 41 lines)

Provided here are **complete, ready-to-go configurations** that can be copy-pasted straight into your configuration wiki page to get going with ContextMod immediately.

These configurations attempt to provide sensible, non-destructive, default behavior for some common scenarios and subreddit types.

In most cases these will perform decently out-of-the-box, but they are not perfect. You should still monitor bot behavior to see how it performs, and you will most likely still need to tweak these configurations to get your desired behavior.

All actions for these configurations are non-destructive in that:

* All instances where an activity would be modified (remove/ban/approve) have `dryRun: true` set to prevent the action from actually being performed
* These instances also have a `report` action detailing the action that would have been performed

**You will have to remove the `report` action and `dryRun` settings yourself.** This is to ensure that you understand the behavior the bot will be performing. If you are unsure of this, you should leave them in place until you are certain the behavior the bot is performing is acceptable.
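Concretely, every destructive action in these configs is paired like this (pattern taken from the examples below; the report text is illustrative). Delete the `report` action and the `dryRun` line once you are satisfied with the behavior:

```json5
"actions": [
  // remove this after confirming behavior is acceptable
  {
    "kind": "report",
    "content": "Remove => what the bot would have removed and why"
  },
  {
    "kind": "remove",
    // remove the line below after confirming behavior is acceptable
    "dryRun": true
  }
]
```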
## Submission-based Behavior
|
||||
|
||||
### [Remove submissions from users who have used 'freekarma' subs to bypass karma checks](/docs/examples/subredditReady/freekarma.json5)
|
||||
|
||||
If the user has any activity (comment/submission) in known freekarma subreddits in the past (50 activities or 6 months) then remove the submission.
|
||||
|
||||
### [Remove submissions from users who have crossposted the same submission 4 or more times](/docs/examples/subredditReady/crosspostSpam.json5)
|
||||
|
||||
If the user has crossposted the same submission in the past (50 activities or 6 months) 4 or more times in a row then remove the submission.
|
||||
|
||||
### [Remove submissions from users who have crossposted or used 'freekarma' subs](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5)
|
||||
|
||||
Will remove submission if either of the above two behaviors is detected
|
||||
|
||||
### [Remove link submissions where the user's history is comprised of 10% or more of the same link](/docs/examples/subredditReady/selfPromo.json5)
|
||||
|
||||
If the link origin (youtube author, twitter author, etc. or regular domain for non-media links)
|
||||
|
||||
* comprises 10% or more of the users **entire** history in the past (100 activities or 6 months)
|
||||
* or comprises 10% or more of the user's **submission** history in the past (100 activities or 6 months) and the user has low engagement (<50% of history is comments or >40% of comments are as OP)
|
||||
|
||||
then remove the submission
|
||||
|
||||
## Comment-based behavior
|
||||
|
||||
### [Remove comment if the user has posted the same comment 4 or more times in a row](/docs/examples/subredditReady/commentSpam.json5)
|
||||
|
||||
If the user made the same comment (with some fuzzy matching) 4 or more times in a row in the past (50 activities or 6 months) then remove the comment.
|
||||
42
docs/examples/subredditReady/commentSpam.json5
Normal file
@@ -0,0 +1,42 @@
|
||||
{
|
||||
"polling": ["newComm"],
|
||||
"checks": [
|
||||
{
|
||||
//
|
||||
// Stop users who spam the same comment many times
|
||||
//
|
||||
// Remove a COMMENT if the user has posted the same comment at least 4 times in recent history
|
||||
//
|
||||
"name": "low xp comment spam",
|
||||
"description": "X-posted comment >=4x",
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "xPostLow",
|
||||
"kind": "repeatActivity",
|
||||
"gapAllowance": 2,
|
||||
"threshold": ">= 4",
|
||||
"window": {
|
||||
"count": 50,
|
||||
"duration": "6 months"
|
||||
}
|
||||
},
|
||||
],
|
||||
"actions": [
|
||||
// remove this after confirming behavior is acceptable
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "Remove=> Posted same comment {{rules.xpostlow.largestRepeat}}x times"
|
||||
},
|
||||
//
|
||||
//
|
||||
{
|
||||
"kind": "remove",
|
||||
// remove the line below after confirming behavior is acceptable
|
||||
"dryRun": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
77
docs/examples/subredditReady/crosspostSpam.json5
Normal file
@@ -0,0 +1,77 @@
|
||||
{
|
||||
"polling": ["unmoderated"],
|
||||
"checks": [
|
||||
{
|
||||
//
|
||||
// Stop users who post low-effort, crossposted spam
|
||||
//
|
||||
// Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
|
||||
// less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
//
|
||||
"name": "low xp spam and engagement",
|
||||
"description": "X-posted 4x and low comment engagement",
|
||||
"kind": "submission",
|
||||
"itemIs": [
|
||||
{
|
||||
"removed": false
|
||||
}
|
||||
],
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "xPostLow",
|
||||
"kind": "repeatActivity",
|
||||
"gapAllowance": 2,
|
||||
"threshold": ">= 4",
|
||||
"window": {
|
||||
"count": 50,
|
||||
"duration": "6 months"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "lowOrOpComm",
|
||||
"kind": "history",
|
||||
"criteriaJoin": "OR",
|
||||
"criteria": [
|
||||
{
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"comment": "< 50%"
|
||||
},
|
||||
{
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"comment": "> 40% OP"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
// remove this after confirming behavior is acceptable
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "Remove=>{{rules.xpostlow.largestRepeat}} X-P => {{rules.loworopcomm.thresholdSummary}}"
|
||||
},
|
||||
//
|
||||
//
|
||||
{
|
||||
"kind": "remove",
|
||||
// remove the line below after confirming behavior is acceptable
|
||||
"dryRun": true
|
||||
},
|
||||
// optionally remove "dryRun" from below if you want to leave a comment on removal
|
||||
// PROTIP: the comment is bland, you should make it better
|
||||
{
|
||||
"kind": "comment",
|
||||
"content": "Your submission has been removed because you cross-posted it {{rules.xpostlow.largestRepeat}} times and you have very low engagement outside of making submissions",
|
||||
"distinguish": true,
|
||||
"dryRun": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
138
docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5
Normal file
@@ -0,0 +1,138 @@
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
//
|
||||
// Stop users who post low-effort, crossposted spam
|
||||
//
|
||||
// Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
|
||||
// less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
//
|
||||
"name": "remove on low xp spam and engagement",
|
||||
"description": "X-posted 4x and low comment engagement",
|
||||
"kind": "submission",
|
||||
"itemIs": [
|
||||
{
|
||||
"removed": false
|
||||
}
|
||||
],
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "xPostLow",
|
||||
"kind": "repeatActivity",
|
||||
"gapAllowance": 2,
|
||||
"threshold": ">= 4",
|
||||
"window": {
|
||||
"count": 50,
|
||||
"duration": "6 months"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "lowOrOpComm",
|
||||
"kind": "history",
|
||||
"criteriaJoin": "OR",
|
||||
"criteria": [
|
||||
{
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"comment": "< 50%"
|
||||
},
|
||||
{
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"comment": "> 40% OP"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
// remove this after confirming behavior is acceptable
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "Remove=>{{rules.xpostlow.largestRepeat}} X-P => {{rules.loworopcomm.thresholdSummary}}"
|
||||
},
|
||||
//
|
||||
//
|
||||
{
|
||||
"kind": "remove",
|
||||
// remove the line below after confirming behavior is acceptable
|
||||
"dryRun": true
|
||||
},
|
||||
// optionally remove "dryRun" from below if you want to leave a comment on removal
|
||||
// PROTIP: the comment is bland, you should make it better
|
||||
{
|
||||
"kind": "comment",
|
||||
"content": "Your submission has been removed because you cross-posted it {{rules.xpostlow.largestRepeat}} times and you have very low engagement outside of making submissions",
|
||||
"distinguish": true,
|
||||
"dryRun": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
//
|
||||
// Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
|
||||
//
|
||||
"name": "freekarma removal",
|
||||
"description": "Remove submission if user has used freekarma sub recently",
|
||||
"kind": "submission",
|
||||
"itemIs": [
|
||||
{
|
||||
"removed": false
|
||||
}
|
||||
],
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "freekarma",
|
||||
"kind": "recentActivity",
|
||||
"window": {
|
||||
"count": 50,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"useSubmissionAsReference": false,
|
||||
"thresholds": [
|
||||
{
|
||||
"subreddits": [
|
||||
"FreeKarma4U",
|
||||
"FreeKarma4You",
|
||||
"KarmaStore",
|
||||
"promote",
|
||||
"shamelessplug",
|
||||
"upvote"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
// remove this after confirming behavior is acceptable
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "Remove=> {{rules.newtube.totalCount}} activities in freekarma subs"
|
||||
},
|
||||
//
|
||||
//
|
||||
{
|
||||
"kind": "remove",
|
||||
// remove the line below after confirming behavior is acceptable
|
||||
"dryRun": true
|
||||
},
|
||||
// optionally remove "dryRun" from below if you want to leave a comment on removal
|
||||
// PROTIP: the comment is bland, you should make it better
|
||||
{
|
||||
"kind": "comment",
|
||||
"content": "Your submission has been removed because you have recent activity in 'freekarma' subs",
|
||||
"distinguish": true,
|
||||
"dryRun": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
64
docs/examples/subredditReady/freekarma.json5
Normal file
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
//
|
||||
// Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
|
||||
//
|
||||
"name": "freekarma removal",
|
||||
"description": "Remove submission if user has used freekarma sub recently",
|
||||
"kind": "submission",
|
||||
"itemIs": [
|
||||
{
|
||||
"removed": false
|
||||
}
|
||||
],
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "freekarma",
|
||||
"kind": "recentActivity",
|
||||
"window": {
|
||||
"count": 50,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"useSubmissionAsReference": false,
|
||||
"thresholds": [
|
||||
{
|
||||
"subreddits": [
|
||||
"FreeKarma4U",
|
||||
"FreeKarma4You",
|
||||
"KarmaStore",
|
||||
"upvote"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
// remove this after confirming behavior is acceptable
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "Remove=> {{rules.newtube.totalCount}} activities in freekarma subs"
|
||||
},
|
||||
//
|
||||
//
|
||||
{
|
||||
"kind": "remove",
|
||||
// remove the line below after confirming behavior is acceptable
|
||||
"dryRun": true,
|
||||
},
|
||||
// optionally remove "dryRun" from below if you want to leave a comment on removal
|
||||
// PROTIP: the comment is bland, you should make it better
|
||||
{
|
||||
"kind": "comment",
|
||||
"content": "Your submission has been removed because you have recent activity in 'freekarma' subs",
|
||||
"distinguish": true,
|
||||
"dryRun": true,
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
104
docs/examples/subredditReady/selfPromo.json5
Normal file
@@ -0,0 +1,104 @@
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
//
|
||||
// Stop users who make link submissions with a self-promotional agenda (with reddit's suggested 10% rule)
|
||||
// https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit
|
||||
//
|
||||
// Remove a SUBMISSION if the link comprises 10% or more of the user's history (100 activities or 6 months) OR
|
||||
//
|
||||
// if the link comprises 10% or more of their submission history (100 activities or 6 months)
|
||||
// AND less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
//
|
||||
"name": "Self-promo all AND low engagement",
|
||||
"description": "Self-promo is >10% for all or just sub and low comment engagement",
|
||||
"kind": "submission",
|
||||
"condition": "OR",
|
||||
"rules": [
|
||||
{
|
||||
"name": "attr",
|
||||
"kind": "attribution",
|
||||
"criteria": [
|
||||
{
|
||||
"threshold": ">= 10%",
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"domains": [
|
||||
"AGG:SELF"
|
||||
]
|
||||
}
|
||||
],
|
||||
},
|
||||
{
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "attrsub",
|
||||
"kind": "attribution",
|
||||
"criteria": [
|
||||
{
|
||||
"threshold": ">= 10%",
|
||||
"thresholdOn": "submissions",
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"domains": [
|
||||
"AGG:SELF"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "lowOrOpComm",
|
||||
"kind": "history",
|
||||
"criteriaJoin": "OR",
|
||||
"criteria": [
|
||||
{
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"comment": "< 50%"
|
||||
},
|
||||
{
|
||||
"window": {
|
||||
"count": 100,
|
||||
"duration": "6 months"
|
||||
},
|
||||
"comment": "> 40% OP"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "{{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}} of {{rules.attr.activityTotal}}{{rules.attrsub.activityTotal}} items ({{rules.attr.window}}{{rules.attrsub.window}}){{#rules.loworopcomm.thresholdSummary}} => {{rules.loworopcomm.thresholdSummary}}{{/rules.loworopcomm.thresholdSummary}}"
|
||||
},
|
||||
//
|
||||
//
|
||||
{
|
||||
"kind": "remove",
|
||||
// remove the line below after confirming behavior is acceptable
|
||||
"dryRun": true
|
||||
},
|
||||
// optionally remove "dryRun" from below if you want to leave a comment on removal
|
||||
// PROTIP: the comment is bland, you should make it better
|
||||
{
|
||||
"kind": "comment",
|
||||
"content": "Your submission has been removed it comprises 10% or more of your recent history ({{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}}). This is against [reddit's self promotional guidelines.](https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit)",
|
||||
"distinguish": true,
|
||||
"dryRun": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
26
docs/examples/userNotes/README.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# [Toolbox](https://www.reddit.com/r/toolbox/wiki/docs) [User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes)
|
||||
|
||||
Context Mod supports reading and writing [User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes) for the [Toolbox](https://www.reddit.com/r/toolbox/wiki/docs) extension.
|
||||
|
||||
**You must have Toolbox set up for your subreddit and at least one User Note created before you can use User Note-related features in ContextMod.**
|
||||
|
||||
[Click here for the Toolbox Quickstart Guide](https://www.reddit.com/r/toolbox/wiki/docs/quick_start)
|
||||
|
||||
## Filter
|
||||
|
||||
User Notes are an additional criterion on [AuthorCriteria](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) that can be used alongside other Author properties, both [for filtering rules and in the AuthorRule.](/docs/examples/author/)
|
||||
|
||||
Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FUserNoteCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the **UserNoteCriteria** object that can be used in AuthorCriteria.
|
||||
|
||||
### Examples
|
||||
|
||||
* [Do not tag user with Good User note](/docs/examples/userNotes/usernoteFilter.json5)
|
||||
|
||||
## Action
|
||||
|
||||
A User Note can also be added to the Author of a Submission or Comment with the [UserNoteAction.](https://json-schema.app/view/%23%2Fdefinitions%2FUserNoteActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json)
|
||||
|
||||
|
||||
### Examples
|
||||
|
||||
* [Add note on user doing self promotion](/docs/examples/userNotes/usernoteSP.json5)
|
||||
45
docs/examples/userNotes/usernoteFilter.json5
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"checks": [
|
||||
{
|
||||
"name": "Self Promo Activities",
|
||||
"description": "Tag SP only if user does not have good contributor user note",
|
||||
// check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
"kind": "submission",
|
||||
"rules": [
|
||||
{
|
||||
"name": "attr10all",
|
||||
"kind": "attribution",
|
||||
"author": {
|
||||
"exclude": [
|
||||
{
|
||||
// the key of the usernote type to look for https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
// rule will not run if current usernote on Author is of type 'gooduser'
|
||||
"type": "gooduser"
|
||||
}
|
||||
]
|
||||
},
|
||||
"criteria": [
|
||||
{
|
||||
"threshold": "> 10%",
|
||||
"window": "90 days"
|
||||
},
|
||||
{
|
||||
"threshold": "> 10%",
|
||||
"window": 100
|
||||
}
|
||||
],
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "usernote",
|
||||
// the key of usernote type
|
||||
// https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
"type": "spamwarn",
|
||||
// content is mustache templated as usual
|
||||
"content": "Self Promotion: {{rules.attr10all.titlesDelim}} {{rules.attr10sub.largestPercent}}%"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
36
docs/examples/userNotes/usernoteSP.json5
Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"checks": [
|
||||
{
|
||||
"name": "Self Promo Activities",
|
||||
"description": "Check if any of Author's aggregated submission origins are >10% of entire history",
|
||||
// check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
"kind": "submission",
|
||||
"rules": [
|
||||
{
|
||||
"name": "attr10all",
|
||||
"kind": "attribution",
|
||||
"criteria": [
|
||||
{
|
||||
"threshold": "> 10%",
|
||||
"window": "90 days"
|
||||
},
|
||||
{
|
||||
"threshold": "> 10%",
|
||||
"window": 100
|
||||
}
|
||||
],
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "usernote",
|
||||
// the key of usernote type
|
||||
// https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
"type": "spamwarn",
|
||||
// content is mustache templated as usual
|
||||
"content": "Self Promotion: {{rules.attr10all.titlesDelim}} {{rules.attr10sub.largestPercent}}%"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
140
docs/gettingStartedMod.md
Normal file
@@ -0,0 +1,140 @@
|
||||
This getting started guide is for **reddit moderators** -- that is, someone who wants **an existing ContextMod bot to run on their subreddit.** If you are trying to run a ContextMod
|
||||
instance (software) please refer to the [operator getting started](/docs/gettingStartedOperator.md) guide.
|
||||
|
||||
# Table of Contents
|
||||
|
||||
* [Prior Knowledge](#prior-knowledge)
|
||||
* [Choose A Bot](#choose-a-bot)
|
||||
* [Use The Operator's Bot](#use-the-operators-bot)
|
||||
* [Bring Your Own Bot (BYOB)](#bring-your-own-bot-byob)
|
||||
* [Creating Configuration](#configuring-the-bot)
|
||||
* [Monitor the Bot](#monitor-the-bot)
|
||||
|
||||
# Prior Knowledge
|
||||
|
||||
Before continuing with this guide you should first make sure you understand how ContextMod works. Please review this documentation:
|
||||
|
||||
* [How It Works](/docs#how-it-works)
|
||||
* [Core Concepts](/docs#concepts)
|
||||
|
||||
# Choose A Bot
|
||||
|
||||
First determine what bot (reddit account) you want to run ContextMod with. (You may have already discussed this with your operator)
|
||||
|
||||
## Use the Operator's Bot
|
||||
|
||||
If the Operator has communicated that **you should add a bot they control as a moderator** to your subreddit, this is the option you will use.
|
||||
|
||||
**Pros:**
|
||||
|
||||
* Do not have to create and keep track of another reddit account
|
||||
* Easiest option in terms of setup for both moderators and operator
|
||||
|
||||
**Cons:**
|
||||
|
||||
* Shared API quota -- the bot's quota is shared among all the subreddits it moderates (not great for high-volume subreddits)
|
||||
|
||||
___
|
||||
|
||||
Ensure that you are in communication with the **operator** for this bot. The bot **will not automatically accept a moderator invitation;** the invite must be accepted manually by the bot operator. This is an intentional barrier to ensure moderators and the operator are familiar with each other's needs and have established some form of trust.
|
||||
|
||||
Now invite the bot to moderate your subreddit. The bot should have at least these permissions:
|
||||
|
||||
* Manage Users
|
||||
* Manage Posts and Comments
|
||||
* Manage Flair
|
||||
|
||||
Additionally, the bot must have the **Manage Wiki Pages** permission if you plan to use [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes). If you are not planning on using this feature and do not want the bot to have this permission then you **must** ensure the bot can view the configuration wiki page (detailed below).
|
||||
|
||||
## Bring Your Own Bot (BYOB)
|
||||
|
||||
If the operator has communicated that **they want to use a bot you control**, this is the option you will use.
|
||||
|
||||
**Pros:**
|
||||
|
||||
* **Dedicated API quota**
|
||||
* This is basically a requirement if your subreddit has high-volume activity and you plan on running checks on comments
|
||||
* More security guarantees since you control the account
|
||||
* **Note:** authenticating an account does NOT give the operator access to view or change the email/password for the account
|
||||
* Established history in your subreddit
|
||||
|
||||
**Cons:**
|
||||
|
||||
* More setup required for both moderators and operators
|
||||
|
||||
___
|
||||
|
||||
The **operator** will send you an **invite link** that you will use to authenticate your bot with the operator's application. Example link: `https://operatorsUrl.com/auth/invite?invite=4kf9n3o03ncd4nd`
|
||||
|
||||
Review the information shown on the invite link webpage and then follow the directions shown to authorize your bot for the operator.
|
||||
|
||||
**Note:** There is information displayed **after** authentication that you will need to communicate to your operator -- the **Refresh** and **Access** token values. Make sure you save these somewhere as the invite link is **one-use only.**
|
||||
|
||||
# Configuring the Bot
|
||||
|
||||
## Setup wiki page
|
||||
|
||||
* Visit the wiki page of the subreddit you want the bot to moderate
|
||||
* The default location the bot checks for a configuration is at `https://old.reddit.com/r/YOURSUBREDDIT/wiki/botconfig/contextbot`
|
||||
* If the page does not exist create it
|
||||
* Ensure the wiki page visibility is restricted
|
||||
* On the wiki page click **settings** (**Page settings** in new reddit)
|
||||
* Check the box for **Only mods may edit and view** and then **save**
|
||||
* Alternatively, if you did not give the bot the **Manage Wiki Pages** permission then add it to the **allow users to edit page** setting
|
||||
|
||||
## Procure a configuration
|
||||
|
||||
Now you need to create the actual configuration that will determine the bot's behavior on your subreddit. This may have already been done for you by your operator or you may be copying a fellow moderator's configuration.
|
||||
|
||||
If you already have a configuration you may skip the below step and go directly to [saving your configuration](#saving-your-configuration)
|
||||
|
||||
### Using an Example Config
|
||||
|
||||
Visit the [Examples](https://github.com/FoxxMD/context-mod/tree/master/docs/examples) folder to find various examples of individual rules or see the [subreddit-ready examples.](/docs/examples/subredditReady)
|
||||
|
||||
After you have found a configuration to use as a starting point:
|
||||
|
||||
* In a new tab open the github page for the configuration you want ([example](/docs/examples/repeatActivity/crosspostSpamming.json5))
|
||||
* Click the **Raw** button, then select all and copy all of the text to your clipboard.
|
||||
|
||||
### Build Your Own Config
|
||||
|
||||
Additionally, you can use [this schema editor](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) to build your configuration. The editor has a ton of handy features:
|
||||
|
||||
* fully annotated configuration data/structure
|
||||
* generated examples in json/yaml
|
||||
* built-in editor that automatically validates your config
|
||||
|
||||
PROTIP: Find an example config to use as a starting point and then build on it using the editor.
|
||||
|
||||
## Saving Your Configuration
|
||||
|
||||
* Open the wiki page you created in the [previous step](#setup-wiki-page) and click **edit**
|
||||
* Copy-paste your configuration into the wiki text box
|
||||
* Save the edited wiki page
|
||||
|
||||
___
|
||||
|
||||
The bot automatically checks for new configurations on your wiki page every 5 minutes. If your operator has made the web interface accessible you may log in there and force the config to update on your subreddit.
|
||||
|
||||
# Monitor the Bot
|
||||
|
||||
Monitoring the behavior of the bot depends on how your operator set up their instance. ContextMod comes with a built-in web interface that is secure and accessible only to moderators of the subreddits it is running on. However, there is some additional setup the operator must perform to make this interface publicly accessible. If you do not have access to this interface please communicate with your operator.
|
||||
|
||||
After logging in to the interface you will find your subreddit in a tab at the top of the web page. Selecting your subreddit will give you access to:
|
||||
|
||||
* Current status of the bot
|
||||
* Current status of your configuration
|
||||
* Statistics pertaining to the number of checks/rules/actions run and cache usage
|
||||
* **A real-time view for bot logs pertaining to your subreddit**
|
||||
|
||||
The logs are the meat and potatoes of the bot and the main source of feedback you have for fine-tuning the bot's behavior. The **verbose** log level will show you:
|
||||
|
||||
* The event being processed
|
||||
* The individual results of triggered rules, per check
|
||||
* The checks that were run and their rules
|
||||
* The actions performed, with markdown content preview, of triggered checks
|
||||
|
||||
This information should enable you to tweak the criteria for your rules in order to get the required behavior from the bot.
|
||||
|
||||
Additionally, you can test your bot on any comment/submission by entering its permalink in the text box at the top of the logs and selecting **Dry Run** -- this will run the bot on an Activity without actually performing any actions, allowing you to preview the results of a run.
|
||||
83
docs/gettingStartedOperator.md
Normal file
@@ -0,0 +1,83 @@
|
||||
This getting started guide is for **Operators** -- that is, someone who wants to run the actual software for a ContextMod bot. If you are a **Moderator** check out the [moderator getting started](/docs/gettingStartedMod.md) guide instead.
|
||||
|
||||
# Table of Contents
|
||||
|
||||
* [Installation](#installation)
|
||||
* [Docker](#docker-recommended)
|
||||
* [Locally](#locally)
|
||||
* [Heroku](#heroku-quick-deployhttpsherokucomabout)
|
||||
* [Bot Authentication](#bot-authentication)
|
||||
* [Instance Configuration](#instance-configuration)
|
||||
* [Run Your Bot and Start Moderating](#run-your-bot-and-start-moderating)
|
||||
|
||||
# Installation
|
||||
|
||||
In order to run a ContextMod instance you must first install it somewhere.
|
||||
|
||||
ContextMod can be run on almost any operating system but it is recommended to use Docker due to ease of deployment.
|
||||
|
||||
## Docker (Recommended)
|
||||
|
||||
PROTIP: Using a container management tool like [Portainer.io CE](https://www.portainer.io/products/community-edition) will help with setup/configuration tremendously.
|
||||
|
||||
### [Dockerhub](https://hub.docker.com/r/foxxmd/context-mod)
|
||||
|
||||
```
|
||||
foxxmd/context-mod:latest
|
||||
```
|
||||
|
||||
Adding **environmental variables** to your `docker run` command will pass them through to the app EX:
|
||||
```
|
||||
docker run -d -e "CLIENT_ID=myId" ... foxxmd/context-mod
|
||||
```
|
||||
|
||||
## Locally
|
||||
|
||||
Requirements:
|
||||
|
||||
* Typescript >=4.3.5
|
||||
* Node >=15
|
||||
|
||||
Clone this repository somewhere and then install from the working directory
|
||||
|
||||
```bash
|
||||
git clone https://github.com/FoxxMD/context-mod.git
|
||||
cd context-mod
|
||||
npm install
|
||||
tsc -p .
|
||||
```
|
||||
|
||||
## [Heroku Quick Deploy](https://heroku.com/about)
|
||||
[](https://dashboard.heroku.com/new?template=https://github.com/FoxxMD/context-mod)
|
||||
|
||||
This template provides a **web** and **worker** dyno for heroku.
|
||||
|
||||
* **Web** -- Will run the bot **and** the web interface for ContextMod.
|
||||
* **Worker** -- Will run **just** the bot.
|
||||
|
||||
Be aware that Heroku's [free dyno plan](https://devcenter.heroku.com/articles/free-dyno-hours#dyno-sleeping) enacts some limits:
|
||||
|
||||
* A **Web** dyno will go to sleep (pause) after 30 minutes without web activity -- so your bot will ALSO go to sleep at this time
|
||||
* The **Worker** dyno **will not** go to sleep but you will NOT be able to access the web interface. You can, however, still see how CM is running by reading the logs for the dyno.
|
||||
|
||||
If you want to use a free dyno it is recommended you perform first-time setup (bot authentication and configuration, testing, etc...) with the **Web** dyno, then SWITCH to a **Worker** dyno so it can run 24/7.
|
||||
|
||||
# Bot Authentication
|
||||
|
||||
Next you need to create a bot and authenticate it with Reddit. Follow the [bot authentication guide](/docs/botAuthentication.md) to complete this step.
|
||||
|
||||
# Instance Configuration
|
||||
|
||||
Finally, you must provide the credentials you received from the **Bot Authentication** step to the ContextMod instance you installed earlier. Refer to the [Operator Configuration](/docs/operatorConfiguration.md) guide to learn how this can be done as there are multiple approaches depending on how you installed the software.
|
||||
|
||||
Additionally, at this step you can also tweak many more settings and behavior concerning how your CM bot will operate.
|
||||
|
||||
# Run Your Bot and Start Moderating
|
||||
|
||||
Congratulations! You should now have a fully authenticated bot running on ContextMod software.
|
||||
|
||||
In order for your Bot to operate on reddit though it **must be a moderator in the subreddit you want it to run in.** This may be your own subreddit or someone else's.
|
||||
|
||||
**Note: ContextMod does not currently handle moderation invites automatically** and may never have this functionality. Because many of its behaviors are API-heavy and subreddits control their own configuration, the API and resource (CPU/memory) usage of a ContextMod instance can vary widely. It therefore does not make sense to let any and all subreddits gain access to an instance by automatically accepting moderator invites. So... if you are planning to run a ContextMod instance for subreddits other than those you moderate, you should establish solid trust with the moderators of those subreddits, as well as a solid line of communication, to ensure their configurations can be tailored to best fit their needs and your resources.
|
||||
|
||||
Once you have logged in as your bot and manually accepted the moderator invite you will need to restart your ContextMod instance in order for these changes to take effect.
|
||||
201
docs/imageComparison.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Overview
|
||||
|
||||
ContextMod supports comparing image content, for the purpose of detecting duplicates, with two different but complementary systems. Image comparison behavior is available for the following rules:
|
||||
|
||||
* [Recent Activity](/docs/examples/recentActivity)
|
||||
* Repeat Activity (In-progress)
|
||||
|
||||
To enable comparisons reference the example below (at the top-level of your rule) and configure as needed:
|
||||
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetection",
|
||||
"kind": "recentActivity",
|
||||
// Add block below...
|
||||
//
|
||||
"imageDetection": {
|
||||
// enables image comparison
|
||||
"enable": true,
|
||||
// The difference, in percentage, between the reference submission and the submissions being checked
|
||||
// must be less than this number to consider the images "the same"
|
||||
"threshold": 5,
|
||||
// optional
|
||||
// set the behavior for determining if image comparison should occur on a URL:
|
||||
//
|
||||
// "extension" => try image detection if URL ends in a known image extension (jpeg, gif, png, bmp, etc.)
|
||||
// "unknown" => try image detection if URL ends in known image extension OR there is no extension OR the extension is unknown (not video, html, doc, etc...)
|
||||
// "all" => ALWAYS try image detection, regardless of URL extension
|
||||
//
|
||||
// if fetchBehavior is not defined then "extension" is the default
|
||||
"fetchBehavior": "extension",
|
||||
},
|
||||
//
|
||||
// And above ^^^
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
**Perceptual Hashing** (`hash`) and **Pixel Comparisons** (`pixel`) may be used at the same time. Refer to the documentation below to see how they interact.
|
||||
|
||||
**Note:** Regardless of `fetchBehavior`, if the response from the URL does not indicate it is an image then image detection will not occur. That is, the response's `Content-Type` header must contain `image`.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Both image comparison systems require [Sharp](https://sharp.pixelplumbing.com/) as a dependency. Most modern operating systems running Node.js >= 12.13.0 do not require installing additional dependencies in order to use Sharp.
|
||||
|
||||
If you are using the docker image for ContextMod (`foxxmd/context-mod`) Sharp is built-in.
|
||||
|
||||
If you are installing ContextMod using npm then **Sharp should be installed automatically as an optional dependency.**
|
||||
|
||||
**If you do not want to install it automatically** install ContextMod with the following command:
|
||||
|
||||
```
|
||||
npm install --no-optional
|
||||
```
|
||||
|
||||
If you are using ContextMod as part of a larger project you may want to require Sharp in your own package:
|
||||
|
||||
```
|
||||
npm install sharp@0.29.1 --save
|
||||
```
|
||||
|
||||
# Comparison Systems
|
||||
|
||||
## Perceptual Hashing
|
||||
|
||||
[Perceptual Hashing](https://en.wikipedia.org/wiki/Perceptual_hashing) creates a text fingerprint of an image by:
|
||||
|
||||
* Dividing up the image into a grid
|
||||
* Using an algorithm to derive a value from the pixels in each grid
|
||||
* Adding up all the values to create a unique string (the "fingerprint")
|
||||
|
||||
An example of how a perceptual hash can work [can be found here.](https://www.hackerfactor.com/blog/?/archives/432-Looks-Like-It.html)
|
||||
|
||||
ContextMod uses [blockhash-js](https://github.com/commonsmachinery/blockhash-js) which is a javascript implementation of the algorithm described in the paper [Block Mean Value Based Image Perceptual Hashing by Bian Yang, Fan Gu and Xiamu Niu.](https://ieeexplore.ieee.org/document/4041692)
|
||||
|
||||
|
||||
**Advantages**
|
||||
|
||||
* Low memory requirements and not CPU intensive
|
||||
* Does not require any image transformations
|
||||
* Hash results can be stored to make future comparisons even faster and skip downloading images (cached by url)
|
||||
* Resolution-independent
|
||||
|
||||
**Disadvantages**
|
||||
|
||||
* Hash is weak when image differences are based only on color
|
||||
* Hash is weak when image contains lots of text
|
||||
* Higher accuracy requires larger calculation (more bits required)
|
||||
|
||||
**When should I use it?**
|
||||
|
||||
* General duplicate detection
|
||||
* Comparing many images
|
||||
* Comparing the same images often
|
||||
|
||||
### How To Use
|
||||
|
||||
If `imageDetection.enable` is `true` then hashing is enabled by default and no further configuration is required.
|
||||
|
||||
To further configure hashing refer to this code block:
|
||||
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetectionAndConfiguredHashing",
|
||||
"kind": "recentActivity",
|
||||
"imageDetection": {
|
||||
"enable": true,
|
||||
// Add block below...
|
||||
//
|
||||
"hash": {
|
||||
// enable or disable hash comparisons (enabled by default)
|
||||
"enable": true,
|
||||
// determines accuracy of hash and granularity of hash comparison (comparison to other hashes)
|
||||
// the higher the bits the more accurate the comparison
|
||||
//
|
||||
// NOTE: Hashes of different sizes (bits) cannot be compared. If you are caching hashes make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.
|
||||
"bits": 32, // default is 32 if not defined
|
||||
//
|
||||
// number of seconds to cache an image hash
|
||||
"ttl": 60, // default is 60 if not defined
|
||||
//
|
||||
// "High Confidence" Threshold
|
||||
// If the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur
|
||||
//
|
||||
// Defaults to the parent-level `threshold` value if not present
|
||||
//
|
||||
// Use null if you want pixel comparison to ALWAYS occur (softThreshold must be present)
|
||||
"hardThreshold": 5,
|
||||
//
|
||||
// "Low Confidence" Threshold -- only used if `pixel` is enabled
|
||||
// If the difference in comparison is:
|
||||
//
|
||||
// 1) equal to or less than this value and
|
||||
// 2) the value is greater than `hardThreshold`
|
||||
//
|
||||
// the images will be compared using the `pixel` method
|
||||
"softThreshold": 0,
|
||||
},
|
||||
//
|
||||
// And above ^^^
|
||||
//"pixel": {...}
|
||||
},
|
||||
//...
|
||||
```
|
||||
|
||||
## Pixel Comparison
|
||||
|
||||
This approach is as straightforward as it sounds. Both images are compared, pixel by pixel, to determine the difference between the two. ContextMod uses [pixelmatch](https://github.com/mapbox/pixelmatch) to do the comparison.
|
||||
|
||||
**Advantages**
|
||||
|
||||
* Extremely accurate, high-confidence on difference percentage
|
||||
* Strong when comparing text-based images or color-only differences
|
||||
|
||||
**Disadvantages**
|
||||
|
||||
* High memory requirements (10-30MB per comparison) and CPU intensive
|
||||
* Weak against similar images with different aspect ratios
|
||||
* Requires image transformations (resize, crop) before comparison
|
||||
* Can only store image-to-image results (no single image fingerprints)
|
||||
|
||||
**When should I use it?**
|
||||
|
||||
* Require very high accuracy in comparison results
|
||||
* Comparing mostly text-based images or subtle color/detail differences
|
||||
* As a secondary, high-confidence confirmation of comparison result after hashing
|
||||
|
||||
### How To Use
|
||||
|
||||
By default pixel comparisons **are not enabled.** They must be explicitly enabled in configuration.
|
||||
|
||||
Pixel comparisons will be performed in either of these scenarios:
|
||||
|
||||
* pixel is enabled, hashing is enabled and `hash.softThreshold` is defined
|
||||
* When a comparison occurs that is less different than `softThreshold` but more different than `hardThreshold` (or `"hardThreshold": null`), then pixel comparison will occur as a high-confidence check (see the combined sketch below this list)
|
||||
* Example
|
||||
* hash comparison => 7% difference
|
||||
* `"softThreshold": 10`
|
||||
* `"hardThreshold": 4`
|
||||
* `hash.enable` is `false` and `pixel.enable` is `true`
|
||||
* hashing is skipped entirely and only pixel comparisons are performed
|
||||
|
||||
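Putting the first scenario above together into a single rule, a minimal sketch (illustrative values only, not recommendations) might look like:

```json5
{
  "name": "ruleWithHashAndPixel",
  "kind": "recentActivity",
  "imageDetection": {
    "enable": true,
    "hash": {
      "enable": true,
      // a difference of 4% or less is accepted on the hash alone (no pixel comparison)
      "hardThreshold": 4,
      // a difference between 4% and 10% triggers a pixel comparison as a high-confidence check
      "softThreshold": 10
    },
    "pixel": {
      "enable": true,
      // the pixel comparison must show a difference of 5% or less for the images to be considered the same
      "threshold": 5
    }
  }
}
```

With this sketch, a hash difference above 10% is treated as "not the same" and no pixel comparison ever runs.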
To configure pixel comparisons refer to this code block:
|
||||
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetectionAndPixelEnabled",
|
||||
"kind": "recentActivity",
|
||||
"imageDetection": {
|
||||
//"hash": {...}
|
||||
"pixel": {
|
||||
// enable or disable pixel comparisons (disabled by default)
|
||||
"enable": true,
|
||||
// if the comparison difference percentage is equal to or less than this value the images are considered the same
|
||||
//
|
||||
// if not defined the value from imageDetection.threshold will be used
|
||||
"threshold": 5
|
||||
}
|
||||
},
|
||||
//...
|
||||
```
|
||||
291
docs/operatorConfiguration.md
Normal file
@@ -0,0 +1,291 @@
|
||||
The **Operator** configuration refers to the configuration used to configure the actual application/bot. This is different
|
||||
from the **Subreddit** configuration that is defined in each Subreddit's wiki and determines the rules/actions for
|
||||
activities the Bot runs on.
|
||||
|
||||
# Table of Contents
|
||||
|
||||
* [Minimum Required Configuration](#minimum-required-configuration)
|
||||
* [Defining Configuration](#defining-configuration)
|
||||
* [CLI Usage](#cli-usage)
|
||||
* [Examples](#example-configurations)
|
||||
* [Minimum Config](#minimum-config)
|
||||
* [Using Config Overrides](#using-config-overrides)
|
||||
* [Cache Configuration](#cache-configuration)
|
||||
|
||||
# Minimum Required Configuration
|
||||
|
||||
| property | Server And Web | Server Only | Web/Bot-Auth Only |
|
||||
|:--------------:|:------------------:|:------------------:|:------------------:|
|
||||
| `clientId` | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| `clientSecret` | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| `redirectUri` | :heavy_check_mark: | :x: | :heavy_check_mark: |
|
||||
| `refreshToken` | :heavy_check_mark: | :heavy_check_mark: | :x: |
|
||||
| `accessToken` | :heavy_check_mark: | :heavy_check_mark: | :x: |
|
||||
|
||||
Refer to the **[Bot Authentication guide](/docs/botAuthentication.md)** to retrieve credentials.
|
||||
|
||||
# Defining Configuration
|
||||
|
||||
CM can be configured using **any or all** of the approaches below. Note that **at each level ALL configuration values are
|
||||
optional** but the "required configuration" mentioned above must be available when all levels are combined.
|
||||
|
||||
Any values defined at a **lower-listed** level of configuration will override any values from a higher-listed
|
||||
configuration.
|
||||
|
||||
* **ENV** -- Environment variables loaded from an [`.env`](https://github.com/toddbluhm/env-cmd) file (path may be
|
||||
specified with `--file` cli argument)
|
||||
* **ENV** -- Any already existing environment variables (exported on command line/terminal profile/etc.)
|
||||
* **FILE** -- Values specified in a JSON configuration file using the structure [in the schema](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FOperatorConfig.json)
|
||||
* **ARG** -- Values specified as CLI arguments to the program (see [ClI Usage](#cli-usage) below)
|
||||
|
||||
**Note:** When reading the **schema** if the variable is available at a level of configuration other than **FILE** it will be
|
||||
noted with the same symbol as above. The value shown is the default.
|
||||
|
||||
* To load a JSON configuration (for **FILE**) **from the command line** use the `-c` cli argument EX: `node src/index.js -c /path/to/JSON/config.json`
|
||||
* To load a JSON configuration (for **FILE**) **using an environmental variable** use `OPERATOR_CONFIG` EX: `OPERATOR_CONFIG=/path/to/JSON/config.json`
|
||||
|
||||
[**See the Operator Config Schema here**](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FOperatorConfig.json)
|
||||
|
||||
## Defining Multiple Bots or CM Instances
|
||||
|
||||
One ContextMod instance can
|
||||
|
||||
* Run multiple bots (multiple reddit accounts -- each as a bot)
|
||||
* Connect to many other, independent, ContextMod instances
|
||||
|
||||
However, the default configuration (using **ENV/ARG**) assumes your intention is to run one bot (one reddit account) on one CM instance without these additional features. This is to make this mode of operation easier for users with this intention.
|
||||
|
||||
To take advantage of these additional features you **must** use a **FILE** configuration. Learn about how this works and how to configure this scenario in the [Architecture Documentation.](/docs/serverClientArchitecture.md)
|
||||
|
||||
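As a rough illustration of the multi-bot shape only (a minimal sketch -- the credential values are placeholders and all other settings are omitted):

```json5
{
  "bots": [
    // first reddit account run as a bot
    {
      "credentials": {
        "clientId": "f4b4df1c7b2",
        "clientSecret": "34v5q1c56ub",
        "refreshToken": "34_f1w1v4",
        "accessToken": "p75_1c467b2"
      }
    },
    // a second reddit account run as a bot by the same instance
    {
      "credentials": {
        "clientId": "anotherClientId",
        "clientSecret": "anotherClientSecret",
        "refreshToken": "anotherRefreshToken",
        "accessToken": "anotherAccessToken"
      }
    }
  ]
}
```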
## CLI Usage
|
||||
|
||||
Running CM from the command line is accomplished with the following command:
|
||||
|
||||
```bash
|
||||
|
||||
node src/index.js run
|
||||
|
||||
```
|
||||
|
||||
Run `node src/index.js run help` to get a list of available command line options (denoted by **ARG** above):
|
||||
|
||||
<details>
|
||||
|
||||
```
|
||||
Usage: index [options] [command]
|
||||
|
||||
Options:
|
||||
-h, --help display help for command
|
||||
|
||||
Commands:
|
||||
run [options] [interface] Monitor new activities from configured subreddits.
|
||||
check [options] <activityIdentifier> [type] Run check(s) on a specific activity
|
||||
unmoderated [options] <subreddits...> Run checks on all unmoderated activity in the modqueue
|
||||
help [command] display help for command
|
||||
|
||||
|
||||
Options:
|
||||
-c, --operatorConfig <path> An absolute path to a JSON file to load all parameters from (default: process.env.OPERATOR_CONFIG)
|
||||
-i, --clientId <id> Client ID for your Reddit application (default: process.env.CLIENT_ID)
|
||||
-e, --clientSecret <secret> Client Secret for your Reddit application (default: process.env.CLIENT_SECRET)
|
||||
-a, --accessToken <token> Access token retrieved from authenticating an account with your Reddit Application (default: process.env.ACCESS_TOKEN)
|
||||
-r, --refreshToken <token> Refresh token retrieved from authenticating an account with your Reddit Application (default: process.env.REFRESH_TOKEN)
|
||||
-u, --redirectUri <uri> Redirect URI for your Reddit application (default: process.env.REDIRECT_URI)
|
||||
-t, --sessionSecret <secret> Secret use to encrypt session id/data (default: process.env.SESSION_SECRET || a random string)
|
||||
-s, --subreddits <list...> List of subreddits to run on. Bot will run on all subs it has access to if not defined (default: process.env.SUBREDDITS)
|
||||
-d, --logDir [dir] Absolute path to directory to store rotated logs in. Leaving undefined disables rotating logs (default: process.env.LOG_DIR)
|
||||
-l, --logLevel <level> Minimum level to log at (default: process.env.LOG_LEVEL || verbose)
|
||||
-w, --wikiConfig <path> Relative url to contextbot wiki page EX https://reddit.com/r/subreddit/wiki/<path> (default: process.env.WIKI_CONFIG || 'botconfig/contextbot')
|
||||
--snooDebug Set Snoowrap to debug. If undefined will be on if logLevel='debug' (default: process.env.SNOO_DEBUG)
|
||||
--authorTTL <s> Set the TTL (seconds) for the Author Activities shared cache (default: process.env.AUTHOR_TTL || 60)
|
||||
--heartbeat <s> Interval, in seconds, between heartbeat checks. (default: process.env.HEARTBEAT || 300)
|
||||
--softLimit <limit> When API limit remaining (600/10min) is lower than this subreddits will have SLOW MODE enabled (default: process.env.SOFT_LIMIT || 250)
|
||||
--hardLimit <limit> When API limit remaining (600/10min) is lower than this all subreddit polling will be paused until api limit reset (default: process.env.SOFT_LIMIT || 250)
|
||||
--dryRun Set all subreddits in dry run mode, overriding configurations (default: process.env.DRYRUN || false)
|
||||
--proxy <proxyEndpoint> Proxy Snoowrap requests through this endpoint (default: process.env.PROXY)
|
||||
--operator <name...> Username(s) of the reddit user(s) operating this application, used for displaying OP level info/actions in UI (default: process.env.OPERATOR)
|
||||
--operatorDisplay <name> An optional name to display who is operating this application in the UI (default: process.env.OPERATOR_DISPLAY || Anonymous)
|
||||
-p, --port <port> Port for web server to listen on (default: process.env.PORT || 8085)
|
||||
-q, --shareMod If enabled then all subreddits using the default settings to poll "unmoderated" or "modqueue" will retrieve results from a shared request to /r/mod (default: process.env.SHARE_MOD || false)
|
||||
-h, --help display help for command
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
# Example Configurations
|
||||
|
||||
## Minimum Config
|
||||
|
||||
Below are examples of the minimum required config to run the application using all three config approaches independently.
|
||||
|
||||
Using **FILE**
|
||||
<details>
|
||||
|
||||
```json5
|
||||
{
|
||||
"bots": [
|
||||
{
|
||||
"credentials": {
|
||||
"clientId": "f4b4df1c7b2",
|
||||
"clientSecret": "34v5q1c56ub",
|
||||
"refreshToken": "34_f1w1v4",
|
||||
"accessToken": "p75_1c467b2"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Using **ENV** (`.env`)
|
||||
|
||||
<details>
|
||||
|
||||
```
|
||||
CLIENT_ID=f4b4df1c7b2
|
||||
CLIENT_SECRET=34v5q1c56ub
|
||||
REFRESH_TOKEN=34_f1w1v4
|
||||
ACCESS_TOKEN=p75_1c467b2
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Using **ARG**
|
||||
|
||||
<details>
|
||||
|
||||
```
|
||||
node src/index.js run --clientId=f4b4df1c7b2 --clientSecret=34v5q1c56ub --refreshToken=34_f1w1v4 --accessToken=p75_1c467b2
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## Using Config Overrides
|
||||
|
||||
An example of using multiple configuration levels together, i.e. all of them are provided to the application:
|
||||
|
||||
**FILE**
|
||||
<details>
|
||||
|
||||
```json
|
||||
{
|
||||
"logging": {
|
||||
"level": "debug"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
**ENV** (`.env`)
|
||||
|
||||
<details>
|
||||
|
||||
```
|
||||
CLIENT_SECRET=34v5q1c56ub
|
||||
REFRESH_TOKEN=34_f1w1v4
|
||||
ACCESS_TOKEN=p75_1c467b2
|
||||
SUBREDDITS=sub1,sub2,sub3
|
||||
PORT=9008
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
**ARG**
|
||||
|
||||
<details>
|
||||
|
||||
```
|
||||
node src/index.js run --subreddits=sub1 --clientId=34v5q1c56ub
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
When all three are used together they produce these variables at runtime for the application:
|
||||
|
||||
```
|
||||
clientId: f4b4df1c7b2
|
||||
clientSecret: 34v5q1c56ub
|
||||
refreshToken: 34_f1w1v4
|
||||
accessToken: p75_1c467b2
|
||||
subreddits: sub1
|
||||
port: 9008
|
||||
log level: debug
|
||||
```
|
||||
|
||||
## Configuring Client for Many Instances
|
||||
|
||||
See the [Architecture Docs](/docs/serverClientArchitecture.md) for more information.
|
||||
|
||||
<details>
|
||||
|
||||
```json5
|
||||
{
|
||||
"bots": [
|
||||
{
|
||||
"credentials": {
|
||||
"clientId": "f4b4df1c7b2",
|
||||
"clientSecret": "34v5q1c56ub",
|
||||
"refreshToken": "34_f1w1v4",
|
||||
"accessToken": "p75_1c467b2"
|
||||
}
|
||||
}
|
||||
],
|
||||
"web": {
|
||||
"credentials": {
|
||||
"clientId": "f4b4df1c7b2",
|
||||
"clientSecret": "34v5q1c56ub",
|
||||
"redirectUri": "http://localhost:8085/callback"
|
||||
},
|
||||
"clients": [
|
||||
// server application running on this same CM instance
|
||||
{
|
||||
"host": "localhost:8095",
|
||||
"secret": "localSecret"
|
||||
},
|
||||
// a server application running somewhere else
|
||||
{
|
||||
// api endpoint and port
|
||||
"host": "mySecondContextMod.com:8095",
|
||||
"secret": "anotherSecret"
|
||||
}
|
||||
]
|
||||
},
|
||||
"api": {
|
||||
"secret": "localSecret",
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
# Cache Configuration
|
||||
|
||||
CM implements two caching backend **providers**. By default all providers use `memory`:
|
||||
|
||||
* `memory` -- in-memory (non-persistent) backend
|
||||
* `redis` -- [Redis](https://redis.io/) backend
|
||||
|
||||
Each `provider` object in configuration can be specified as:
|
||||
|
||||
* one of the above **strings** to use the **default settings** or
|
||||
* an **object** with keys to override default settings
|
||||
|
||||
A caching object in the json configuration:
|
||||
|
||||
```json5
|
||||
{
|
||||
"provider": {
|
||||
"store": "memory", // one of "memory" or "redis"
|
||||
"ttl": 60, // the default max age of a key in seconds
|
||||
"max": 500, // the maximum number of keys in the cache (for "memory" only)
|
||||
|
||||
// the below properties only apply to 'redis' provider
|
||||
"host": 'localhost',
|
||||
"port": 6379,
|
||||
"auth_pass": null,
|
||||
"db": 0,
|
||||
}
|
||||
}
|
||||
```
|
||||
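As noted above, a provider can instead be given as just a string to use the default settings for that backend, for example:

```json5
{
  // equivalent to an object with all default settings for the redis backend
  "provider": "redis"
}
```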
BIN
docs/screenshots/editor.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 125 KiB |
BIN
docs/screenshots/oauth-invite.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 148 KiB |
BIN
docs/screenshots/oauth.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 226 KiB |
BIN
docs/screenshots/subredditStatus.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 479 KiB |
71
docs/serverClientArchitecture.md
Normal file
@@ -0,0 +1,71 @@
|
||||
|
||||
# Overview
|
||||
|
||||
ContextMod's high-level functionality is separated into two **independently run** applications.
|
||||
|
||||
Each application consists of an [Express](https://expressjs.com/) web server that executes the core logic for that application and communicates via HTTP API calls:
|
||||
|
||||
Applications:
|
||||
|
||||
* **Server** -- Responsible for **running the bots** and providing an API to retrieve information on and interact with them EX start/stop bot, reload config, retrieve operational status, etc.
|
||||
* **Client** -- Responsible for serving the **web interface** and handling the bot oauth authentication flow between operators and moderators.
|
||||
|
||||
Both applications operate independently and can be run individually. Which one runs is determined by environment variables, the operator config, or CLI arguments.
|
||||
|
||||
# Authentication
|
||||
|
||||
Communication between the applications is secured using [Json Web Tokens](https://github.com/mikenicholson/passport-jwt) signed/encoded by a **shared secret** (HMAC algorithm). The secret is defined in the operator configuration.
|
||||
|
||||
# Configuration
|
||||
|
||||
## Default Mode
|
||||
|
||||
**ContextMod is designed to operate in a "monolith" mode by default.**
|
||||
|
||||
This is done by assuming that when configuration is provided by **environmental variables or CLI arguments** the user's intention is to run the client/server together with only one bot, as if ContextMod were a monolithic application. When using these configuration types the same values are passed to both the server and client to ensure interoperability/transparent usage for the operator. Some examples of this in the **operator configuration**:
|
||||
|
||||
* The **shared secret** for both client/server cannot be defined using env/cli -- at runtime a random string is generated and set as the value of `secret` on both the `api` and `web` properties.
|
||||
* The `bots` array cannot be defined using env/cli -- a single entry is generated by the configuration parser using the combined values provided from env/cli
|
||||
* The `PORT` env/cli argument only applies to the `client` web server to guarantee the default port for the `server` web server is used (so the `client` can connect to the `server`)
|
||||
|
||||
**The end result of this default behavior is that an operator who does not care about running multiple CM instances does not need to know or understand anything about the client/server architecture.**
|
||||
|
||||
## Server
|
||||
|
||||
To run a ContextMod instance as **server only (headless):**
|
||||
|
||||
* Config file -- define top-level `"mode":"server"`
|
||||
* ENV -- `MODE=server`
|
||||
* CLI - `node src/index.js run server`
|
||||
|
||||
The relevant sections of the **operator configuration** for the **Server** are:
|
||||
|
||||
* [`operator.name`](https://json-schema.app/view/%23/%23%2Fproperties%2Foperator?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FOperatorConfig.json) -- Define the reddit users who will be able to have full access to this server regardless of moderator status
|
||||
* `api`
|
||||
|
||||
### [`api`](https://json-schema.app/view/%23/%23%2Fproperties%2Fapi?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FOperatorConfig.json)
|
||||
|
||||
* `port` - The port the Server will listen on for incoming api requests. Cannot be the same as the Client (when running on the same host)
|
||||
* `secret` - The **shared secret** that will be used to verify incoming api requests coming from an authenticated Client.
|
||||
* `friendly` - An optional string to identify this **Server** on the client. It is recommended to provide this otherwise it will default to `host:port`
|
||||
|
||||
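Pulling the properties above together, a server-only operator config could look like the following sketch (ports, secrets, and usernames are placeholders; consult the linked schema for the authoritative shapes):

```json5
{
  // run this instance headless (server only)
  "mode": "server",
  "operator": {
    // reddit user(s) with full access to this server
    "name": ["yourRedditUsername"]
  },
  "api": {
    // port the Server listens on for api requests from a Client
    "port": 8095,
    // shared secret -- must match the `secret` a connecting Client uses
    "secret": "localSecret",
    // optional display name shown on the client
    "friendly": "myServer"
  },
  "bots": [
    {
      "credentials": {
        "clientId": "f4b4df1c7b2",
        "clientSecret": "34v5q1c56ub",
        "refreshToken": "34_f1w1v4",
        "accessToken": "p75_1c467b2"
      }
    }
  ]
}
```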
## Client
|
||||
|
||||
To run a ContextMod instance as **client only:**
|
||||
|
||||
* Config file -- define top-level `"mode":"client"`
* ENV -- `MODE=client`
* CLI -- `node src/index.js run client`

### [`web`](https://json-schema.app/view/%23/%23%2Fproperties%2Fweb?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FOperatorConfig.json)

In the **operator configuration**, the top-level `web` property defines the configuration for the **Client** application.

* `web.credentials` -- Defines the reddit OAuth credentials used to authenticate users for the web interface
  * Must contain a `redirectUri` property to work
  * If not specified, credentials are parsed from the ENV/CLI credentials (IE the same as the default bot)
* `web.operators` -- If not specified, parsed from `operator.name` (IE the same users defined as the bot operators)
* `port` -- The port the web interface will be served from. Defaults to `8085`
* `clients` -- An array of `BotConnection` objects that specify which **Server** instances the web interface should connect to. Each object should have:
  * `host` -- The URL where the Server API is listening EX `localhost:8085`
  * `secret` -- The **shared secret** used to sign API calls. **This must be the same as `api.secret` on the Server being connected to.**
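
And a minimal sketch of the matching **client only** configuration. This follows the property placement described in the list above; the nesting of `port` and `clients` under `web`, the credential key names, and the redirect path are assumptions for illustration (check the linked schema for the exact shape), and all values are placeholders:

```json
{
  "mode": "client",
  "web": {
    "port": 8085,
    "credentials": {
      "clientId": "yourRedditAppId",
      "clientSecret": "yourRedditAppSecret",
      "redirectUri": "http://localhost:8085/callback"
    },
    "operators": ["YourRedditUsername"],
    "clients": [
      {
        "host": "localhost:8095",
        "secret": "replaceWithYourSharedSecret"
      }
    ]
  }
}
```

Note how the `secret` in the `clients` entry matches `api.secret` from the Server sketch above, and `host` points at whatever host/port that Server's API is listening on.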
29 heroku.Dockerfile Normal file
@@ -0,0 +1,29 @@
FROM node:16-alpine3.14

ENV TZ=Etc/GMT

# vips required to run sharp library for image comparison
RUN echo "http://dl-4.alpinelinux.org/alpine/v3.14/community" >> /etc/apk/repositories \
    && apk --update add vips

RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

WORKDIR /usr/app

COPY package*.json ./
COPY tsconfig.json .

RUN npm install

ADD . /usr/app

RUN npm run build

ENV NPM_CONFIG_LOGLEVEL debug

ARG log_dir=/home/node/logs
RUN mkdir -p $log_dir
VOLUME $log_dir
ENV LOG_DIR=$log_dir

CMD [ "node", "src/index.js", "run", "all", "--port $PORT"]
4 heroku.yml Normal file
@@ -0,0 +1,4 @@
build:
  docker:
    web: heroku.Dockerfile
    worker: heroku.Dockerfile
5603 package-lock.json generated
File diff suppressed because it is too large
99 package.json
@@ -1,19 +1,22 @@
|
||||
{
|
||||
"name": "redditcontextbot",
|
||||
"version": "1.0.0",
|
||||
"version": "0.5.1",
|
||||
"description": "",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no tests installed\" && exit 1",
|
||||
"build": "tsc",
|
||||
"start": "node server.js",
|
||||
"start": "node src/index.js run",
|
||||
"guard": "ts-auto-guard src/JsonConfig.ts",
|
||||
"schema": "npm run -s schema-app & npm run -s schema-ruleset & npm run -s schema-rule & npm run -s schema-action",
|
||||
"schema-app": "typescript-json-schema tsconfig.json JSONConfig --out src/Schema/App.json --required --tsNodeRegister --refs --propOrder",
|
||||
"schema-ruleset": "typescript-json-schema tsconfig.json RuleSetJSONConfig --out src/Schema/RuleSet.json --required --tsNodeRegister --refs --propOrder",
|
||||
"schema-rule": "typescript-json-schema tsconfig.json RuleJSONConfig --out src/Schema/Rule.json --required --tsNodeRegister --refs --propOrder",
|
||||
"schema-action": "typescript-json-schema tsconfig.json ActionJSONConfig --out src/Schema/Action.json --required --tsNodeRegister --refs --propOrder",
|
||||
"schemaNotWorking": "./node_modules/.bin/ts-json-schema-generator -f tsconfig.json -p src/JsonConfig.ts -t JSONConfig --out src/Schema/vegaSchema.json"
|
||||
"schema": "npm run -s schema-app & npm run -s schema-ruleset & npm run -s schema-rule & npm run -s schema-action & npm run -s schema-config",
|
||||
"schema-app": "typescript-json-schema tsconfig.json JSONConfig --out src/Schema/App.json --required --tsNodeRegister --refs",
|
||||
"schema-ruleset": "typescript-json-schema tsconfig.json RuleSetJson --out src/Schema/RuleSet.json --required --tsNodeRegister --refs",
|
||||
"schema-rule": "typescript-json-schema tsconfig.json RuleJson --out src/Schema/Rule.json --required --tsNodeRegister --refs",
|
||||
"schema-action": "typescript-json-schema tsconfig.json ActionJson --out src/Schema/Action.json --required --tsNodeRegister --refs",
|
||||
"schema-config": "typescript-json-schema tsconfig.json OperatorJsonConfig --out src/Schema/OperatorConfig.json --required --tsNodeRegister --refs",
|
||||
"schemaNotWorking": "./node_modules/.bin/ts-json-schema-generator -f tsconfig.json -p src/JsonConfig.ts -t JSONConfig --out src/Schema/vegaSchema.json",
|
||||
"circular": "madge --circular --extensions ts src/index.ts",
|
||||
"circular-graph": "madge --image graph.svg --circular --extensions ts src/index.ts"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=15"
|
||||
@@ -22,26 +25,96 @@
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"ajv": "^6.12.6",
|
||||
"@awaitjs/express": "^0.8.0",
|
||||
"@stdlib/regexp-regexp": "^0.0.6",
|
||||
"ajv": "^7.2.4",
|
||||
"async": "^3.2.0",
|
||||
"autolinker": "^3.14.3",
|
||||
"body-parser": "^1.19.0",
|
||||
"cache-manager": "^3.4.4",
|
||||
"cache-manager-redis-store": "^2.0.0",
|
||||
"commander": "^8.0.0",
|
||||
"cookie-parser": "^1.3.5",
|
||||
"dayjs": "^1.10.5",
|
||||
"deepmerge": "^4.2.2",
|
||||
"delimiter-stream": "^3.0.1",
|
||||
"ejs": "^3.1.6",
|
||||
"env-cmd": "^10.1.0",
|
||||
"es6-error": "^4.1.1",
|
||||
"express": "^4.17.1",
|
||||
"express-session": "^1.17.2",
|
||||
"express-session-cache-manager": "^1.0.2",
|
||||
"express-socket.io-session": "^1.3.5",
|
||||
"fast-deep-equal": "^3.1.3",
|
||||
"minimist": "^1.2.5",
|
||||
"fuse.js": "^6.4.6",
|
||||
"got": "^11.8.2",
|
||||
"he": "^1.2.0",
|
||||
"http-proxy": "^1.18.1",
|
||||
"image-size": "^1.0.0",
|
||||
"js-yaml": "^4.1.0",
|
||||
"json5": "^2.2.0",
|
||||
"jsonwebtoken": "^8.5.1",
|
||||
"leven": "^3.1.0",
|
||||
"lodash": "^4.17.21",
|
||||
"lru-cache": "^6.0.0",
|
||||
"monaco-editor": "^0.27.0",
|
||||
"mustache": "^4.2.0",
|
||||
"node-fetch": "^2.6.1",
|
||||
"normalize-url": "^6.1.0",
|
||||
"object-hash": "^2.2.0",
|
||||
"p-event": "^4.2.0",
|
||||
"p-map": "^4.0.0",
|
||||
"passport": "^0.4.1",
|
||||
"passport-custom": "^1.1.1",
|
||||
"passport-jwt": "^4.0.0",
|
||||
"pixelmatch": "^5.2.1",
|
||||
"pretty-print-json": "^1.0.3",
|
||||
"safe-stable-stringify": "^1.1.1",
|
||||
"set-random-interval": "^1.1.0",
|
||||
"snoostorm": "^1.5.2",
|
||||
"snoowrap": "^1.23.0",
|
||||
"winston": "FoxxMD/winston#9639da027cd4f3b46b055b0193f240639ef53409",
|
||||
"winston-daily-rotate-file": "^4.5.5"
|
||||
"socket.io": "^4.1.3",
|
||||
"tcp-port-used": "^1.0.2",
|
||||
"triple-beam": "^1.3.0",
|
||||
"typescript": "^4.3.4",
|
||||
"webhook-discord": "^3.7.7",
|
||||
"winston": "FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
|
||||
"winston-daily-rotate-file": "^4.5.5",
|
||||
"winston-duplex": "^0.1.1",
|
||||
"winston-transport": "^4.4.0",
|
||||
"zlib": "^1.0.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@tsconfig/node14": "^1.0.0",
|
||||
"@types/minimist": "^1.2.1",
|
||||
"@types/async": "^3.2.7",
|
||||
"@types/cache-manager": "^3.4.2",
|
||||
"@types/cache-manager-redis-store": "^2.0.0",
|
||||
"@types/cookie-parser": "^1.4.2",
|
||||
"@types/express": "^4.17.13",
|
||||
"@types/express-session": "^1.17.4",
|
||||
"@types/express-socket.io-session": "^1.3.6",
|
||||
"@types/he": "^1.1.1",
|
||||
"@types/http-proxy": "^1.17.7",
|
||||
"@types/js-yaml": "^4.0.1",
|
||||
"@types/jsonwebtoken": "^8.5.4",
|
||||
"@types/lodash": "^4.14.171",
|
||||
"@types/lru-cache": "^5.1.1",
|
||||
"@types/memory-cache": "^0.2.1",
|
||||
"@types/mustache": "^4.1.1",
|
||||
"@types/node": "^15.6.1",
|
||||
"@types/node-fetch": "^2.5.10",
|
||||
"@types/object-hash": "^2.1.0",
|
||||
"@types/passport": "^1.0.7",
|
||||
"@types/passport-jwt": "^3.0.6",
|
||||
"@types/pixelmatch": "^5.2.4",
|
||||
"@types/sharp": "^0.29.2",
|
||||
"@types/tcp-port-used": "^1.0.0",
|
||||
"@types/triple-beam": "^1.3.2",
|
||||
"ts-auto-guard": "*",
|
||||
"ts-json-schema-generator": "^0.93.0",
|
||||
"typescript-json-schema": "^0.50.1"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"sharp": "^0.29.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,23 +1,38 @@
|
||||
import {CommentAction, CommentActionJSONConfig} from "./CommentAction";
|
||||
import {CommentAction, CommentActionJson} from "./CommentAction";
|
||||
import LockAction from "./LockAction";
|
||||
import {RemoveAction} from "./RemoveAction";
|
||||
import {ReportAction, ReportActionJSONConfig} from "./ReportAction";
|
||||
import {FlairAction, FlairActionJSONConfig} from "./SubmissionAction/FlairAction";
|
||||
import Action, {ActionJSONConfig} from "./index";
|
||||
import {ReportAction, ReportActionJson} from "./ReportAction";
|
||||
import {FlairAction, FlairActionJson} from "./SubmissionAction/FlairAction";
|
||||
import Action, {ActionJson} from "./index";
|
||||
import {Logger} from "winston";
|
||||
import {UserNoteAction, UserNoteActionJson} from "./UserNoteAction";
|
||||
import ApproveAction, {ApproveActionConfig} from "./ApproveAction";
|
||||
import BanAction, {BanActionJson} from "./BanAction";
|
||||
import {MessageAction, MessageActionJson} from "./MessageAction";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import Snoowrap from "snoowrap";
|
||||
|
||||
export function actionFactory
|
||||
(config: ActionJSONConfig): Action {
|
||||
(config: ActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Action {
|
||||
switch (config.kind) {
|
||||
case 'comment':
|
||||
return new CommentAction(config as CommentActionJSONConfig);
|
||||
return new CommentAction({...config as CommentActionJson, logger, subredditName, resources, client});
|
||||
case 'lock':
|
||||
return new LockAction();
|
||||
return new LockAction({...config, logger, subredditName, resources, client});
|
||||
case 'remove':
|
||||
return new RemoveAction();
|
||||
return new RemoveAction({...config, logger, subredditName, resources, client});
|
||||
case 'report':
|
||||
return new ReportAction(config as ReportActionJSONConfig);
|
||||
return new ReportAction({...config as ReportActionJson, logger, subredditName, resources, client});
|
||||
case 'flair':
|
||||
return new FlairAction(config as FlairActionJSONConfig);
|
||||
return new FlairAction({...config as FlairActionJson, logger, subredditName, resources, client});
|
||||
case 'approve':
|
||||
return new ApproveAction({...config as ApproveActionConfig, logger, subredditName, resources, client});
|
||||
case 'usernote':
|
||||
return new UserNoteAction({...config as UserNoteActionJson, logger, subredditName, resources, client});
|
||||
case 'ban':
|
||||
return new BanAction({...config as BanActionJson, logger, subredditName, resources, client});
|
||||
case 'message':
|
||||
return new MessageAction({...config as MessageActionJson, logger, subredditName, resources, client});
|
||||
default:
|
||||
throw new Error('rule "kind" was not recognized.');
|
||||
}
|
||||
|
||||
46 src/Action/ApproveAction.ts Normal file
@@ -0,0 +1,46 @@
|
||||
import {ActionJson, ActionConfig} from "./index";
|
||||
import Action from "./index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {ActionProcessResult} from "../Common/interfaces";
|
||||
|
||||
export class ApproveAction extends Action {
|
||||
getKind() {
|
||||
return 'Approve';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
//snoowrap typing issue, thinks `approved` does not exist on Comments
|
||||
// @ts-ignore
|
||||
if (item.approved) {
|
||||
this.logger.warn('Item is already approved');
|
||||
return {
|
||||
dryRun,
|
||||
success: false,
|
||||
result: 'Item is already approved'
|
||||
}
|
||||
}
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.approve();
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export interface ApproveActionConfig extends ActionConfig {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Approve the Activity this Check is run on
|
||||
* */
|
||||
export interface ApproveActionJson extends ApproveActionConfig, ActionJson {
|
||||
kind: 'approve'
|
||||
}
|
||||
|
||||
export default ApproveAction;
|
||||
115 src/Action/BanAction.ts Normal file
@@ -0,0 +1,115 @@
|
||||
import {ActionJson, ActionConfig, ActionOptions} from "./index";
|
||||
import Action from "./index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {renderContent} from "../Utils/SnoowrapUtils";
|
||||
import {ActionProcessResult, Footer} from "../Common/interfaces";
|
||||
|
||||
export class BanAction extends Action {
|
||||
|
||||
message?: string;
|
||||
reason?: string;
|
||||
duration?: number;
|
||||
note?: string;
|
||||
footer?: false | string;
|
||||
|
||||
constructor(options: BanActionOptions) {
|
||||
super(options);
|
||||
const {
|
||||
message,
|
||||
reason,
|
||||
duration,
|
||||
note,
|
||||
footer,
|
||||
} = options;
|
||||
this.footer = footer;
|
||||
this.message = message;
|
||||
this.reason = reason;
|
||||
this.duration = duration;
|
||||
this.note = note;
|
||||
}
|
||||
|
||||
getKind() {
|
||||
return 'Ban';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const content = this.message === undefined ? undefined : await this.resources.getContent(this.message, item.subreddit);
|
||||
const renderedBody = content === undefined ? undefined : await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
const renderedContent = renderedBody === undefined ? undefined : `${renderedBody}${await this.resources.generateFooter(item, this.footer)}`;
|
||||
|
||||
let banPieces = [];
|
||||
banPieces.push(`Message: ${renderedContent === undefined ? 'None' : `${renderedContent.length > 100 ? `\r\n${renderedContent}` : renderedContent}`}`);
|
||||
banPieces.push(`Reason: ${this.reason || 'None'}`);
|
||||
banPieces.push(`Note: ${this.note || 'None'}`);
|
||||
const durText = this.duration === undefined ? 'permanently' : `for ${this.duration} days`;
|
||||
this.logger.info(`Banning ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`);
|
||||
this.logger.verbose(`\r\n${banPieces.join('\r\n')}`);
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
const fetchedSub = await item.subreddit.fetch();
|
||||
const fetchedName = await item.author.name;
|
||||
await fetchedSub.banUser({
|
||||
name: fetchedName,
|
||||
banMessage: renderedContent === undefined ? undefined : renderedContent,
|
||||
banReason: this.reason,
|
||||
banNote: this.note,
|
||||
duration: this.duration
|
||||
});
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export interface BanActionConfig extends ActionConfig, Footer {
|
||||
/**
|
||||
* The message that is sent in the ban notification. `message` is interpreted as reddit-flavored Markdown.
|
||||
*
|
||||
* If the value starts with `wiki:` then the value following it will be used to get a wiki page
|
||||
*
|
||||
* EX `wiki:botconfig/mybot` tries to get `https://reddit.com/r/mySubredditExample/wiki/botconfig/mybot`
|
||||
*
|
||||
* EX `this is plain text` => "this is plain text"
|
||||
*
|
||||
* EX `this is **bold** markdown text` => "this is **bold** markdown text"
|
||||
*
|
||||
* @examples ["This is the content of a comment/report/usernote", "this is **bold** markdown text", "wiki:botconfig/acomment" ]
|
||||
* */
|
||||
message?: string
|
||||
/**
|
||||
* Reason for ban.
|
||||
* @maxLength 100
|
||||
* @examples ["repeat spam"]
|
||||
* */
|
||||
reason?: string
|
||||
/**
|
||||
* Number of days to ban the Author. If not specified Author will be banned permanently.
|
||||
* @minimum 1
|
||||
* @maximum 999
|
||||
* @examples [90]
|
||||
* */
|
||||
duration?: number
|
||||
/**
|
||||
* A mod note for this ban
|
||||
* @maxLength 100
|
||||
* @examples ["Sock puppet for u/AnotherUser"]
|
||||
* */
|
||||
note?: string
|
||||
}
|
||||
|
||||
export interface BanActionOptions extends BanActionConfig, ActionOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* Ban the Author of the Activity this Check is run on
|
||||
* */
|
||||
export interface BanActionJson extends BanActionConfig, ActionJson {
|
||||
kind: 'ban',
|
||||
}
|
||||
|
||||
export default BanAction;
|
||||
@@ -1,22 +1,17 @@
|
||||
import Action, {ActionJSONConfig, ActionConfig, ActionOptions} from "./index";
|
||||
import Snoowrap, {Comment} from "snoowrap";
|
||||
import Action, {ActionJson, ActionOptions} from "./index";
|
||||
import {Comment} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {renderContent} from "../Utils/SnoowrapUtils";
|
||||
import {RichContent} from "../Common/interfaces";
|
||||
import {ActionProcessResult, Footer, RequiredRichContent, RichContent} from "../Common/interfaces";
|
||||
import {RuleResult} from "../Rule";
|
||||
|
||||
export const WIKI_DESCRIM = 'wiki:';
|
||||
import {truncateStringToLength} from "../util";
|
||||
|
||||
export class CommentAction extends Action {
|
||||
content: string;
|
||||
hasWiki: boolean;
|
||||
wiki?: string;
|
||||
wikiFetched?: Dayjs;
|
||||
lock: boolean = false;
|
||||
sticky: boolean = false;
|
||||
distinguish: boolean = false;
|
||||
name?: string = 'Comment';
|
||||
footer?: false | string;
|
||||
|
||||
constructor(options: CommentActionOptions) {
|
||||
super(options);
|
||||
@@ -25,42 +20,70 @@ export class CommentAction extends Action {
|
||||
lock = false,
|
||||
sticky = false,
|
||||
distinguish = false,
|
||||
footer,
|
||||
} = options;
|
||||
this.hasWiki = content.trim().substring(0, WIKI_DESCRIM.length) === WIKI_DESCRIM;
|
||||
this.footer = footer;
|
||||
this.content = content;
|
||||
if (this.hasWiki) {
|
||||
this.wiki = this.content.trim().substring(WIKI_DESCRIM.length);
|
||||
}
|
||||
this.lock = lock;
|
||||
this.sticky = sticky;
|
||||
this.distinguish = distinguish;
|
||||
}
|
||||
|
||||
async handle(item: Comment | Submission, ruleResults: RuleResult[]): Promise<void> {
|
||||
if (this.hasWiki && (this.wikiFetched === undefined || Math.abs(dayjs().diff(this.wikiFetched, 'minute')) > 5)) {
|
||||
try {
|
||||
const wiki = item.subreddit.getWikiPage(this.wiki as string);
|
||||
this.content = await wiki.content_md;
|
||||
this.wikiFetched = dayjs();
|
||||
} catch (err) {
|
||||
this.logger.error(err);
|
||||
throw new Error(`Could not read wiki page. Please ensure the page '${this.wiki}' exists and is readable`);
|
||||
getKind() {
|
||||
return 'Comment';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const content = await this.resources.getContent(this.content, item.subreddit);
|
||||
const body = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
|
||||
const footer = await this.resources.generateFooter(item, this.footer);
|
||||
|
||||
const renderedContent = `${body}${footer}`;
|
||||
this.logger.verbose(`Contents:\r\n${renderedContent.length > 100 ? `\r\n${renderedContent}` : renderedContent}`);
|
||||
|
||||
if(item.archived) {
|
||||
this.logger.warn('Cannot comment because Item is archived');
|
||||
return {
|
||||
dryRun,
|
||||
success: false,
|
||||
result: 'Cannot comment because Item is archived'
|
||||
};
|
||||
}
|
||||
let reply: Comment;
|
||||
if(!dryRun) {
|
||||
// @ts-ignore
|
||||
reply = await item.reply(renderedContent);
|
||||
}
|
||||
if (this.lock) {
|
||||
if (!dryRun) {
|
||||
// snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
await item.lock();
|
||||
}
|
||||
}
|
||||
// @ts-ignore
|
||||
const reply: Comment = await item.reply(renderContent(this.content, item, ruleResults));
|
||||
if (this.lock && item instanceof Submission) {
|
||||
// @ts-ignore
|
||||
await item.lock();
|
||||
}
|
||||
if (this.distinguish) {
|
||||
if (this.distinguish && !dryRun) {
|
||||
// @ts-ignore
|
||||
await reply.distinguish({sticky: this.sticky});
|
||||
}
|
||||
let modifiers = [];
|
||||
if(this.distinguish) {
|
||||
modifiers.push('Distinguished');
|
||||
}
|
||||
if(this.sticky) {
|
||||
modifiers.push('Stickied');
|
||||
}
|
||||
const modifierStr = modifiers.length === 0 ? '' : `[${modifiers.join(' | ')}]`;
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: `${modifierStr}${this.lock ? ' - Locked Author\'s Activity - ' : ''}${truncateStringToLength(100)(body)}`
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export interface CommentActionConfig extends RichContent {
|
||||
export interface CommentActionConfig extends RequiredRichContent, Footer {
|
||||
/**
|
||||
* Lock the comment after creation?
|
||||
* */
|
||||
@@ -75,12 +98,12 @@ export interface CommentActionConfig extends RichContent {
|
||||
distinguish?: boolean,
|
||||
}
|
||||
|
||||
export interface CommentActionOptions extends CommentActionConfig,ActionOptions {
|
||||
export interface CommentActionOptions extends CommentActionConfig, ActionOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* Reply to the Activity. For a submission the reply will be a top-level comment.
|
||||
* */
|
||||
export interface CommentActionJSONConfig extends CommentActionConfig, ActionJSONConfig {
|
||||
|
||||
export interface CommentActionJson extends CommentActionConfig, ActionJson {
|
||||
kind: 'comment'
|
||||
}
|
||||
|
||||
@@ -1,15 +1,35 @@
|
||||
import {ActionJSONConfig, ActionConfig} from "./index";
|
||||
import {ActionJson, ActionConfig} from "./index";
|
||||
import Action from "./index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {ActionProcessResult} from "../Common/interfaces";
|
||||
|
||||
export class LockAction extends Action {
|
||||
name?: string = 'Lock';
|
||||
async handle(item: Comment|Submission, ruleResults: RuleResult[]): Promise<void> {
|
||||
if (item instanceof Submission) {
|
||||
getKind() {
|
||||
return 'Lock';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
if (item.locked) {
|
||||
this.logger.warn('Item is already locked');
|
||||
return {
|
||||
dryRun,
|
||||
success: false,
|
||||
result: 'Item is already locked'
|
||||
};
|
||||
}
|
||||
if (!dryRun) {
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
await item.lock();
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,8 +40,8 @@ export interface LockActionConfig extends ActionConfig {
|
||||
/**
|
||||
* Lock the Activity
|
||||
* */
|
||||
export interface LockActionJSONConfig extends LockActionConfig, ActionJSONConfig {
|
||||
|
||||
export interface LockActionJson extends LockActionConfig, ActionJson {
|
||||
kind: 'lock'
|
||||
}
|
||||
|
||||
export default LockAction;
|
||||
|
||||
145 src/Action/MessageAction.ts Normal file
@@ -0,0 +1,145 @@
|
||||
import Action, {ActionJson, ActionOptions} from "./index";
|
||||
import {Comment, ComposeMessageParams} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {renderContent} from "../Utils/SnoowrapUtils";
|
||||
import {ActionProcessResult, Footer, RequiredRichContent, RichContent} from "../Common/interfaces";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {
|
||||
asSubmission,
|
||||
boolToString,
|
||||
isSubmission,
|
||||
parseRedditEntity,
|
||||
REDDIT_ENTITY_REGEX_URL,
|
||||
truncateStringToLength
|
||||
} from "../util";
|
||||
import SimpleError from "../Utils/SimpleError";
|
||||
|
||||
export class MessageAction extends Action {
|
||||
content: string;
|
||||
lock: boolean = false;
|
||||
sticky: boolean = false;
|
||||
distinguish: boolean = false;
|
||||
footer?: false | string;
|
||||
|
||||
title?: string;
|
||||
to?: string;
|
||||
asSubreddit: boolean;
|
||||
|
||||
constructor(options: MessageActionOptions) {
|
||||
super(options);
|
||||
const {
|
||||
content,
|
||||
asSubreddit,
|
||||
title,
|
||||
footer,
|
||||
to,
|
||||
} = options;
|
||||
this.to = to;
|
||||
this.footer = footer;
|
||||
this.content = content;
|
||||
this.asSubreddit = asSubreddit;
|
||||
this.title = title;
|
||||
}
|
||||
|
||||
getKind() {
|
||||
return 'Message';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const content = await this.resources.getContent(this.content);
|
||||
const body = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
|
||||
const footer = await this.resources.generateFooter(item, this.footer);
|
||||
|
||||
const renderedContent = `${body}${footer}`;
|
||||
|
||||
let recipient = item.author.name;
|
||||
if(this.to !== undefined) {
|
||||
// parse to value
|
||||
try {
|
||||
const entityData = parseRedditEntity(this.to);
|
||||
if(entityData.type === 'user') {
|
||||
recipient = entityData.name;
|
||||
} else {
|
||||
recipient = `/r/${entityData.name}`;
|
||||
}
|
||||
} catch (err) {
|
||||
this.logger.error(`'to' field for message was not in a valid format. See ${REDDIT_ENTITY_REGEX_URL} for valid examples`);
|
||||
this.logger.error(err);
|
||||
err.logged = true;
|
||||
throw err;
|
||||
}
|
||||
if(recipient.includes('/r/') && this.asSubreddit) {
|
||||
throw new SimpleError(`Cannot send a message as a subreddit to another subreddit. Requested recipient: ${recipient}`);
|
||||
}
|
||||
}
|
||||
|
||||
const msgOpts: ComposeMessageParams = {
|
||||
to: recipient,
|
||||
text: renderedContent,
|
||||
// @ts-ignore
|
||||
fromSubreddit: this.asSubreddit ? await item.subreddit.fetch() : undefined,
|
||||
subject: this.title || `Concerning your ${isSubmission(item) ? 'Submission' : 'Comment'}`,
|
||||
};
|
||||
|
||||
const msgPreview = `\r\n
|
||||
TO: ${recipient}\r\n
|
||||
Subject: ${msgOpts.subject}\r\n
|
||||
Sent As Modmail: ${boolToString(this.asSubreddit)}\r\n\r\n
|
||||
${renderedContent}`;
|
||||
|
||||
this.logger.verbose(`Message Preview => \r\n ${msgPreview}`);
|
||||
|
||||
if (!dryRun) {
|
||||
await this.client.composeMessage(msgOpts);
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: truncateStringToLength(200)(msgPreview)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export interface MessageActionConfig extends RequiredRichContent, Footer {
|
||||
/**
|
||||
* Should this message be sent from modmail (as the subreddit) or as the bot user?
|
||||
* */
|
||||
asSubreddit: boolean
|
||||
|
||||
/**
|
||||
* Entity to send message to.
|
||||
*
|
||||
* If not present the Message will be sent to the Author of the Activity being checked.
|
||||
*
|
||||
* Valid formats:
|
||||
*
|
||||
* * `aUserName` -- send to /u/aUserName
|
||||
* * `u/aUserName` -- send to /u/aUserName
|
||||
* * `r/aSubreddit` -- send to modmail of /r/aSubreddit
|
||||
*
|
||||
* **Note:** Reddit does not support sending a message AS a subreddit TO another subreddit
|
||||
*
|
||||
* @pattern ^\s*(\/[ru]\/|[ru]\/)*(\w+)*\s*$
|
||||
* @examples ["aUserName","u/aUserName","r/aSubreddit"]
|
||||
* */
|
||||
to?: string
|
||||
|
||||
/**
|
||||
* The title of the message
|
||||
*
|
||||
* If not specified will be defaulted to `Concerning your [Submission/Comment]`
|
||||
* */
|
||||
title?: string
|
||||
}
|
||||
|
||||
export interface MessageActionOptions extends MessageActionConfig, ActionOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a private message to the Author of the Activity.
|
||||
* */
|
||||
export interface MessageActionJson extends MessageActionConfig, ActionJson {
|
||||
kind: 'message'
|
||||
}
|
||||
@@ -1,13 +1,35 @@
|
||||
import {ActionJSONConfig, ActionConfig} from "./index";
|
||||
import {ActionJson, ActionConfig} from "./index";
|
||||
import Action from "./index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {activityIsRemoved} from "../Utils/SnoowrapUtils";
|
||||
import {ActionProcessResult} from "../Common/interfaces";
|
||||
|
||||
export class RemoveAction extends Action {
|
||||
name?: string = 'Remove';
|
||||
async handle(item: Comment|Submission, ruleResults: RuleResult[]): Promise<void> {
|
||||
getKind() {
|
||||
return 'Remove';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
// issue with snoowrap typings, doesn't think prop exists on Submission
|
||||
// @ts-ignore
|
||||
await item.remove();
|
||||
if (activityIsRemoved(item)) {
|
||||
return {
|
||||
dryRun,
|
||||
success: false,
|
||||
result: 'Item is already removed',
|
||||
}
|
||||
}
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.remove();
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +40,6 @@ export interface RemoveActionConfig extends ActionConfig {
|
||||
/**
|
||||
* Remove the Activity
|
||||
* */
|
||||
export interface RemoveActionJSONConfig extends RemoveActionConfig, ActionJSONConfig {
|
||||
|
||||
export interface RemoveActionJson extends RemoveActionConfig, ActionJson {
|
||||
kind: 'remove'
|
||||
}
|
||||
|
||||
@@ -1,36 +1,48 @@
|
||||
import {ActionJSONConfig, ActionConfig, ActionOptions} from "./index";
|
||||
import {ActionJson, ActionConfig, ActionOptions} from "./index";
|
||||
import Action from "./index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {truncateStringToLength} from "../util";
|
||||
import {renderContent} from "../Utils/SnoowrapUtils";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {ActionProcessResult, RichContent} from "../Common/interfaces";
|
||||
|
||||
// https://www.reddit.com/dev/api/oauth#POST_api_report
|
||||
// denotes 100 characters maximum
|
||||
const reportTrunc = truncateStringToLength(100);
|
||||
// actually only applies to VISIBLE text on OLD reddit... on old reddit rest of text is visible on hover. on new reddit the whole thing displays (up to at least 400 characters)
|
||||
|
||||
export class ReportAction extends Action {
|
||||
content: string;
|
||||
name?: string = 'Report';
|
||||
|
||||
constructor(options: ReportActionOptions) {
|
||||
super(options);
|
||||
this.content = options.content || '';
|
||||
}
|
||||
|
||||
async handle(item: Comment | Submission, ruleResults: RuleResult[]): Promise<void> {
|
||||
const renderedContent = await renderContent(this.content, item, ruleResults);
|
||||
getKind() {
|
||||
return 'Report';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const content = await this.resources.getContent(this.content, item.subreddit);
|
||||
const renderedContent = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
this.logger.verbose(`Contents:\r\n${renderedContent}`);
|
||||
const truncatedContent = reportTrunc(renderedContent);
|
||||
// @ts-ignore
|
||||
await item.report({reason: truncatedContent});
|
||||
if(!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.report({reason: truncatedContent});
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: truncatedContent
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export interface ReportActionConfig {
|
||||
/**
|
||||
* The text of the report. If longer than 100 characters will be truncated to "[content]..."
|
||||
* */
|
||||
content: string,
|
||||
export interface ReportActionConfig extends RichContent {
|
||||
}
|
||||
|
||||
export interface ReportActionOptions extends ReportActionConfig, ActionOptions {
|
||||
@@ -39,6 +51,6 @@ export interface ReportActionOptions extends ReportActionConfig, ActionOptions {
|
||||
/**
|
||||
* Report the Activity
|
||||
* */
|
||||
export interface ReportActionJSONConfig extends ReportActionConfig, ActionJSONConfig {
|
||||
|
||||
export interface ReportActionJson extends ReportActionConfig, ActionJson {
|
||||
kind: 'report'
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import {SubmissionActionConfig} from "./index";
|
||||
import Action, {ActionJSONConfig} from "../index";
|
||||
import Action, {ActionJson, ActionOptions} from "../index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RuleResult} from "../../Rule";
|
||||
import {ActionProcessResult} from "../../Common/interfaces";
|
||||
|
||||
export class FlairAction extends Action {
|
||||
text: string;
|
||||
css: string;
|
||||
name?: string = 'Flair';
|
||||
|
||||
constructor(options: FlairActionOptions) {
|
||||
super(options);
|
||||
@@ -17,10 +17,38 @@ export class FlairAction extends Action {
|
||||
this.css = options.css || '';
|
||||
}
|
||||
|
||||
async handle(item: Comment | Submission, ruleResults: RuleResult[]): Promise<void> {
|
||||
getKind() {
|
||||
return 'Flair';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
let flairParts = [];
|
||||
if(this.text !== '') {
|
||||
flairParts.push(`Text: ${this.text}`);
|
||||
}
|
||||
if(this.css !== '') {
|
||||
flairParts.push(`CSS: ${this.css}`);
|
||||
}
|
||||
const flairSummary = flairParts.length === 0 ? 'No flair (unflaired)' : flairParts.join(' | ');
|
||||
this.logger.verbose(flairSummary);
|
||||
if (item instanceof Submission) {
|
||||
// @ts-ignore
|
||||
await item.assignFlair({text: this.text, cssClass: this.css})
|
||||
if(!this.dryRun) {
|
||||
// @ts-ignore
|
||||
await item.assignFlair({text: this.text, cssClass: this.css})
|
||||
}
|
||||
} else {
|
||||
this.logger.warn('Cannot flair Comment');
|
||||
return {
|
||||
dryRun,
|
||||
success: false,
|
||||
result: 'Cannot flair Comment',
|
||||
}
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: flairSummary
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -29,7 +57,7 @@ export class FlairAction extends Action {
|
||||
* @minProperties 1
|
||||
* @additionalProperties false
|
||||
* */
|
||||
export interface FlairActionOptions extends SubmissionActionConfig {
|
||||
export interface FlairActionConfig extends SubmissionActionConfig {
|
||||
/**
|
||||
* The text of the flair to apply
|
||||
* */
|
||||
@@ -40,9 +68,13 @@ export interface FlairActionOptions extends SubmissionActionConfig {
|
||||
css?: string,
|
||||
}
|
||||
|
||||
export interface FlairActionOptions extends FlairActionConfig,ActionOptions {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Flair the Submission
|
||||
* */
|
||||
export interface FlairActionJSONConfig extends FlairActionOptions, ActionJSONConfig {
|
||||
|
||||
export interface FlairActionJson extends FlairActionConfig, ActionJson {
|
||||
kind: 'flair'
|
||||
}
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
"use strict";
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SubmissionAction = void 0;
|
||||
const index_1 = __importDefault(require("../index"));
|
||||
class SubmissionAction extends index_1.default {
|
||||
}
|
||||
exports.SubmissionAction = SubmissionAction;
|
||||
//# sourceMappingURL=index.js.map
|
||||
@@ -1 +0,0 @@
|
||||
{"version":3,"file":"index.js","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":";;;;;;AAAA,qDAA8C;AAE9C,MAAsB,gBAAiB,SAAQ,eAAM;CAEpD;AAFD,4CAEC"}
|
||||
76 src/Action/UserNoteAction.ts Normal file
@@ -0,0 +1,76 @@
|
||||
import {ActionJson, ActionConfig, ActionOptions} from "./index";
|
||||
import Action from "./index";
|
||||
import {Comment} from "snoowrap";
|
||||
import {renderContent} from "../Utils/SnoowrapUtils";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {UserNote, UserNoteJson} from "../Subreddit/UserNotes";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {ActionProcessResult} from "../Common/interfaces";
|
||||
|
||||
|
||||
export class UserNoteAction extends Action {
|
||||
content: string;
|
||||
type: string;
|
||||
allowDuplicate: boolean;
|
||||
|
||||
constructor(options: UserNoteActionOptions) {
|
||||
super(options);
|
||||
const {type, content = '', allowDuplicate = false} = options;
|
||||
this.type = type;
|
||||
this.content = content;
|
||||
this.allowDuplicate = allowDuplicate;
|
||||
}
|
||||
|
||||
getKind() {
|
||||
return 'User Note';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const content = await this.resources.getContent(this.content, item.subreddit);
|
||||
const renderedContent = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
this.logger.verbose(`Note:\r\n(${this.type}) ${renderedContent}`);
|
||||
|
||||
if (!this.allowDuplicate) {
|
||||
const notes = await this.resources.userNotes.getUserNotes(item.author);
|
||||
const existingNote = notes.find((x) => x.link.includes(item.id));
|
||||
if (existingNote) {
|
||||
this.logger.info(`Will not add note because one already exists for this Activity (${existingNote.time.local().format()}) and allowDuplicate=false`);
|
||||
return {
|
||||
dryRun,
|
||||
success: false,
|
||||
result: `Will not add note because one already exists for this Activity (${existingNote.time.local().format()}) and allowDuplicate=false`
|
||||
};
|
||||
}
|
||||
}
|
||||
if (!dryRun) {
|
||||
await this.resources.userNotes.addUserNote(item, this.type, renderedContent);
|
||||
} else if (!await this.resources.userNotes.warningExists(this.type)) {
|
||||
this.logger.warn(`UserNote type '${this.type}' does not exist. If you meant to use this please add it through Toolbox first.`);
|
||||
}
|
||||
return {
|
||||
success: true,
|
||||
dryRun,
|
||||
result: `(${this.type}) ${renderedContent}`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export interface UserNoteActionConfig extends ActionConfig,UserNoteJson {
|
||||
/**
|
||||
* Add Note even if a Note already exists for this Activity
|
||||
* @examples [false]
|
||||
* @default false
|
||||
* */
|
||||
allowDuplicate?: boolean,
|
||||
}
|
||||
|
||||
export interface UserNoteActionOptions extends UserNoteActionConfig, ActionOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a Toolbox User Note to the Author of this Activity
|
||||
* */
|
||||
export interface UserNoteActionJson extends UserNoteActionConfig, ActionJson {
|
||||
kind: 'usernote'
|
||||
}
|
||||
@@ -1,55 +1,172 @@
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import {createLabelledLogger, loggerMetaShuffle} from "../util";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {ActionProcessResult, ActionResult, ChecksActivityState, TypedActivityStates} from "../Common/interfaces";
|
||||
import Author, {AuthorOptions} from "../Author/Author";
|
||||
import {mergeArr} from "../util";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
|
||||
export abstract class Action {
|
||||
name?: string;
|
||||
logger: Logger;
|
||||
resources: SubredditResources;
|
||||
client: Snoowrap
|
||||
authorIs: AuthorOptions;
|
||||
itemIs: TypedActivityStates;
|
||||
dryRun: boolean;
|
||||
enabled: boolean;
|
||||
|
||||
constructor(options: ActionOptions = {}) {
|
||||
constructor(options: ActionOptions) {
|
||||
const {
|
||||
name,
|
||||
loggerPrefix = '',
|
||||
enable = true,
|
||||
name = this.getKind(),
|
||||
resources,
|
||||
client,
|
||||
logger,
|
||||
subredditName,
|
||||
dryRun = false,
|
||||
authorIs: {
|
||||
include = [],
|
||||
exclude = [],
|
||||
} = {},
|
||||
itemIs = [],
|
||||
} = options;
|
||||
if (name !== undefined) {
|
||||
this.name = name;
|
||||
|
||||
this.name = name;
|
||||
this.dryRun = dryRun;
|
||||
this.enabled = enable;
|
||||
this.resources = resources;
|
||||
this.client = client;
|
||||
this.logger = logger.child({labels: [`Action ${this.getActionUniqueName()}`]}, mergeArr);
|
||||
|
||||
this.authorIs = {
|
||||
exclude: exclude.map(x => new Author(x)),
|
||||
include: include.map(x => new Author(x)),
|
||||
}
|
||||
if (logger === undefined) {
|
||||
const prefix = `${loggerPrefix}|${this.name}`;
|
||||
this.logger = createLabelledLogger(prefix, prefix);
|
||||
} else {
|
||||
this.logger = logger.child(loggerMetaShuffle(logger, name || 'Action', undefined, {truncateLength: 100}));
|
||||
|
||||
this.itemIs = itemIs;
|
||||
}
|
||||
|
||||
abstract getKind(): string;
|
||||
|
||||
getActionUniqueName() {
|
||||
return this.name === this.getKind() ? this.getKind() : `${this.getKind()} - ${this.name}`;
|
||||
}
|
||||
|
||||
async handle(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
|
||||
let actRes: ActionResult = {
|
||||
kind: this.getKind(),
|
||||
name: this.getActionUniqueName(),
|
||||
run: false,
|
||||
dryRun,
|
||||
success: false,
|
||||
};
|
||||
try {
|
||||
const itemPass = await this.resources.testItemCriteria(item, this.itemIs);
|
||||
if (!itemPass) {
|
||||
this.logger.verbose(`Activity did not pass 'itemIs' test, Action not run`);
|
||||
actRes.runReason = `Activity did not pass 'itemIs' test, Action not run`;
|
||||
return actRes;
|
||||
}
|
||||
if (this.authorIs.include !== undefined && this.authorIs.include.length > 0) {
|
||||
for (const auth of this.authorIs.include) {
|
||||
if (await this.resources.testAuthorCriteria(item, auth)) {
|
||||
actRes.run = true;
|
||||
const results = await this.process(item, ruleResults, runtimeDryrun);
|
||||
return {...actRes, ...results};
|
||||
}
|
||||
}
|
||||
this.logger.verbose('Inclusive author criteria not matched, Action not run');
|
||||
actRes.runReason = 'Inclusive author criteria not matched';
|
||||
return actRes;
|
||||
} else if (this.authorIs.exclude !== undefined && this.authorIs.exclude.length > 0) {
|
||||
for (const auth of this.authorIs.exclude) {
|
||||
if (await this.resources.testAuthorCriteria(item, auth, false)) {
|
||||
actRes.run = true;
|
||||
const results = await this.process(item, ruleResults, runtimeDryrun);
|
||||
return {...actRes, ...results};
|
||||
}
|
||||
}
|
||||
this.logger.verbose('Exclusive author criteria not matched, Action not run');
|
||||
actRes.runReason = 'Exclusive author criteria not matched';
|
||||
return actRes;
|
||||
}
|
||||
|
||||
actRes.run = true;
|
||||
const results = await this.process(item, ruleResults, runtimeDryrun);
|
||||
return {...actRes, ...results};
|
||||
} catch (err) {
|
||||
if(!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Encountered error while running`, err);
|
||||
}
|
||||
actRes.success = false;
|
||||
actRes.result = err.message;
|
||||
return actRes;
|
||||
}
|
||||
}
|
||||
|
||||
abstract handle(item: Comment | Submission, ruleResults: RuleResult[]): Promise<void>;
|
||||
abstract process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult>;
|
||||
}
|
||||
|
||||
export interface ActionOptions {
|
||||
name?: string;
|
||||
logger?: Logger,
|
||||
loggerPrefix?: string,
|
||||
export interface ActionOptions extends ActionConfig {
|
||||
logger: Logger;
|
||||
subredditName: string;
|
||||
resources: SubredditResources
|
||||
client: Snoowrap
|
||||
}
|
||||
|
||||
export interface ActionConfig {
|
||||
export interface ActionConfig extends ChecksActivityState {
|
||||
/**
|
||||
* An optional, but highly recommended, friendly name for this Action. If not present will default to `kind`.
|
||||
*
|
||||
* Can only contain letters, numbers, underscore, spaces, and dashes
|
||||
*
|
||||
* @pattern ^[a-zA-Z]([\w -]*[\w])?$
|
||||
* @examples ["myDescriptiveAction"]
|
||||
* */
|
||||
name?: string;
|
||||
/**
|
||||
* If `true` the Action will not make the API request to Reddit to perform its action.
|
||||
*
|
||||
* @default false
|
||||
* @examples [false, true]
|
||||
* */
|
||||
dryRun?: boolean;
|
||||
|
||||
/**
|
||||
* If present then these Author criteria are checked before running the Action. If criteria fails then the Action is not run.
|
||||
* */
|
||||
authorIs?: AuthorOptions
|
||||
|
||||
/**
|
||||
* A list of criteria to test the state of the `Activity` against before running the Action.
|
||||
*
|
||||
* If any set of criteria passes the Action will be run.
|
||||
*
|
||||
* */
|
||||
itemIs?: TypedActivityStates
|
||||
|
||||
/**
|
||||
* If set to `false` the Action will not be run
|
||||
*
|
||||
* @default true
|
||||
* @examples [true]
|
||||
* */
|
||||
enable?: boolean
|
||||
}
|
||||
|
||||
/** @see {isActionConfig} ts-auto-guard:type-guard */
|
||||
export interface ActionJSONConfig extends ActionConfig {
|
||||
export interface ActionJson extends ActionConfig {
|
||||
/**
|
||||
* The type of action that will be performed
|
||||
*/
|
||||
kind: 'comment' | 'lock' | 'remove' | 'report' | 'flair'
|
||||
kind: 'comment' | 'lock' | 'remove' | 'report' | 'approve' | 'ban' | 'flair' | 'usernote' | 'message'
|
||||
}
|
||||
|
||||
export const isActionJson = (obj: object): obj is ActionJson => {
|
||||
return (obj as ActionJson).kind !== undefined;
|
||||
}
|
||||
|
||||
export default Action;
|
||||
|
||||
96 src/App.ts Normal file
@@ -0,0 +1,96 @@
|
||||
import winston, {Logger} from "winston";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {getLogger} from "./Utils/loggerFactory";
|
||||
import {Invokee, OperatorConfig} from "./Common/interfaces";
|
||||
import Bot from "./Bot";
|
||||
import LoggedError from "./Utils/LoggedError";
|
||||
import {sleep} from "./util";
|
||||
|
||||
export class App {
|
||||
|
||||
bots: Bot[]
|
||||
logger: Logger;
|
||||
startedAt: Dayjs = dayjs();
|
||||
|
||||
error: any;
|
||||
|
||||
constructor(config: OperatorConfig) {
|
||||
const {
|
||||
operator: {
|
||||
name,
|
||||
},
|
||||
notifications,
|
||||
bots = [],
|
||||
} = config;
|
||||
|
||||
this.logger = getLogger(config.logging);
|
||||
|
||||
this.logger.info(`Operators: ${name.length === 0 ? 'None Specified' : name.join(', ')}`)
|
||||
|
||||
this.bots = bots.map(x => new Bot(x, this.logger));
|
||||
|
||||
process.on('uncaughtException', (e) => {
|
||||
this.error = e;
|
||||
});
|
||||
process.on('unhandledRejection', (e) => {
|
||||
this.error = e;
|
||||
});
|
||||
process.on('exit', async (code) => {
|
||||
if(code === 0) {
|
||||
await this.onTerminate();
|
||||
} else if(this.error !== undefined) {
|
||||
let errMsg;
|
||||
if(typeof this.error === 'object' && this.error.message !== undefined) {
|
||||
errMsg = this.error.message;
|
||||
} else if(typeof this.error === 'string') {
|
||||
errMsg = this.error;
|
||||
}
|
||||
await this.onTerminate(`Application exited due to an unexpected error${errMsg !== undefined ? `: ${errMsg}` : ''}`);
|
||||
} else {
|
||||
await this.onTerminate(`Application exited with unclean exit signal (${code})`);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async onTerminate(reason = 'The application was shutdown') {
|
||||
for(const b of this.bots) {
|
||||
for(const m of b.subManagers) {
|
||||
await m.notificationManager.handle('runStateChanged', 'Application Shutdown', reason);
|
||||
}
|
||||
//await b.notificationManager.handle('runStateChanged', 'Application Shutdown', reason);
|
||||
}
|
||||
}
|
||||
|
||||
async initBots(causedBy: Invokee = 'system') {
|
||||
for (const b of this.bots) {
|
||||
if (b.error === undefined) {
|
||||
try {
|
||||
await b.testClient();
|
||||
await b.buildManagers();
|
||||
await sleep(2000);
|
||||
b.runManagers(causedBy).catch((err) => {
|
||||
this.logger.error(`Unexpected error occurred while running Bot ${b.botName}. Bot must be re-built to restart`);
|
||||
if (!err.logged || !(err instanceof LoggedError)) {
|
||||
this.logger.error(err);
|
||||
}
|
||||
});
|
||||
} catch (err) {
|
||||
if (b.error === undefined) {
|
||||
b.error = err.message;
|
||||
}
|
||||
this.logger.error(`Bot ${b.botName} cannot recover from this error and must be re-built`);
|
||||
if (!err.logged || !(err instanceof LoggedError)) {
|
||||
this.logger.error(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async destroy(causedBy: Invokee) {
|
||||
this.logger.info('Stopping all bots...');
|
||||
for(const b of this.bots) {
|
||||
await b.destroy(causedBy);
|
||||
}
|
||||
}
|
||||
}
|
||||
152 src/Author/Author.ts Normal file
@@ -0,0 +1,152 @@
|
||||
import {UserNoteCriteria} from "../Rule";
|
||||
import {CompareValue, CompareValueOrPercent, DurationComparor} from "../Common/interfaces";
|
||||
import {parseStringToRegex} from "../util";
|
||||
|
||||
/**
|
||||
* If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.
|
||||
* @examples [{"include": [{"flairText": ["Contributor","Veteran"]}, {"isMod": true}]}]
|
||||
* */
|
||||
export interface AuthorOptions {
|
||||
/**
|
||||
* Will "pass" if any set of AuthorCriteria passes
|
||||
* */
|
||||
include?: AuthorCriteria[];
|
||||
/**
|
||||
* Only runs if `include` is not present. Will "pass" if any set of the AuthorCriteria **does not** pass
|
||||
* */
|
||||
exclude?: AuthorCriteria[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Criteria with which to test against the author of an Activity. The outcome of the test is based on:
|
||||
*
|
||||
* 1. All present properties passing and
|
||||
* 2. If a property is a list then any value from the list matching
|
||||
*
|
||||
* @minProperties 1
|
||||
* @additionalProperties false
|
||||
* @examples [{"flairText": ["Contributor","Veteran"], "isMod": true, "name": ["FoxxMD", "AnotherUser"] }]
|
||||
* */
|
||||
export interface AuthorCriteria {
|
||||
/**
|
||||
* A list of reddit usernames (case-insensitive) to match against. Do not include the "u/" prefix
|
||||
*
|
||||
* EX to match against /u/FoxxMD and /u/AnotherUser use ["FoxxMD","AnotherUser"]
|
||||
* @examples ["FoxxMD","AnotherUser"]
|
||||
* */
|
||||
name?: string[],
|
||||
/**
|
||||
* A list of (user) flair css class values from the subreddit to match against
|
||||
* @examples ["red"]
|
||||
* */
|
||||
flairCssClass?: string[],
|
||||
/**
|
||||
* A list of (user) flair text values from the subreddit to match against
|
||||
* @examples ["Approved"]
|
||||
* */
|
||||
flairText?: string[],
|
||||
/**
|
||||
* Is the author a moderator?
|
||||
* */
|
||||
isMod?: boolean,
|
||||
/**
|
||||
* A list of UserNote properties to check against the User Notes attached to this Author in this Subreddit (must have Toolbox enabled and used User Notes at least once)
|
||||
* */
|
||||
userNotes?: UserNoteCriteria[]
|
||||
|
||||
/**
|
||||
* Test the age of the Author's account (when it was created) against this comparison
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number> <unit>`
|
||||
*
|
||||
* * EX `> 100 days` => Passes if Author's account is older than 100 days
|
||||
* * EX `<= 2 months` => Passes if Author's account is younger than or equal to 2 months
|
||||
*
|
||||
* Unit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)
|
||||
*
|
||||
* See https://regexr.com/609n8 for an example
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)\s*$
|
||||
* */
|
||||
age?: DurationComparor
|
||||
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare link karma against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign]`
|
||||
*
|
||||
* * EX `> 100` => greater than 100 link karma
|
||||
* * EX `<= 75%` => link karma is less than or equal to 75% of **all karma**
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
linkKarma?: CompareValueOrPercent
|
||||
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare karma against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign]`
|
||||
*
|
||||
* * EX `> 100` => greater than 100 comment karma
|
||||
* * EX `<= 75%` => comment karma is less than or equal to 75% of **all karma**
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
commentKarma?: CompareValueOrPercent
|
||||
|
||||
totalKarma?: CompareValue
|
||||
|
||||
/**
|
||||
* Does Author's account have a verified email?
|
||||
* */
|
||||
verified?: boolean
|
||||
|
||||
/**
|
||||
* Is the author shadowbanned?
|
||||
*
|
||||
* This is determined by trying to retrieve the author's profile. If a 404 is returned it is likely they are shadowbanned
|
||||
* */
|
||||
shadowBanned?: boolean
|
||||
|
||||
/**
|
||||
* An (array of) string/regular expression to test contents of an Author's profile description against
|
||||
*
|
||||
* If no flags are specified then the **insensitive** flag is used by default
|
||||
*
|
||||
* If using an array then if **any** value in the array passes the description test passes
|
||||
*
|
||||
* @examples [["/test$/i", "look for this string literal"]]
|
||||
* */
|
||||
description?: string | string[]
|
||||
}
|
||||
|
||||
export class Author implements AuthorCriteria {
|
||||
name?: string[];
|
||||
flairCssClass?: string[];
|
||||
flairText?: string[];
|
||||
isMod?: boolean;
|
||||
userNotes?: UserNoteCriteria[];
|
||||
age?: string;
|
||||
commentKarma?: string;
|
||||
linkKarma?: string;
|
||||
totalKarma?: string;
|
||||
verified?: boolean;
|
||||
shadowBanned?: boolean;
|
||||
description?: string[];
|
||||
|
||||
constructor(options: AuthorCriteria) {
|
||||
this.name = options.name;
|
||||
this.flairCssClass = options.flairCssClass;
|
||||
this.flairText = options.flairText;
|
||||
this.isMod = options.isMod;
|
||||
this.userNotes = options.userNotes;
|
||||
this.age = options.age;
|
||||
this.commentKarma = options.commentKarma;
|
||||
this.linkKarma = options.linkKarma;
|
||||
this.totalKarma = options.totalKarma;
|
||||
this.shadowBanned = options.shadowBanned;
|
||||
this.description = options.description === undefined ? undefined : Array.isArray(options.description) ? options.description : [options.description];
|
||||
}
|
||||
}
|
||||
|
||||
export default Author;
|
||||
600 src/Bot/index.ts Normal file
@@ -0,0 +1,600 @@
|
||||
import Snoowrap, {Comment, Submission, Subreddit} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {Duration} from "dayjs/plugin/duration";
|
||||
import EventEmitter from "events";
|
||||
import {BotInstanceConfig, Invokee, PAUSED, RUNNING, STOPPED, SYSTEM, USER} from "../Common/interfaces";
|
||||
import {
|
||||
createRetryHandler,
|
||||
formatNumber,
|
||||
mergeArr,
|
||||
parseBool,
|
||||
parseDuration,
|
||||
parseSubredditName,
|
||||
sleep,
|
||||
snooLogWrapper
|
||||
} from "../util";
|
||||
import {Manager} from "../Subreddit/Manager";
|
||||
import {ExtendedSnoowrap, ProxiedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
import {ModQueueStream, UnmoderatedStream} from "../Subreddit/Streams";
|
||||
import {BotResourcesManager} from "../Subreddit/SubredditResources";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
import pEvent from "p-event";
|
||||
|
||||
|
||||
class Bot {
|
||||
|
||||
client!: ExtendedSnoowrap;
|
||||
logger!: Logger;
|
||||
wikiLocation: string;
|
||||
dryRun?: true | undefined;
|
||||
running: boolean = false;
|
||||
subreddits: string[];
|
||||
excludeSubreddits: string[];
|
||||
subManagers: Manager[] = [];
|
||||
heartbeatInterval: number;
|
||||
nextHeartbeat: Dayjs = dayjs();
|
||||
heartBeating: boolean = false;
|
||||
|
||||
softLimit: number | string = 250;
|
||||
hardLimit: number | string = 50;
|
||||
nannyMode?: 'soft' | 'hard';
|
||||
nannyRunning: boolean = false;
|
||||
nextNannyCheck: Dayjs = dayjs().add(10, 'second');
|
||||
nannyRetryHandler: Function;
|
||||
nextExpiration: Dayjs = dayjs();
|
||||
botName?: string;
|
||||
botLink?: string;
|
||||
botAccount?: string;
|
||||
maxWorkers: number;
|
||||
startedAt: Dayjs = dayjs();
|
||||
sharedModqueue: boolean = false;
|
||||
streamListedOnce: string[] = [];
|
||||
|
||||
apiSample: number[] = [];
|
||||
apiRollingAvg: number = 0;
|
||||
apiEstDepletion?: Duration;
|
||||
depletedInSecs: number = 0;
|
||||
|
||||
error: any;
|
||||
emitter: EventEmitter = new EventEmitter();
|
||||
|
||||
cacheManager: BotResourcesManager;
|
||||
|
||||
getBotName = () => {
|
||||
return this.botName;
|
||||
}
|
||||
|
||||
getUserAgent = () => {
|
||||
return `web:contextMod:${this.botName}`
|
||||
}
|
||||
|
||||
constructor(config: BotInstanceConfig, logger: Logger) {
|
||||
const {
|
||||
notifications,
|
||||
name,
|
||||
subreddits: {
|
||||
names = [],
|
||||
exclude = [],
|
||||
wikiConfig,
|
||||
dryRun,
|
||||
heartbeatInterval,
|
||||
},
|
||||
credentials: {
|
||||
clientId,
|
||||
clientSecret,
|
||||
refreshToken,
|
||||
accessToken,
|
||||
},
|
||||
snoowrap: {
|
||||
proxy,
|
||||
debug,
|
||||
},
|
||||
polling: {
|
||||
sharedMod,
|
||||
stagger,
|
||||
},
|
||||
queue: {
|
||||
maxWorkers,
|
||||
},
|
||||
caching: {
|
||||
authorTTL,
|
||||
provider: {
|
||||
store
|
||||
}
|
||||
},
|
||||
nanny: {
|
||||
softLimit,
|
||||
hardLimit,
|
||||
}
|
||||
} = config;
|
||||
|
||||
this.cacheManager = new BotResourcesManager(config);
|
||||
|
||||
this.dryRun = parseBool(dryRun) === true ? true : undefined;
|
||||
this.softLimit = softLimit;
|
||||
this.hardLimit = hardLimit;
|
||||
this.wikiLocation = wikiConfig;
|
||||
this.heartbeatInterval = heartbeatInterval;
|
||||
this.sharedModqueue = sharedMod;
|
||||
if(name !== undefined) {
|
||||
this.botName = name;
|
||||
}
|
||||
|
||||
const getBotName = this.getBotName;
|
||||
const getUserName = this.getUserAgent;
|
||||
|
||||
this.logger = logger.child({
|
||||
get bot() {
|
||||
return getBotName();
|
||||
}
|
||||
}, mergeArr);
|
||||
|
||||
let mw = maxWorkers;
|
||||
if(maxWorkers < 1) {
|
||||
this.logger.warn(`Max queue workers must be greater than or equal to 1 (Specified: ${maxWorkers})`);
|
||||
mw = 1;
|
||||
}
|
||||
this.maxWorkers = mw;
|
||||
|
||||
if (this.dryRun) {
|
||||
this.logger.info('Running in DRYRUN mode');
|
||||
}
|
||||
|
||||
this.subreddits = names.map(parseSubredditName);
|
||||
this.excludeSubreddits = exclude.map(parseSubredditName);
|
||||
|
||||
let creds: any = {
|
||||
get userAgent() { return getUserName() },
|
||||
clientId,
|
||||
clientSecret,
|
||||
refreshToken,
|
||||
accessToken,
|
||||
};
|
||||
|
||||
const missingCreds = [];
|
||||
for(const [k,v] of Object.entries(creds)) {
|
||||
if(v === undefined || v === '' || v === null) {
|
||||
missingCreds.push(k);
|
||||
}
|
||||
}
|
||||
if(missingCreds.length > 0) {
|
||||
this.logger.error('There are credentials missing that would prevent initializing the Reddit API Client and subsequently the rest of the application');
|
||||
this.logger.error(`Missing credentials: ${missingCreds.join(', ')}`)
|
||||
this.logger.info(`If this is a first-time setup use the 'web' command for a web-based guide to configuring your application`);
|
||||
this.logger.info(`Or check the USAGE section of the readme for the correct naming of these arguments/environment variables`);
|
||||
this.error = `Missing credentials: ${missingCreds.join(', ')}`;
|
||||
//throw new LoggedError(`Missing credentials: ${missingCreds.join(', ')}`);
|
||||
}
|
||||
|
||||
try {
|
||||
this.client = proxy === undefined ? new ExtendedSnoowrap(creds) : new ProxiedSnoowrap({...creds, proxy});
|
||||
this.client.config({
|
||||
warnings: true,
|
||||
maxRetryAttempts: 5,
|
||||
debug,
|
||||
logger: snooLogWrapper(this.logger.child({labels: ['Snoowrap']}, mergeArr)),
|
||||
continueAfterRatelimitError: true,
|
||||
});
|
||||
} catch (err) {
|
||||
if(this.error === undefined) {
|
||||
this.error = err.message;
|
||||
this.logger.error(err);
|
||||
}
|
||||
}
|
||||
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 1}, this.logger);
|
||||
this.nannyRetryHandler = createRetryHandler({maxRequestRetry: 5, maxOtherRetry: 1}, this.logger);
|
||||
|
||||
const modStreamErrorListener = (name: string) => async (err: any) => {
|
||||
this.logger.error('Polling error occurred', err);
|
||||
const shouldRetry = await retryHandler(err);
|
||||
if(shouldRetry) {
|
||||
defaultUnmoderatedStream.startInterval();
|
||||
} else {
|
||||
for(const m of this.subManagers) {
|
||||
if(m.modStreamCallbacks.size > 0) {
|
||||
m.notificationManager.handle('runStateChanged', `${name.toUpperCase()} Polling Stopped`, 'Encountered too many errors from Reddit while polling. Will try to restart on next heartbeat.');
|
||||
}
|
||||
}
|
||||
this.logger.error(`Mod stream ${name.toUpperCase()} encountered too many errors while polling. Will try to restart on next heartbeat.`);
|
||||
}
|
||||
}
|
||||
|
||||
const modStreamListingListener = (name: string) => async (listing: (Comment|Submission)[]) => {
|
||||
// dole out in order they were received
|
||||
if(!this.streamListedOnce.includes(name)) {
|
||||
this.streamListedOnce.push(name);
|
||||
return;
|
||||
}
|
||||
for(const i of listing) {
|
||||
const foundManager = this.subManagers.find(x => x.subreddit.display_name === i.subreddit.display_name && x.modStreamCallbacks.get(name) !== undefined);
|
||||
if(foundManager !== undefined) {
|
||||
foundManager.modStreamCallbacks.get(name)(i);
|
||||
if(stagger !== undefined) {
|
||||
await sleep(stagger);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
const defaultUnmoderatedStream = new UnmoderatedStream(this.client, {subreddit: 'mod', limit: 100, clearProcessed: { size: 100, retain: 100 }});
|
||||
// @ts-ignore
|
||||
defaultUnmoderatedStream.on('error', modStreamErrorListener('unmoderated'));
|
||||
defaultUnmoderatedStream.on('listing', modStreamListingListener('unmoderated'));
|
||||
const defaultModqueueStream = new ModQueueStream(this.client, {subreddit: 'mod', limit: 100, clearProcessed: { size: 100, retain: 100 }});
|
||||
// @ts-ignore
|
||||
defaultModqueueStream.on('error', modStreamErrorListener('modqueue'));
|
||||
defaultModqueueStream.on('listing', modStreamListingListener('modqueue'));
|
||||
this.cacheManager.modStreams.set('unmoderated', defaultUnmoderatedStream);
|
||||
this.cacheManager.modStreams.set('modqueue', defaultModqueueStream);
|
||||
|
||||
process.on('uncaughtException', (e) => {
|
||||
this.error = e;
|
||||
});
|
||||
process.on('unhandledRejection', (e) => {
|
||||
this.error = e;
|
||||
});
|
||||
process.on('exit', async (code) => {
|
||||
if(code === 0) {
|
||||
await this.onTerminate();
|
||||
} else if(this.error !== undefined) {
|
||||
let errMsg;
|
||||
if(typeof this.error === 'object' && this.error.message !== undefined) {
|
||||
errMsg = this.error.message;
|
||||
} else if(typeof this.error === 'string') {
|
||||
errMsg = this.error;
|
||||
}
|
||||
await this.onTerminate(`Application exited due to an unexpected error${errMsg !== undefined ? `: ${errMsg}` : ''}`);
|
||||
} else {
|
||||
await this.onTerminate(`Application exited with unclean exit signal (${code})`);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async onTerminate(reason = 'The application was shutdown') {
|
||||
for(const m of this.subManagers) {
|
||||
await m.notificationManager.handle('runStateChanged', 'Application Shutdown', reason);
|
||||
}
|
||||
}
|
||||
|
||||
async testClient() {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.getMe();
|
||||
this.logger.info('Test API call successful');
|
||||
} catch (err) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
|
||||
if(err.name === 'StatusCodeError') {
|
||||
const authHeader = err.response.headers['www-authenticate'];
|
||||
if (authHeader !== undefined && authHeader.includes('insufficient_scope')) {
|
||||
this.logger.error('Reddit responded with a 403 insufficient_scope. Please ensure you have chosen the correct scopes when authorizing your account.');
|
||||
} else if(err.statusCode === 401) {
|
||||
this.logger.error('It is likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken');
|
||||
}
|
||||
this.logger.error(`Error Message: ${err.message}`);
|
||||
} else {
|
||||
this.logger.error(err);
|
||||
}
|
||||
this.error = `Error occurred while testing Reddit API client: ${err.message}`;
|
||||
err.logged = true;
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async buildManagers(subreddits: string[] = []) {
|
||||
let availSubs = [];
|
||||
// @ts-ignore
|
||||
const user = await this.client.getMe().fetch();
|
||||
this.botLink = `https://reddit.com/user/${user.name}`;
|
||||
this.botAccount = `u/${user.name}`;
|
||||
this.logger.info(`Reddit API Limit Remaining: ${this.client.ratelimitRemaining}`);
|
||||
this.logger.info(`Authenticated Account: u/${user.name}`);
|
||||
|
||||
const botNameFromConfig = this.botName !== undefined;
|
||||
if(this.botName === undefined) {
|
||||
this.botName = `u/${user.name}`;
|
||||
}
|
||||
this.logger.info(`Bot Name${botNameFromConfig ? ' (from config)' : ''}: ${this.botName}`);
|
||||
|
||||
for (const sub of await this.client.getModeratedSubreddits()) {
|
||||
// TODO don't know a way to check permissions yet
|
||||
availSubs.push(sub);
|
||||
}
|
||||
this.logger.info(`u/${user.name} is a moderator of these subreddits: ${availSubs.map(x => x.display_name_prefixed).join(', ')}`);
|
||||
|
||||
let subsToRun: Subreddit[] = [];
|
||||
const subsToUse = subreddits.length > 0 ? subreddits.map(parseSubredditName) : this.subreddits;
|
||||
if (subsToUse.length > 0) {
|
||||
this.logger.info(`Operator-defined subreddit constraints detected (CLI argument or environment variable), will try to run on: ${subsToUse.join(', ')}`);
|
||||
for (const sub of subsToUse) {
|
||||
const asub = availSubs.find(x => x.display_name.toLowerCase() === sub.toLowerCase())
|
||||
if (asub === undefined) {
|
||||
this.logger.warn(`Will not run on ${sub} because it is not moderated by this client or the client does not have the appropriate permissions.`);
|
||||
} else {
|
||||
// @ts-ignore
|
||||
const fetchedSub = await asub.fetch();
|
||||
subsToRun.push(fetchedSub);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if(this.excludeSubreddits.length > 0) {
|
||||
this.logger.info(`Will run on all moderated subreddits except those excluded by the user: ${this.excludeSubreddits.join(', ')}`);
|
||||
const normalExcludes = this.excludeSubreddits.map(x => x.toLowerCase());
|
||||
subsToRun = availSubs.filter(x => !normalExcludes.includes(x.display_name.toLowerCase()));
|
||||
} else {
|
||||
this.logger.info(`No user-defined subreddit constraints detected, will run on all moderated subreddits EXCEPT own profile (${this.botAccount})`);
|
||||
subsToRun = availSubs.filter(x => x.display_name_prefixed !== this.botAccount);
|
||||
}
|
||||
}
|
||||
|
||||
let subSchedule: Manager[] = [];
|
||||
// get configs for subs we want to run on and build/validate them
|
||||
for (const sub of subsToRun) {
|
||||
const manager = new Manager(sub, this.client, this.logger, this.cacheManager, {dryRun: this.dryRun, sharedModqueue: this.sharedModqueue, wikiLocation: this.wikiLocation, botName: this.botName, maxWorkers: this.maxWorkers});
|
||||
try {
|
||||
await manager.parseConfiguration('system', true, {suppressNotification: true});
|
||||
} catch (err) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Config was not valid:`, {subreddit: sub.display_name_prefixed});
|
||||
this.logger.error(err, {subreddit: sub.display_name_prefixed});
|
||||
}
|
||||
}
|
||||
subSchedule.push(manager);
|
||||
}
|
||||
this.subManagers = subSchedule;
|
||||
}
|
||||
|
||||
async destroy(causedBy: Invokee) {
|
||||
this.logger.info('Stopping heartbeat and nanny processes, may take up to 5 seconds...');
|
||||
const processWait = pEvent(this.emitter, 'healthStopped');
|
||||
this.running = false;
|
||||
await processWait;
|
||||
for (const manager of this.subManagers) {
|
||||
await manager.stop(causedBy, {reason: 'App rebuild'});
|
||||
}
|
||||
this.logger.info('Bot is stopped.');
|
||||
}
|
||||
|
||||
async runModStreams(notify = false) {
|
||||
for(const [k,v] of this.cacheManager.modStreams) {
|
||||
if(!v.running && this.subManagers.some(x => x.modStreamCallbacks.get(k) !== undefined)) {
|
||||
v.startInterval();
|
||||
this.logger.info(`Starting default ${k.toUpperCase()} mod stream`);
|
||||
if(notify) {
|
||||
for(const m of this.subManagers) {
|
||||
if(m.modStreamCallbacks.size > 0) {
|
||||
await m.notificationManager.handle('runStateChanged', `${k.toUpperCase()} Polling Started`, 'Polling was successfully restarted on heartbeat.');
|
||||
}
|
||||
}
|
||||
}
|
||||
await sleep(2000);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async runManagers(causedBy: Invokee = 'system') {
|
||||
if(this.subManagers.every(x => !x.validConfigLoaded)) {
|
||||
this.logger.warn('All managers have invalid configs!');
|
||||
this.error = 'All managers have invalid configs';
|
||||
}
|
||||
for (const manager of this.subManagers) {
|
||||
if (manager.validConfigLoaded && manager.botState.state !== RUNNING) {
|
||||
await manager.start(causedBy, {reason: 'Caused by application startup'});
|
||||
await sleep(2000);
|
||||
}
|
||||
}
|
||||
|
||||
await this.runModStreams();
|
||||
|
||||
this.running = true;
|
||||
this.nextNannyCheck = dayjs().add(10, 'second');
|
||||
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
|
||||
await this.healthLoop();
|
||||
}
|
||||
|
||||
async healthLoop() {
|
||||
while (this.running) {
|
||||
await sleep(5000);
|
||||
if (!this.running) {
|
||||
break;
|
||||
}
|
||||
if (dayjs().isSameOrAfter(this.nextNannyCheck)) {
|
||||
try {
|
||||
await this.runApiNanny();
|
||||
this.nextNannyCheck = dayjs().add(10, 'second');
|
||||
} catch (err) {
|
||||
this.logger.info('Delaying next nanny check for 2 minutes due to emitted error');
|
||||
this.nextNannyCheck = dayjs().add(120, 'second');
|
||||
}
|
||||
}
|
||||
if(dayjs().isSameOrAfter(this.nextHeartbeat)) {
|
||||
try {
|
||||
await this.heartbeat();
|
||||
} catch (err) {
|
||||
this.logger.error(`Error occurred during heartbeat check: ${err.message}`);
|
||||
}
|
||||
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
|
||||
}
|
||||
}
|
||||
this.emitter.emit('healthStopped');
|
||||
}
|
||||
|
||||
async heartbeat() {
|
||||
const heartbeat = `HEARTBEAT -- API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ~${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion === undefined ? 'N/A' : this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`
|
||||
this.logger.info(heartbeat);
|
||||
for (const s of this.subManagers) {
|
||||
if(s.botState.state === STOPPED && s.botState.causedBy === USER) {
|
||||
this.logger.debug('Skipping config check/restart on heartbeat due to previously being stopped by user', {subreddit: s.displayLabel});
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
const newConfig = await s.parseConfiguration();
|
||||
if(newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startQueue('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running queue'});
|
||||
}
|
||||
if(newConfig || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startEvents('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running events'});
|
||||
}
|
||||
if(s.botState.state !== RUNNING && s.eventsState.state === RUNNING && s.queueState.state === RUNNING) {
|
||||
s.botState = {
|
||||
state: RUNNING,
|
||||
causedBy: 'system',
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
this.logger.info('Stopping event polling to prevent activity processing queue from backing up. Will be restarted when config update succeeds.')
|
||||
await s.stopEvents('system', {reason: 'Invalid config will cause events to pile up in queue. Will be restarted when config update succeeds (next heartbeat).'});
|
||||
if(!(err instanceof LoggedError)) {
|
||||
this.logger.error(err, {subreddit: s.displayLabel});
|
||||
}
|
||||
if(this.nextHeartbeat !== undefined) {
|
||||
this.logger.info(`Will retry parsing config on next heartbeat (in ${dayjs.duration(this.nextHeartbeat.diff(dayjs())).humanize()})`, {subreddit: s.displayLabel});
|
||||
}
|
||||
}
|
||||
}
|
||||
await this.runModStreams(true);
|
||||
}
|
||||
|
||||
async runApiNanny() {
|
||||
try {
|
||||
this.nextExpiration = dayjs(this.client.ratelimitExpiration);
|
||||
const nowish = dayjs().add(10, 'second');
|
||||
if (nowish.isAfter(this.nextExpiration)) {
|
||||
// it's possible no api calls are being made because of a hard limit
|
||||
// need to make an api call to update this
|
||||
let shouldRetry = true;
|
||||
while (shouldRetry) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.getMe();
|
||||
shouldRetry = false;
|
||||
} catch (err) {
|
||||
shouldRetry = await this.nannyRetryHandler(err);
|
||||
if (!shouldRetry) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
this.nextExpiration = dayjs(this.client.ratelimitExpiration);
|
||||
}
|
||||
const rollingSample = this.apiSample.slice(0, 7)
|
||||
rollingSample.unshift(this.client.ratelimitRemaining);
|
||||
this.apiSample = rollingSample;
|
||||
const diff = this.apiSample.reduceRight((acc: number[], curr, index) => {
|
||||
if (this.apiSample[index + 1] !== undefined) {
|
||||
const d = Math.abs(curr - this.apiSample[index + 1]);
|
||||
if (d === 0) {
|
||||
return [...acc, 0];
|
||||
}
|
||||
return [...acc, d / 10];
|
||||
}
|
||||
return acc;
|
||||
}, []);
|
||||
this.apiRollingAvg = diff.reduce((acc, curr) => acc + curr, 0) / diff.length; // api requests per second
|
||||
this.depletedInSecs = this.client.ratelimitRemaining / this.apiRollingAvg; // number of seconds until current remaining limit is 0
|
||||
this.apiEstDepletion = dayjs.duration({seconds: this.depletedInSecs});
|
||||
this.logger.debug(`API Usage Rolling Avg: ${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`);
|
||||
|
||||
|
||||
let hardLimitHit = false;
|
||||
if (typeof this.hardLimit === 'string') {
|
||||
const hardDur = parseDuration(this.hardLimit);
|
||||
hardLimitHit = hardDur.asSeconds() > this.apiEstDepletion.asSeconds();
|
||||
} else {
|
||||
hardLimitHit = this.hardLimit > this.client.ratelimitRemaining;
|
||||
}
|
||||
|
||||
if (hardLimitHit) {
|
||||
if (this.nannyMode === 'hard') {
|
||||
return;
|
||||
}
|
||||
this.logger.info(`Detected HARD LIMIT of ${this.hardLimit} remaining`, {leaf: 'Api Nanny'});
|
||||
this.logger.info(`API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ${this.apiRollingAvg}/s | Est Depletion: ${this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`, {leaf: 'Api Nanny'});
|
||||
this.logger.info(`All subreddit event polling has been paused`, {leaf: 'Api Nanny'});
|
||||
|
||||
for (const m of this.subManagers) {
|
||||
m.pauseEvents('system');
|
||||
m.notificationManager.handle('runStateChanged', 'Hard Limit Triggered', `Hard Limit of ${this.hardLimit} hit (API Remaining: ${this.client.ratelimitRemaining}). Subreddit event polling has been paused.`, 'system', 'warn');
|
||||
}
|
||||
|
||||
this.nannyMode = 'hard';
|
||||
return;
|
||||
}
|
||||
|
||||
let softLimitHit = false;
|
||||
if (typeof this.softLimit === 'string') {
|
||||
const softDur = parseDuration(this.softLimit);
|
||||
softLimitHit = softDur.asSeconds() > this.apiEstDepletion.asSeconds();
|
||||
} else {
|
||||
softLimitHit = this.softLimit > this.client.ratelimitRemaining;
|
||||
}
|
||||
|
||||
if (softLimitHit) {
|
||||
if (this.nannyMode === 'soft') {
|
||||
return;
|
||||
}
|
||||
this.logger.info(`Detected SOFT LIMIT of ${this.softLimit} remaining`, {leaf: 'Api Nanny'});
|
||||
this.logger.info(`API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`, {leaf: 'Api Nanny'});
|
||||
this.logger.info('Trying to detect heavy usage subreddits...', {leaf: 'Api Nanny'});
|
||||
let threshold = 0.5;
|
||||
let offenders = this.subManagers.filter(x => {
|
||||
const combinedPerSec = x.eventsRollingAvg + x.rulesUniqueRollingAvg;
|
||||
return combinedPerSec > threshold;
|
||||
});
|
||||
if (offenders.length === 0) {
|
||||
threshold = 0.25;
|
||||
// reduce threshold
|
||||
offenders = this.subManagers.filter(x => {
|
||||
const combinedPerSec = x.eventsRollingAvg + x.rulesUniqueRollingAvg;
|
||||
return combinedPerSec > threshold;
|
||||
});
|
||||
}
|
||||
|
||||
if (offenders.length > 0) {
|
||||
this.logger.info(`Slowing subreddits using >= ${threshold} req/s:`, {leaf: 'Api Nanny'});
|
||||
for (const m of offenders) {
|
||||
m.delayBy = 1.5;
|
||||
m.logger.info(`SLOW MODE (Currently ~${formatNumber(m.eventsRollingAvg + m.rulesUniqueRollingAvg)}req/sec)`, {leaf: 'Api Nanny'});
|
||||
m.notificationManager.handle('runStateChanged', 'Soft Limit Triggered', `Soft Limit of ${this.softLimit} hit (API Remaining: ${this.client.ratelimitRemaining}). Subreddit queue processing will be slowed to 1.5 seconds per event.`, 'system', 'warn');
|
||||
}
|
||||
} else {
|
||||
this.logger.info(`Couldn't detect specific offenders, slowing all...`, {leaf: 'Api Nanny'});
|
||||
for (const m of this.subManagers) {
|
||||
m.delayBy = 1.5;
|
||||
m.logger.info(`SLOW MODE (Currently ~${formatNumber(m.eventsRollingAvg + m.rulesUniqueRollingAvg)}req/sec)`, {leaf: 'Api Nanny'});
|
||||
m.notificationManager.handle('runStateChanged', 'Soft Limit Triggered', `Soft Limit of ${this.softLimit} hit (API Remaining: ${this.client.ratelimitRemaining}). Subreddit queue processing will be slowed to 1.5 seconds per event.`, 'system', 'warn');
|
||||
}
|
||||
}
|
||||
this.nannyMode = 'soft';
|
||||
return
|
||||
}
|
||||
|
||||
if (this.nannyMode !== undefined) {
|
||||
this.logger.info('Turning off due to better conditions...', {leaf: 'Api Nanny'});
|
||||
for (const m of this.subManagers) {
|
||||
if (m.delayBy !== undefined) {
|
||||
m.delayBy = undefined;
|
||||
m.notificationManager.handle('runStateChanged', 'Normal Processing Resumed', 'Slow Mode has been turned off due to better API conditions', 'system');
|
||||
}
|
||||
if (m.queueState.state === PAUSED && m.queueState.causedBy === SYSTEM) {
|
||||
m.startQueue('system', {reason: 'API Nanny has been turned off due to better API conditions'});
|
||||
}
|
||||
if (m.eventsState.state === PAUSED && m.eventsState.causedBy === SYSTEM) {
|
||||
await m.startEvents('system', {reason: 'API Nanny has been turned off due to better API conditions'});
|
||||
}
|
||||
}
|
||||
this.nannyMode = undefined;
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
this.logger.error(`Error occurred during nanny loop: ${err.message}`);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export default Bot;
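For reference, a minimal sketch of the depletion math the nanny loop performs (the standalone helper and sample values below are hypothetical, not part of the Bot class):

// `samples` holds recent ratelimitRemaining readings, newest first, taken ~10 seconds apart;
// at least two readings are needed for a meaningful average
const estimateDepletion = (samples: number[], remaining: number) => {
    const perSecond = samples
        .slice(0, -1)
        .map((curr, i) => Math.abs(curr - samples[i + 1]) / 10);
    const rollingAvg = perSecond.reduce((acc, d) => acc + d, 0) / perSecond.length;
    return {rollingAvg, secondsUntilDepleted: remaining / rollingAvg};
};

// ~2 req/s average usage would deplete 520 remaining calls in ~260 seconds
estimateDepletion([520, 540, 560, 580], 520);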
|
||||
CommentCheck.js (compiled output, deleted)
@@ -1,8 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.CommentCheck = void 0;
|
||||
const index_1 = require("./index");
|
||||
class CommentCheck extends index_1.Check {
|
||||
}
|
||||
exports.CommentCheck = CommentCheck;
|
||||
//# sourceMappingURL=CommentCheck.js.map
|
||||
@@ -1 +0,0 @@
|
||||
{"version":3,"file":"CommentCheck.js","sourceRoot":"","sources":["CommentCheck.ts"],"names":[],"mappings":";;;AAAA,mCAA8B;AAE9B,MAAa,YAAa,SAAQ,aAAK;CAEtC;AAFD,oCAEC"}
|
||||
CommentCheck.ts
@@ -1,5 +1,56 @@
|
||||
import {Check} from "./index";
|
||||
import {Check, CheckOptions, userResultCacheDefault, UserResultCacheOptions} from "./index";
|
||||
import {CommentState, UserResultCache} from "../Common/interfaces";
|
||||
import {Submission, Comment} from "snoowrap/dist/objects";
|
||||
import {RuleResult} from "../Rule";
|
||||
|
||||
export interface CommentCheckOptions extends CheckOptions {
|
||||
cacheUserResult?: UserResultCacheOptions;
|
||||
}
|
||||
|
||||
export class CommentCheck extends Check {
|
||||
itemIs: CommentState[];
|
||||
|
||||
constructor(options: CommentCheckOptions) {
|
||||
super(options);
|
||||
const {
|
||||
itemIs = [],
|
||||
} = options;
|
||||
|
||||
this.itemIs = itemIs;
|
||||
this.logSummary();
|
||||
}
|
||||
|
||||
logSummary() {
|
||||
super.logSummary('comment');
|
||||
}
|
||||
|
||||
async getCacheResult(item: Submission | Comment): Promise<UserResultCache | undefined> {
|
||||
if (this.cacheUserResult.enable) {
|
||||
return await this.resources.getCommentCheckCacheResult(item as Comment, {
|
||||
name: this.name,
|
||||
authorIs: this.authorIs,
|
||||
itemIs: this.itemIs
|
||||
})
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
async setCacheResult(item: Submission | Comment, result: UserResultCache): Promise<void> {
|
||||
if (this.cacheUserResult.enable) {
|
||||
const {result: outcome, ruleResults} = result;
|
||||
|
||||
const res: UserResultCache = {
|
||||
result: outcome,
|
||||
// don't need to cache rule results if check was not triggered
|
||||
// since we only use rule results for actions
|
||||
ruleResults: outcome ? ruleResults : []
|
||||
};
|
||||
|
||||
await this.resources.setCommentCheckCacheResult(item as Comment, {
|
||||
name: this.name,
|
||||
authorIs: this.authorIs,
|
||||
itemIs: this.itemIs
|
||||
}, res, this.cacheUserResult.ttl)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SubmissionCheck.js (compiled output, deleted)
@@ -1,8 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SubmissionCheck = void 0;
|
||||
const index_1 = require("./index");
|
||||
class SubmissionCheck extends index_1.Check {
|
||||
}
|
||||
exports.SubmissionCheck = SubmissionCheck;
|
||||
//# sourceMappingURL=SubmissionCheck.js.map
|
||||
@@ -1 +0,0 @@
|
||||
{"version":3,"file":"SubmissionCheck.js","sourceRoot":"","sources":["SubmissionCheck.ts"],"names":[],"mappings":";;;AACA,mCAA8B;AAE9B,MAAa,eAAgB,SAAQ,aAAK;CAEzC;AAFD,0CAEC"}
|
||||
SubmissionCheck.ts
@@ -1,6 +1,19 @@
|
||||
|
||||
import {Check} from "./index";
|
||||
import {Check, CheckOptions} from "./index";
|
||||
import {SubmissionState, UserResultCache} from "../Common/interfaces";
|
||||
import {Submission, Comment} from "snoowrap/dist/objects";
|
||||
import {RuleResult} from "../Rule";
|
||||
|
||||
export class SubmissionCheck extends Check {
|
||||
itemIs: SubmissionState[];
|
||||
|
||||
constructor(options: CheckOptions) {
|
||||
super(options);
|
||||
const {itemIs = []} = options;
|
||||
this.itemIs = itemIs;
|
||||
this.logSummary();
|
||||
}
|
||||
|
||||
logSummary() {
|
||||
super.logSummary('submission');
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,54 +1,101 @@
|
||||
import {RuleSet, IRuleSet, RuleSetJSONConfig} from "../Rule/RuleSet";
|
||||
import {IRule, Triggerable, Rule, RuleJSONConfig, RuleResult} from "../Rule";
|
||||
import Action, {ActionConfig, ActionJSONConfig} from "../Action";
|
||||
import {RuleSet, IRuleSet, RuleSetJson, RuleSetObjectJson} from "../Rule/RuleSet";
|
||||
import {IRule, isRuleSetResult, Rule, RuleJSONConfig, RuleResult, RuleSetResult} from "../Rule";
|
||||
import Action, {ActionConfig, ActionJson} from "../Action";
|
||||
import {Logger} from "winston";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RecentActivityRuleJSONConfig} from "../Rule/RecentActivityRule";
|
||||
import {RepeatSubmissionJSONConfig} from "../Rule/SubmissionRule/RepeatSubmissionRule";
|
||||
import {FlairActionJSONConfig} from "../Action/SubmissionAction/FlairAction";
|
||||
import {CommentActionJSONConfig} from "../Action/CommentAction";
|
||||
import {actionFactory} from "../Action/ActionFactory";
|
||||
import {ruleFactory} from "../Rule/RuleFactory";
|
||||
import {createLabelledLogger, determineNewResults, loggerMetaShuffle, mergeArr} from "../util";
|
||||
import {AuthorRuleJSONConfig} from "../Rule/AuthorRule";
|
||||
import {ReportActionJSONConfig} from "../Action/ReportAction";
|
||||
import {LockActionJSONConfig} from "../Action/LockAction";
|
||||
import {RemoveActionJSONConfig} from "../Action/RemoveAction";
|
||||
import {JoinCondition, JoinOperands} from "../Common/interfaces";
|
||||
import {
|
||||
boolToString,
|
||||
createAjvFactory,
|
||||
FAIL,
|
||||
mergeArr,
|
||||
PASS,
|
||||
resultsSummary,
|
||||
ruleNamesFromResults,
|
||||
truncateStringToLength
|
||||
} from "../util";
|
||||
import {
|
||||
ActionResult,
|
||||
ChecksActivityState,
|
||||
CommentState,
|
||||
JoinCondition,
|
||||
JoinOperands,
|
||||
SubmissionState,
|
||||
TypedActivityStates, UserResultCache
|
||||
} from "../Common/interfaces";
|
||||
import * as RuleSchema from '../Schema/Rule.json';
|
||||
import * as RuleSetSchema from '../Schema/RuleSet.json';
|
||||
import * as ActionSchema from '../Schema/Action.json';
|
||||
import Ajv from 'ajv';
|
||||
import {ActionObjectJson, RuleJson, RuleObjectJson, ActionJson as ActionTypeJson} from "../Common/types";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {Author, AuthorCriteria, AuthorOptions} from "../Author/Author";
|
||||
|
||||
const ajv = new Ajv();
|
||||
const checkLogName = truncateStringToLength(25);
|
||||
|
||||
export class Check implements ICheck {
|
||||
export abstract class Check implements ICheck {
|
||||
actions: Action[] = [];
|
||||
description?: string;
|
||||
name: string;
|
||||
enabled: boolean;
|
||||
condition: JoinOperands;
|
||||
rules: Array<RuleSet | Rule> = [];
|
||||
logger: Logger;
|
||||
itemIs: TypedActivityStates;
|
||||
authorIs: {
|
||||
include: AuthorCriteria[],
|
||||
exclude: AuthorCriteria[]
|
||||
};
|
||||
cacheUserResult: Required<UserResultCacheOptions>;
|
||||
dryRun?: boolean;
|
||||
notifyOnTrigger: boolean;
|
||||
resources: SubredditResources;
|
||||
client: Snoowrap;
|
||||
|
||||
constructor(options: CheckOptions) {
|
||||
const {
|
||||
enable = true,
|
||||
name,
|
||||
resources,
|
||||
description,
|
||||
client,
|
||||
condition = 'AND',
|
||||
rules = [],
|
||||
actions = [],
|
||||
notifyOnTrigger = false,
|
||||
subredditName,
|
||||
cacheUserResult = {},
|
||||
itemIs = [],
|
||||
authorIs: {
|
||||
include = [],
|
||||
exclude = [],
|
||||
} = {},
|
||||
dryRun,
|
||||
} = options;
|
||||
|
||||
if (options.logger !== undefined) {
|
||||
// @ts-ignore
|
||||
this.logger = options.logger.child(loggerMetaShuffle(options.logger, undefined, [`CHK ${name}`]), mergeArr);
|
||||
} else {
|
||||
this.logger = createLabelledLogger('Check');
|
||||
}
|
||||
this.enabled = enable;
|
||||
|
||||
this.logger = options.logger.child({labels: [`CHK ${checkLogName(name)}`]}, mergeArr);
|
||||
|
||||
const ajv = createAjvFactory(this.logger);
|
||||
|
||||
this.resources = resources;
|
||||
this.client = client;
|
||||
|
||||
this.name = name;
|
||||
this.description = description;
|
||||
this.notifyOnTrigger = notifyOnTrigger;
|
||||
this.condition = condition;
|
||||
this.itemIs = itemIs;
|
||||
this.authorIs = {
|
||||
exclude: exclude.map(x => new Author(x)),
|
||||
include: include.map(x => new Author(x)),
|
||||
}
|
||||
this.cacheUserResult = {
|
||||
...userResultCacheDefault,
|
||||
...cacheUserResult
|
||||
}
|
||||
this.dryRun = dryRun;
|
||||
for (const r of rules) {
|
||||
if (r instanceof Rule || r instanceof RuleSet) {
|
||||
this.rules.push(r);
|
||||
@@ -57,16 +104,13 @@ export class Check implements ICheck {
|
||||
let setErrors: any = [];
|
||||
let ruleErrors: any = [];
|
||||
if (valid) {
|
||||
// @ts-ignore
|
||||
r.logger = this.logger;
|
||||
this.rules.push(new RuleSet(r as RuleSetJSONConfig));
|
||||
const ruleConfig = r as RuleSetObjectJson;
|
||||
this.rules.push(new RuleSet({...ruleConfig, logger: this.logger, subredditName, resources: this.resources, client: this.client}));
|
||||
} else {
|
||||
setErrors = ajv.errors;
|
||||
valid = ajv.validate(RuleSchema, r);
|
||||
if (valid) {
|
||||
// @ts-ignore
|
||||
r.logger = this.logger;
|
||||
this.rules.push(ruleFactory(r as RuleJSONConfig));
|
||||
this.rules.push(ruleFactory(r as RuleJSONConfig, this.logger, subredditName, this.resources, this.client));
|
||||
} else {
|
||||
ruleErrors = ajv.errors;
|
||||
const leastErrorType = setErrors.length < ruleErrors ? 'RuleSet' : 'Rule';
|
||||
@@ -86,7 +130,11 @@ export class Check implements ICheck {
|
||||
} else {
|
||||
let valid = ajv.validate(ActionSchema, a);
|
||||
if (valid) {
|
||||
this.actions.push(actionFactory(a as ActionJSONConfig));
|
||||
const aj = a as ActionJson;
|
||||
this.actions.push(actionFactory({
|
||||
...aj,
|
||||
dryRun: this.dryRun || aj.dryRun
|
||||
}, this.logger, subredditName, this.resources, this.client));
|
||||
// @ts-ignore
|
||||
a.logger = this.logger;
|
||||
} else {
|
||||
@@ -94,79 +142,312 @@ export class Check implements ICheck {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
async run(item: Submission | Comment, existingResults: RuleResult[] = []): Promise<[boolean, RuleResult[]]> {
|
||||
this.logger.debug('Starting check');
|
||||
let allResults: RuleResult[] = [];
|
||||
let runOne = false;
|
||||
logSummary(type: string) {
|
||||
const runStats = [];
|
||||
const ruleSetCount = this.rules.reduce((x, r) => r instanceof RuleSet ? x + 1 : x, 0);
|
||||
const rulesInSetsCount = this.rules.reduce((x, r) => r instanceof RuleSet ? x + r.rules.length : x, 0);
|
||||
if (ruleSetCount > 0) {
|
||||
runStats.push(`${ruleSetCount} Rule Sets (${rulesInSetsCount} Rules)`);
|
||||
}
|
||||
const topRuleCount = this.rules.reduce((x, r) => r instanceof Rule ? x + 1 : x, 0);
|
||||
if (topRuleCount > 0) {
|
||||
runStats.push(`${topRuleCount} Top-Level Rules`);
|
||||
}
|
||||
runStats.push(`${this.actions.length} Actions`);
|
||||
// not sure if this should be info or verbose
|
||||
this.logger.info(`=${this.enabled ? 'Enabled' : 'Disabled'}= ${type.toUpperCase()} (${this.condition})${this.notifyOnTrigger ? ' ||Notify on Trigger|| ' : ''} => ${runStats.join(' | ')}${this.description !== undefined ? ` => ${this.description}` : ''}`);
|
||||
if (this.rules.length === 0 && this.itemIs.length === 0 && this.authorIs.exclude.length === 0 && this.authorIs.include.length === 0) {
|
||||
this.logger.warn('No rules, item tests, or author test found -- this check will ALWAYS PASS!');
|
||||
}
|
||||
let ruleSetIndex = 1;
|
||||
for (const r of this.rules) {
|
||||
const combinedResults = [...existingResults, ...allResults];
|
||||
const [passed, results] = await r.run(item, combinedResults);
|
||||
//allResults = allResults.concat(determineNewResults(combinedResults, results));
|
||||
allResults = allResults.concat(results);
|
||||
if (passed === null) {
|
||||
if (r instanceof RuleSet) {
|
||||
for (const ru of r.rules) {
|
||||
this.logger.verbose(`(Rule Set ${ruleSetIndex} ${r.condition}) => ${ru.getRuleUniqueName()}`);
|
||||
}
|
||||
ruleSetIndex++;
|
||||
} else {
|
||||
this.logger.verbose(`(Rule) => ${r.getRuleUniqueName()}`);
|
||||
}
|
||||
}
|
||||
for (const a of this.actions) {
|
||||
this.logger.verbose(`(Action) => ${a.getActionUniqueName()}`);
|
||||
}
|
||||
}
|
||||
|
||||
async getCacheResult(item: Submission | Comment) : Promise<UserResultCache | undefined> {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
async setCacheResult(item: Submission | Comment, result: UserResultCache): Promise<void> {
|
||||
}
|
||||
|
||||
async runRules(item: Submission | Comment, existingResults: RuleResult[] = []): Promise<[boolean, RuleResult[], boolean?]> {
|
||||
try {
|
||||
let allRuleResults: RuleResult[] = [];
|
||||
let allResults: (RuleResult | RuleSetResult)[] = [];
|
||||
|
||||
// check cache results
|
||||
const cacheResult = await this.getCacheResult(item);
|
||||
if(cacheResult !== undefined) {
|
||||
this.logger.verbose(`Skipping rules run because result was found in cache, Check Triggered Result: ${cacheResult.result}`);
|
||||
return [cacheResult.result, cacheResult.ruleResults, true];
|
||||
}
|
||||
|
||||
const itemPass = await this.resources.testItemCriteria(item, this.itemIs);
|
||||
if (!itemPass) {
|
||||
this.logger.verbose(`${FAIL} => Item did not pass 'itemIs' test`);
|
||||
return [false, allRuleResults];
|
||||
}
|
||||
let authorPass = null;
|
||||
if (this.authorIs.include !== undefined && this.authorIs.include.length > 0) {
|
||||
for (const auth of this.authorIs.include) {
|
||||
if (await this.resources.testAuthorCriteria(item, auth)) {
|
||||
authorPass = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!authorPass) {
|
||||
this.logger.verbose(`${FAIL} => Inclusive author criteria not matched`);
|
||||
return Promise.resolve([false, allRuleResults]);
|
||||
}
|
||||
}
|
||||
if (authorPass === null && this.authorIs.exclude !== undefined && this.authorIs.exclude.length > 0) {
|
||||
for (const auth of this.authorIs.exclude) {
|
||||
if (await this.resources.testAuthorCriteria(item, auth, false)) {
|
||||
authorPass = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!authorPass) {
|
||||
this.logger.verbose(`${FAIL} => Exclusive author criteria not matched`);
|
||||
return Promise.resolve([false, allRuleResults]);
|
||||
}
|
||||
}
|
||||
|
||||
if (this.rules.length === 0) {
|
||||
this.logger.info(`${PASS} => No rules to run, check auto-passes`);
|
||||
return [true, allRuleResults];
|
||||
}
|
||||
|
||||
let runOne = false;
|
||||
for (const r of this.rules) {
|
||||
//let results: RuleResult | RuleSetResult;
|
||||
const combinedResults = [...existingResults, ...allRuleResults];
|
||||
const [passed, results] = await r.run(item, combinedResults);
|
||||
if (isRuleSetResult(results)) {
|
||||
allRuleResults = allRuleResults.concat(results.results);
|
||||
} else {
|
||||
allRuleResults = allRuleResults.concat(results as RuleResult);
|
||||
}
|
||||
allResults.push(results);
|
||||
if (passed === null) {
|
||||
continue;
|
||||
}
|
||||
runOne = true;
|
||||
if (passed) {
|
||||
if (this.condition === 'OR') {
|
||||
this.logger.info(`${PASS} => Rules: ${resultsSummary(allResults, this.condition)}`);
|
||||
return [true, allRuleResults];
|
||||
}
|
||||
} else if (this.condition === 'AND') {
|
||||
this.logger.verbose(`${FAIL} => Rules: ${resultsSummary(allResults, this.condition)}`);
|
||||
return [false, allRuleResults];
|
||||
}
|
||||
}
|
||||
if (!runOne) {
|
||||
this.logger.verbose(`${FAIL} => All Rules skipped because of Author checks or itemIs tests`);
|
||||
return [false, allRuleResults];
|
||||
} else if (this.condition === 'OR') {
|
||||
// if OR and did not return already then none passed
|
||||
this.logger.verbose(`${FAIL} => Rules: ${resultsSummary(allResults, this.condition)}`);
|
||||
return [false, allRuleResults];
|
||||
}
|
||||
// otherwise AND and did not return already so all passed
|
||||
this.logger.info(`${PASS} => Rules: ${resultsSummary(allResults, this.condition)}`);
|
||||
return [true, allRuleResults];
|
||||
} catch (e) {
|
||||
e.logged = true;
|
||||
this.logger.warn(`Running rules failed due to uncaught exception`, e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
async runActions(item: Submission | Comment, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionResult[]> {
|
||||
const dr = runtimeDryrun || this.dryRun;
|
||||
this.logger.debug(`${dr ? 'DRYRUN - ' : ''}Running Actions`);
|
||||
const runActions: ActionResult[] = [];
|
||||
for (const a of this.actions) {
|
||||
if(!a.enabled) {
|
||||
runActions.push({
|
||||
kind: a.getKind(),
|
||||
name: a.getActionUniqueName(),
|
||||
run: false,
|
||||
success: false,
|
||||
runReason: 'Not enabled',
|
||||
dryRun: (a.dryRun || dr) || false,
|
||||
});
|
||||
this.logger.info(`Action ${a.getActionUniqueName()} not run because it is not enabled.`);
|
||||
continue;
|
||||
}
|
||||
runOne = true;
|
||||
if (passed) {
|
||||
if (this.condition === 'OR') {
|
||||
return [true, allResults];
|
||||
}
|
||||
} else if (this.condition === 'AND') {
|
||||
return [false, allResults];
|
||||
}
|
||||
}
|
||||
if (!runOne) {
|
||||
return [false, allResults];
|
||||
}
|
||||
return [true, allResults];
|
||||
}
|
||||
|
||||
async runActions(item: Submission | Comment, ruleResults: RuleResult[]): Promise<void> {
|
||||
for (const a of this.actions) {
|
||||
await a.handle(item, ruleResults);
|
||||
const res = await a.handle(item, ruleResults, runtimeDryrun);
|
||||
runActions.push(res);
|
||||
}
|
||||
this.logger.info(`${dr ? 'DRYRUN - ' : ''}Ran Actions: ${runActions.map(x => x.name).join(' | ')}`);
|
||||
return runActions;
|
||||
}
|
||||
}
|
||||
|
||||
export interface ICheck extends JoinCondition {
|
||||
export interface ICheck extends JoinCondition, ChecksActivityState {
|
||||
/**
|
||||
* Friendly name for this Check EX "crosspostSpamCheck"
|
||||
*
|
||||
* Can only contain letters, numbers, underscore, spaces, and dashes
|
||||
*
|
||||
* @pattern ^[a-zA-Z]([\w -]*[\w])?$
|
||||
* @examples ["myNewCheck"]
|
||||
* */
|
||||
name: string,
|
||||
/**
|
||||
* @examples ["A short description of what this check looks for and actions it performs"]
|
||||
* */
|
||||
description?: string,
|
||||
|
||||
/**
|
||||
* Use this option to override the `dryRun` setting for all of its `Actions`
|
||||
* @examples [false, true]
|
||||
* */
|
||||
dryRun?: boolean;
|
||||
|
||||
/**
|
||||
* A list of criteria to test the state of the `Activity` against before running the check.
|
||||
*
|
||||
* If any set of criteria passes, the Check will be run. If all criteria fail, then the Check will fail.
|
||||
*
|
||||
* @examples [[{"over_18": true, "removed": false}]]
|
||||
* */
|
||||
itemIs?: TypedActivityStates
|
||||
|
||||
/**
|
||||
* If present then these Author criteria are checked before running the Check. If criteria fails then the Check will fail.
|
||||
* */
|
||||
authorIs?: AuthorOptions
|
||||
|
||||
/**
|
||||
* Should this check be run by the bot?
|
||||
*
|
||||
* @default true
|
||||
* @examples [true]
|
||||
* */
|
||||
enable?: boolean,
|
||||
}
|
||||
|
||||
export interface CheckOptions extends ICheck {
|
||||
rules: Array<IRuleSet | IRule>
|
||||
actions: ActionConfig[]
|
||||
logger?: Logger
|
||||
logger: Logger
|
||||
subredditName: string
|
||||
notifyOnTrigger?: boolean
|
||||
resources: SubredditResources
|
||||
client: Snoowrap
|
||||
cacheUserResult?: UserResultCacheOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* An object consisting of Rules (tests) and Actions to perform if Rules are triggered
|
||||
* @see {isCheckConfig} ts-auto-guard:type-guard
|
||||
* */
|
||||
export interface CheckJSONConfig extends ICheck {
|
||||
export interface CheckJson extends ICheck {
|
||||
/**
|
||||
* The type of event (new submission or new comment) this check should be run against
|
||||
* @examples ["submission", "comment"]
|
||||
*/
|
||||
kind: 'submission' | 'comment'
|
||||
/**
|
||||
* Rules are run in the order found in configuration. Can be Rules or RuleSets
|
||||
* @minItems 1
|
||||
* A list of Rules to run.
|
||||
*
|
||||
* If `Rule` objects are triggered based on `condition` then `actions` will be performed.
|
||||
*
|
||||
* Can be `Rule`, `RuleSet`, or the `name` of any **named** `Rule` in your subreddit's configuration.
|
||||
*
|
||||
* **If `rules` is an empty array or not present then `actions` are performed immediately.**
|
||||
* */
|
||||
rules: Array<RuleSetJSONConfig | RecentActivityRuleJSONConfig | RepeatSubmissionJSONConfig | AuthorRuleJSONConfig>
|
||||
rules?: Array<RuleSetJson | RuleJson>
|
||||
/**
|
||||
* The actions to run after the check is successfully triggered. ALL actions will run in the order they are listed
|
||||
* The `Actions` to run after the check is successfully triggered. ALL `Actions` will run in the order they are listed
|
||||
*
|
||||
* Can be `Action` or the `name` of any **named** `Action` in your subreddit's configuration
|
||||
*
|
||||
* @minItems 1
|
||||
* @examples [[{"kind": "comment", "content": "this is the content of the comment", "distinguish": true}, {"kind": "lock"}]]
|
||||
* */
|
||||
actions: Array<FlairActionJSONConfig | CommentActionJSONConfig | ReportActionJSONConfig | LockActionJSONConfig | RemoveActionJSONConfig>
|
||||
actions: Array<ActionTypeJson>
|
||||
|
||||
/**
|
||||
* If notifications are configured and this is `true` then an `eventActioned` event will be sent when this check is triggered.
|
||||
*
|
||||
* @default false
|
||||
* */
|
||||
notifyOnTrigger?: boolean,
|
||||
|
||||
cacheUserResult?: UserResultCacheOptions;
|
||||
}
|
||||
|
||||
export interface SubmissionCheckJson extends CheckJson {
|
||||
kind: 'submission'
|
||||
itemIs?: SubmissionState[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Cache the result of this check based on the comment author and the submission id
|
||||
*
|
||||
* This is useful in this type of scenario:
|
||||
*
|
||||
* 1. This check is configured to run on comments for specific submissions with high volume activity
|
||||
* 2. The rules being run are not dependent on the content of the comment
|
||||
* 3. The rule results are not likely to change while cache is valid
|
||||
* */
|
||||
export interface UserResultCacheOptions {
|
||||
/**
|
||||
* @default false
|
||||
* */
|
||||
enable?: boolean,
|
||||
/**
|
||||
* The amount of time, in seconds, to cache this result
|
||||
*
|
||||
* @default 60
|
||||
* @examples [60]
|
||||
* */
|
||||
ttl?: number,
|
||||
/**
|
||||
* In the event the cache returns a triggered result, should the actions for the check also be run?
|
||||
*
|
||||
* @default true
|
||||
* */
|
||||
runActions?: boolean
|
||||
}
|
||||
|
||||
export const userResultCacheDefault: Required<UserResultCacheOptions> = {
|
||||
enable: false,
|
||||
ttl: 60,
|
||||
runActions: true,
|
||||
}
|
||||
|
||||
export interface CommentCheckJson extends CheckJson {
|
||||
kind: 'comment'
|
||||
itemIs?: CommentState[]
|
||||
}
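To illustrate how `cacheUserResult` fits into a comment check, here is a hedged configuration sketch shaped like CommentCheckJson; the check name, rule list, and action are placeholder values, not from this commit:

const highVolumeCommentCheck = {
    name: 'repeatCommenterCheck',
    kind: 'comment',
    condition: 'OR',
    rules: [],                     // empty rules => actions are performed immediately
    actions: [{kind: 'lock'}],
    // cache the triggered/not-triggered result per author + submission for 60 seconds
    cacheUserResult: {enable: true, ttl: 60, runActions: true},
};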
|
||||
|
||||
export type CheckStructuredJson = SubmissionCheckStructuredJson | CommentCheckStructuredJson;
|
||||
// export interface CheckStructuredJson extends CheckJson {
|
||||
// rules: Array<RuleSetObjectJson | RuleObjectJson>
|
||||
// actions: Array<ActionObjectJson>
|
||||
// }
|
||||
|
||||
export interface SubmissionCheckStructuredJson extends SubmissionCheckJson {
|
||||
rules: Array<RuleSetObjectJson | RuleObjectJson>
|
||||
actions: Array<ActionObjectJson>
|
||||
}
|
||||
|
||||
export interface CommentCheckStructuredJson extends CommentCheckJson {
|
||||
rules: Array<RuleSetObjectJson | RuleObjectJson>
|
||||
actions: Array<ActionObjectJson>
|
||||
}
|
||||
|
||||
src/Common/ImageData.ts (new file, 248 lines)
@@ -0,0 +1,248 @@
|
||||
import fetch from "node-fetch";
|
||||
import {Submission} from "snoowrap/dist/objects";
|
||||
import {URL} from "url";
|
||||
import {absPercentDifference, getSharpAsync, isValidImageURL} from "../util";
|
||||
import sizeOf from "image-size";
|
||||
import SimpleError from "../Utils/SimpleError";
|
||||
import {Sharp} from "sharp";
|
||||
import {blockhash} from "./blockhash/blockhash";
|
||||
|
||||
export interface ImageDataOptions {
|
||||
width?: number,
|
||||
height?: number,
|
||||
url: string,
|
||||
variants?: ImageData[]
|
||||
}
|
||||
|
||||
class ImageData {
|
||||
|
||||
width?: number
|
||||
height?: number
|
||||
url: URL
|
||||
variants: ImageData[] = []
|
||||
preferredResolution?: [number, number]
|
||||
sharpImg!: Sharp
|
||||
hashResult!: string
|
||||
actualResolution?: [number, number]
|
||||
|
||||
constructor(data: ImageDataOptions, aggressive = false) {
|
||||
this.width = data.width;
|
||||
this.height = data.height;
|
||||
this.url = new URL(data.url);
|
||||
if (!aggressive && !isValidImageURL(`${this.url.origin}${this.url.pathname}`)) {
|
||||
throw new Error('URL did not end with a valid image extension');
|
||||
}
|
||||
this.variants = data.variants || [];
|
||||
}
|
||||
|
||||
async data(format = 'raw'): Promise<Buffer> {
|
||||
// @ts-ignore
|
||||
return await (await this.sharp()).clone().toFormat(format).toBuffer();
|
||||
}
|
||||
|
||||
async hash(bits: number, useVariantIfPossible = true): Promise<string> {
|
||||
if(this.hashResult === undefined) {
|
||||
let ref: ImageData | undefined;
|
||||
if(useVariantIfPossible && this.preferredResolution !== undefined) {
|
||||
ref = this.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]);
|
||||
}
|
||||
if(ref === undefined) {
|
||||
ref = this;
|
||||
}
|
||||
this.hashResult = await blockhash((await ref.sharp()).clone(), bits);
|
||||
}
|
||||
return this.hashResult;
|
||||
}
|
||||
|
||||
async sharp(): Promise<Sharp> {
|
||||
if (this.sharpImg === undefined) {
|
||||
try {
|
||||
const response = await fetch(this.url.toString())
|
||||
if (response.ok) {
|
||||
const ct = response.headers.get('Content-Type');
|
||||
if (ct !== null && ct.includes('image')) {
|
||||
const sFunc = await getSharpAsync();
|
||||
// if image is animated then we want to extract the first frame and convert it to a regular image
|
||||
// so we can compare two static images later (also because sharp can't use resize() on animated images)
|
||||
if(['gif','webp'].some(x => ct.includes(x))) {
|
||||
this.sharpImg = await sFunc(await (await sFunc(await response.buffer(), {pages: 1, animated: false})).png().toBuffer());
|
||||
} else {
|
||||
this.sharpImg = await sFunc(await response.buffer());
|
||||
}
|
||||
const meta = await this.sharpImg.metadata();
|
||||
if (this.width === undefined || this.height === undefined) {
|
||||
this.width = meta.width;
|
||||
this.height = meta.height;
|
||||
}
|
||||
this.actualResolution = [meta.width as number, meta.height as number];
|
||||
} else {
|
||||
throw new SimpleError(`Content-Type for fetched URL ${this.url} did not contain "image"`);
|
||||
}
|
||||
} else {
|
||||
throw new SimpleError(`URL response was not OK: (${response.status})${response.statusText}`);
|
||||
}
|
||||
|
||||
|
||||
} catch (err) {
|
||||
if(!(err instanceof SimpleError)) {
|
||||
throw new Error(`Error occurred while fetching response from URL: ${err.message}`);
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
return this.sharpImg;
|
||||
}
|
||||
|
||||
get pixels() {
|
||||
if (this.actualResolution !== undefined) {
|
||||
return this.actualResolution[0] * this.actualResolution[1];
|
||||
}
|
||||
if (this.width === undefined || this.height === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return this.width * this.height;
|
||||
}
|
||||
|
||||
get hasDimensions() {
|
||||
return this.width !== undefined && this.height !== undefined;
|
||||
}
|
||||
|
||||
get baseUrl() {
|
||||
return `${this.url.origin}${this.url.pathname}`;
|
||||
}
|
||||
|
||||
setPreferredResolutionByWidth(prefWidth: number) {
|
||||
let height: number | undefined = undefined,
|
||||
width: number | undefined = undefined;
|
||||
if (this.variants.length === 0) {
|
||||
return;
|
||||
}
|
||||
for (const v of this.variants) {
|
||||
if (v.hasDimensions && (v.width as number) <= prefWidth) {
|
||||
width = v.width as number;
|
||||
height = v.height as number;
|
||||
}
|
||||
}
|
||||
if (width !== undefined) {
|
||||
this.preferredResolution = [width, (height as number)];
|
||||
}
|
||||
}
|
||||
|
||||
getSimilarResolutionVariant(width: number, height: number, allowablePercentDiff = 0): ImageData | undefined {
|
||||
if (this.variants.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
return this.variants.find(x => {
|
||||
return x.hasDimensions && (absPercentDifference(width, x.width as number) <= allowablePercentDiff) && (absPercentDifference(height, x.height as number) <= allowablePercentDiff);
|
||||
});
|
||||
}
|
||||
|
||||
isSameDimensions(otherImage: ImageData) {
|
||||
if (!this.hasDimensions || !otherImage.hasDimensions) {
|
||||
return false;
|
||||
}
|
||||
return this.width === otherImage.width && this.height === otherImage.height;
|
||||
}
|
||||
|
||||
async sameAspectRatio(otherImage: ImageData) {
|
||||
let thisRes = this.actualResolution;
|
||||
let otherRes = otherImage.actualResolution;
|
||||
if(thisRes === undefined) {
|
||||
const tMeta = await (await this.sharp()).metadata();
|
||||
const thisMeta = {width: tMeta.width as number, height: tMeta.height as number };
|
||||
this.actualResolution = [thisMeta.width, thisMeta.height];
|
||||
thisRes = this.actualResolution;
|
||||
}
|
||||
if(otherRes === undefined) {
|
||||
const otherMeta = await (await otherImage.sharp()).metadata();
|
||||
otherRes = [otherMeta.width as number, otherMeta.height as number];
|
||||
}
|
||||
const thisRatio = thisRes[0] / thisRes[1];
|
||||
const otherRatio = otherRes[0] / otherRes[1];
|
||||
|
||||
// a little leeway
|
||||
return Math.abs(thisRatio - otherRatio) < 0.1;
|
||||
}
|
||||
|
||||
static async dimensionsFromMetadata(img: Sharp) {
|
||||
const {width, height, ...rest} = await img.metadata();
|
||||
return {width: width as number, height: height as number};
|
||||
}
|
||||
|
||||
async normalizeImagesForComparison(compareLibrary: ('pixel' | 'resemble'), imgToCompare: ImageData): Promise<[Sharp, Sharp, number, number]> {
|
||||
const sFunc = await getSharpAsync();
|
||||
|
||||
let refImage = this as ImageData;
|
||||
let compareImage = imgToCompare;
|
||||
if (this.preferredResolution !== undefined) {
|
||||
const matchingVariant = compareImage.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]);
|
||||
if (matchingVariant !== undefined) {
|
||||
compareImage = matchingVariant;
|
||||
refImage = this.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]) as ImageData;
|
||||
}
|
||||
}
|
||||
|
||||
let refSharp = (await refImage.sharp()).clone();
|
||||
let refMeta = await ImageData.dimensionsFromMetadata(refSharp);
|
||||
let compareSharp = (await compareImage.sharp()).clone();
|
||||
let compareMeta = await ImageData.dimensionsFromMetadata(compareSharp);
|
||||
|
||||
// if dimensions are not the same we need to crop or resize before the final resize
|
||||
if (refMeta.width !== compareMeta.width || refMeta.height !== compareMeta.height) {
|
||||
const thisRatio = refMeta.width / (refMeta.height);
|
||||
const otherRatio = compareMeta.width / compareMeta.height;
|
||||
|
||||
const sameRatio = Math.abs(thisRatio - otherRatio) < 0.04;
|
||||
if (sameRatio) {
|
||||
// then resize first since it's most likely the same image
|
||||
// can be fairly sure a downscale will get pixels close to the same
|
||||
if (refMeta.width > compareMeta.width) {
|
||||
refSharp = sFunc(await refSharp.resize(compareMeta.width, null, {fit: 'outside'}).toBuffer());
|
||||
} else {
|
||||
compareSharp = sFunc(await compareSharp.resize(refMeta.width, null, {fit: 'outside'}).toBuffer());
|
||||
}
|
||||
refMeta = await ImageData.dimensionsFromMetadata(refSharp);
|
||||
compareMeta = await ImageData.dimensionsFromMetadata(compareSharp);
|
||||
}
|
||||
// find smallest common dimensions
|
||||
const sWidth = refMeta.width <= compareMeta.width ? refMeta.width : compareMeta.width;
|
||||
const sHeight = refMeta.height <= compareMeta.height ? refMeta.height : compareMeta.height;
|
||||
|
||||
// crop if necessary
|
||||
if(sWidth !== refMeta.width || sHeight !== refMeta.height) {
|
||||
refSharp = sFunc(await refSharp.extract({left: 0, top: 0, width: sWidth, height: sHeight}).toBuffer());
|
||||
}
|
||||
if(sWidth !== compareMeta.width || sHeight !== compareMeta.height) {
|
||||
compareSharp = sFunc(await compareSharp.extract({left: 0, top: 0, width: sWidth, height: sHeight}).toBuffer());
|
||||
}
|
||||
}
|
||||
|
||||
// final resize to reduce memory/cpu usage during comparison
|
||||
refSharp = sFunc(await refSharp.resize(400, null, {fit: 'outside'}).toBuffer());
|
||||
compareSharp = sFunc(await compareSharp.resize(400, null, {fit: 'outside'}).toBuffer());
|
||||
|
||||
const {width, height} = await ImageData.dimensionsFromMetadata(refSharp);
|
||||
return [refSharp, compareSharp, width, height];
|
||||
}
|
||||
|
||||
static fromSubmission(sub: Submission, aggressive = false): ImageData {
|
||||
const url = new URL(sub.url);
|
||||
const data: any = {
|
||||
url,
|
||||
};
|
||||
let variants = [];
|
||||
if (sub.preview !== undefined && sub.preview.enabled && sub.preview.images.length > 0) {
|
||||
const firstImg = sub.preview.images[0];
|
||||
const ref = sub.preview.images[0].source;
|
||||
data.width = ref.width;
|
||||
data.height = ref.height;
|
||||
|
||||
variants = firstImg.resolutions.map(x => new ImageData(x));
|
||||
data.variants = variants;
|
||||
}
|
||||
return new ImageData(data, aggressive);
|
||||
}
|
||||
}
|
||||
|
||||
export default ImageData;
|
||||
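For orientation, a minimal sketch of how the methods above might be wired together when checking a new submission's image against a known reference image. This is not part of the diff: the module path for ImageData and the helper name are assumptions, and only methods shown above (fromSubmission, normalizeImagesForComparison) are used.

import Submission from "snoowrap/dist/objects/Submission";
import ImageData from "./src/Common/ImageData"; // assumed path for the class defined above

// Hypothetical helper: normalize a submission's image and a reference image to the
// same dimensions so a pixel/resemble comparison can run on equal-sized buffers.
const prepareForComparison = async (sub: Submission, reference: ImageData) => {
    const subImage = ImageData.fromSubmission(sub);
    const [refSharp, compareSharp, width, height] = await reference.normalizeImagesForComparison('pixel', subImage);
    return {refSharp, compareSharp, width, height};
};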
src/Common/blockhash/blockhash.ts (new file, 234 lines)
@@ -0,0 +1,234 @@
// Perceptual image hash calculation tool based on the algorithm described in
// Block Mean Value Based Image Perceptual Hashing by Bian Yang, Fan Gu and Xiamu Niu
//
// Copyright 2014 Commons Machinery http://commonsmachinery.se/
// Distributed under an MIT license, please see LICENSE in the top dir.

// https://github.com/commonsmachinery/blockhash-js/blob/master/index.js

import {Sharp} from "sharp";

interface BlockImageData {
    data: Buffer,
    width: number,
    height: number
}

var one_bits = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4];

/* Calculate the hamming distance for two hashes in hex format */
export const hammingDistance = (hash1: string, hash2: string) => {
    var d = 0;
    var i;

    if (hash1.length !== hash2.length) {
        throw new Error("Can't compare hashes with different length");
    }

    for (i = 0; i < hash1.length; i++) {
        var n1 = parseInt(hash1[i], 16);
        var n2 = parseInt(hash2[i], 16);
        d += one_bits[n1 ^ n2];
    }
    return d;
};

var median = function(data: number[]) {
    var mdarr = data.slice(0);
    mdarr.sort(function(a, b) { return a-b; });
    if (mdarr.length % 2 === 0) {
        return (mdarr[mdarr.length/2 - 1] + mdarr[mdarr.length/2]) / 2.0;
    }
    return mdarr[Math.floor(mdarr.length/2)];
};

var translate_blocks_to_bits = function(blocks: number[], pixels_per_block: number) {
    var half_block_value = pixels_per_block * 256 * 3 / 2;
    var bandsize = blocks.length / 4;

    // Compare medians across four horizontal bands
    for (var i = 0; i < 4; i++) {
        var m = median(blocks.slice(i * bandsize, (i + 1) * bandsize));
        for (var j = i * bandsize; j < (i + 1) * bandsize; j++) {
            var v = blocks[j];

            // Output a 1 if the block is brighter than the median.
            // With images dominated by black or white, the median may
            // end up being 0 or the max value, and thus having a lot
            // of blocks of value equal to the median. To avoid
            // generating hashes of all zeros or ones, in that case output
            // 0 if the median is in the lower value space, 1 otherwise
            blocks[j] = Number(v > m || (Math.abs(v - m) < 1 && m > half_block_value));
        }
    }
};

var bits_to_hexhash = function(bitsArray: number[]) {
    var hex = [];
    for (var i = 0; i < bitsArray.length; i += 4) {
        var nibble = bitsArray.slice(i, i + 4);
        hex.push(parseInt(nibble.join(''), 2).toString(16));
    }

    return hex.join('');
};

var bmvbhash_even = function(data: BlockImageData, bits: number) {
    var blocksize_x = Math.floor(data.width / bits);
    var blocksize_y = Math.floor(data.height / bits);

    var result = [];

    for (var y = 0; y < bits; y++) {
        for (var x = 0; x < bits; x++) {
            var total = 0;

            for (var iy = 0; iy < blocksize_y; iy++) {
                for (var ix = 0; ix < blocksize_x; ix++) {
                    var cx = x * blocksize_x + ix;
                    var cy = y * blocksize_y + iy;
                    var ii = (cy * data.width + cx) * 4;

                    var alpha = data.data[ii+3];
                    if (alpha === 0) {
                        total += 765;
                    } else {
                        total += data.data[ii] + data.data[ii+1] + data.data[ii+2];
                    }
                }
            }

            result.push(total);
        }
    }

    translate_blocks_to_bits(result, blocksize_x * blocksize_y);
    return bits_to_hexhash(result);
};

var bmvbhash = function(data: BlockImageData, bits: number) {
    var result = [];

    var i, j, x, y;
    var block_width, block_height;
    var weight_top, weight_bottom, weight_left, weight_right;
    var block_top, block_bottom, block_left, block_right;
    var y_mod, y_frac, y_int;
    var x_mod, x_frac, x_int;
    var blocks: number[][] = [];

    var even_x = data.width % bits === 0;
    var even_y = data.height % bits === 0;

    if (even_x && even_y) {
        return bmvbhash_even(data, bits);
    }

    // initialize blocks array with 0s
    for (i = 0; i < bits; i++) {
        blocks.push([]);
        for (j = 0; j < bits; j++) {
            blocks[i].push(0);
        }
    }

    block_width = data.width / bits;
    block_height = data.height / bits;

    for (y = 0; y < data.height; y++) {
        if (even_y) {
            // don't bother dividing y, if the size evenly divides by bits
            block_top = block_bottom = Math.floor(y / block_height);
            weight_top = 1;
            weight_bottom = 0;
        } else {
            y_mod = (y + 1) % block_height;
            y_frac = y_mod - Math.floor(y_mod);
            y_int = y_mod - y_frac;

            weight_top = (1 - y_frac);
            weight_bottom = (y_frac);

            // y_int will be 0 on bottom/right borders and on block boundaries
            if (y_int > 0 || (y + 1) === data.height) {
                block_top = block_bottom = Math.floor(y / block_height);
            } else {
                block_top = Math.floor(y / block_height);
                block_bottom = Math.ceil(y / block_height);
            }
        }

        for (x = 0; x < data.width; x++) {
            var ii = (y * data.width + x) * 4;

            var avgvalue, alpha = data.data[ii+3];
            if (alpha === 0) {
                avgvalue = 765;
            } else {
                avgvalue = data.data[ii] + data.data[ii+1] + data.data[ii+2];
            }

            if (even_x) {
                block_left = block_right = Math.floor(x / block_width);
                weight_left = 1;
                weight_right = 0;
            } else {
                x_mod = (x + 1) % block_width;
                x_frac = x_mod - Math.floor(x_mod);
                x_int = x_mod - x_frac;

                weight_left = (1 - x_frac);
                weight_right = x_frac;

                // x_int will be 0 on bottom/right borders and on block boundaries
                if (x_int > 0 || (x + 1) === data.width) {
                    block_left = block_right = Math.floor(x / block_width);
                } else {
                    block_left = Math.floor(x / block_width);
                    block_right = Math.ceil(x / block_width);
                }
            }

            // add weighted pixel value to relevant blocks
            blocks[block_top][block_left] += avgvalue * weight_top * weight_left;
            blocks[block_top][block_right] += avgvalue * weight_top * weight_right;
            blocks[block_bottom][block_left] += avgvalue * weight_bottom * weight_left;
            blocks[block_bottom][block_right] += avgvalue * weight_bottom * weight_right;
        }
    }

    for (i = 0; i < bits; i++) {
        for (j = 0; j < bits; j++) {
            result.push(blocks[i][j]);
        }
    }

    translate_blocks_to_bits(result, block_width * block_height);
    return bits_to_hexhash(result);
};

var blockhashData = function(imgData: BlockImageData, bits: number, method: number) {
    var hash;

    if (method === 1) {
        hash = bmvbhash_even(imgData, bits);
    }
    else if (method === 2) {
        hash = bmvbhash(imgData, bits);
    }
    else {
        throw new Error("Bad hashing method");
    }

    return hash;
};

export const blockhash = async function(src: Sharp, bits: number, method: number = 2): Promise<string> {
    const {data: buff, info} = await src.ensureAlpha().raw().toBuffer({resolveWithObject: true});
    return blockhashData({
        width: info.width,
        height: info.height,
        data: buff,
    }, bits, method);
};
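A minimal usage sketch for the file above (not part of the diff): hashing two images with sharp and measuring how far apart they are with hammingDistance. The file paths and the 16-bit-per-side hash size are illustrative assumptions.

import sharp from "sharp";
import {blockhash, hammingDistance} from "./src/Common/blockhash/blockhash"; // assumed path

const perceptualDistance = async (pathA: string, pathB: string): Promise<number> => {
    // 16 bits per side => 256 blocks => a 64-character hex hash
    const hashA = await blockhash(sharp(pathA), 16);
    const hashB = await blockhash(sharp(pathB), 16);
    // 0 means the hashes are identical; larger values mean less similar images
    return hammingDistance(hashA, hashB);
};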
src/Common/defaults.ts (new file, 31 lines)
@@ -0,0 +1,31 @@
import {HistoricalStats} from "./interfaces";

export const cacheOptDefaults = {ttl: 60, max: 500, checkPeriod: 600};
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600};
export const historicalDefaults: HistoricalStats = {
    eventsCheckedTotal: 0,
    eventsActionedTotal: 0,
    checksRun: new Map(),
    checksFromCache: new Map(),
    checksTriggered: new Map(),
    rulesRun: new Map(),
    //rulesCached: new Map(),
    rulesCachedTotal: 0,
    rulesTriggered: new Map(),
    actionsRun: new Map(),
}

export const createHistoricalDefaults = (): HistoricalStats => {
    return {
        eventsCheckedTotal: 0,
        eventsActionedTotal: 0,
        checksRun: new Map(),
        checksFromCache: new Map(),
        checksTriggered: new Map(),
        rulesRun: new Map(),
        //rulesCached: new Map(),
        rulesCachedTotal: 0,
        rulesTriggered: new Map(),
        actionsRun: new Map(),
    };
}
File diff suppressed because it is too large

src/Common/types.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
import {RecentActivityRuleJSONConfig} from "../Rule/RecentActivityRule";
import {RepeatActivityJSONConfig} from "../Rule/RepeatActivityRule";
import {AuthorRuleJSONConfig} from "../Rule/AuthorRule";
import {AttributionJSONConfig} from "../Rule/AttributionRule";
import {FlairActionJson} from "../Action/SubmissionAction/FlairAction";
import {CommentActionJson} from "../Action/CommentAction";
import {ReportActionJson} from "../Action/ReportAction";
import {LockActionJson} from "../Action/LockAction";
import {RemoveActionJson} from "../Action/RemoveAction";
import {HistoryJSONConfig} from "../Rule/HistoryRule";
import {UserNoteActionJson} from "../Action/UserNoteAction";
import {ApproveActionJson} from "../Action/ApproveAction";
import {BanActionJson} from "../Action/BanAction";
import {RegexRuleJSONConfig} from "../Rule/RegexRule";
import {MessageActionJson} from "../Action/MessageAction";

export type RuleJson = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | string;
export type RuleObjectJson = Exclude<RuleJson, string>

export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | string;
export type ActionObjectJson = Exclude<ActionJson, string>;
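As a quick illustration of the string-or-object unions above (not part of the diff; the rule name is hypothetical): a rule entry in config may be a bare name that ConfigBuilder later resolves, while the *ObjectJson variants exclude the string form.

import {RuleJson, RuleObjectJson} from "./src/Common/types"; // assumed path

// A rule may be referenced by the (case-insensitive) name of a named rule defined elsewhere...
const byName: RuleJson = "my shared rule";

// ...but RuleObjectJson excludes the string form, so only a full rule object qualifies.
// @ts-expect-error a bare name is not a valid RuleObjectJson
const mustBeObject: RuleObjectJson = "my shared rule";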
@@ -1,54 +1,779 @@
|
||||
import {Logger} from "winston";
|
||||
import {createLabelledLogger, loggerMetaShuffle, mergeArr} from "./util";
|
||||
import {
|
||||
buildCacheOptionsFromProvider, buildCachePrefix,
|
||||
createAjvFactory,
|
||||
mergeArr,
|
||||
normalizeName,
|
||||
overwriteMerge,
|
||||
parseBool, randomId,
|
||||
readConfigFile,
|
||||
removeUndefinedKeys
|
||||
} from "./util";
|
||||
import {CommentCheck} from "./Check/CommentCheck";
|
||||
import {SubmissionCheck} from "./Check/SubmissionCheck";
|
||||
|
||||
import Ajv from 'ajv';
|
||||
import * as schema from './Schema/App.json';
|
||||
import Ajv, {Schema} from 'ajv';
|
||||
import * as appSchema from './Schema/App.json';
|
||||
import * as operatorSchema from './Schema/OperatorConfig.json';
|
||||
import {JSONConfig} from "./JsonConfig";
|
||||
import LoggedError from "./Utils/LoggedError";
|
||||
import {ManagerOptions} from "./Subreddit/Manager";
|
||||
|
||||
const ajv = new Ajv();
|
||||
import {CheckStructuredJson} from "./Check";
|
||||
import {
|
||||
DEFAULT_POLLING_INTERVAL,
|
||||
DEFAULT_POLLING_LIMIT,
|
||||
OperatorJsonConfig,
|
||||
OperatorConfig,
|
||||
PollingOptions,
|
||||
PollingOptionsStrong,
|
||||
PollOn,
|
||||
StrongCache,
|
||||
CacheProvider,
|
||||
CacheOptions,
|
||||
BotInstanceJsonConfig,
|
||||
BotInstanceConfig,
|
||||
RequiredWebRedditCredentials
|
||||
} from "./Common/interfaces";
|
||||
import {isRuleSetJSON, RuleSetJson, RuleSetObjectJson} from "./Rule/RuleSet";
|
||||
import deepEqual from "fast-deep-equal";
|
||||
import {ActionJson, ActionObjectJson, RuleJson, RuleObjectJson} from "./Common/types";
|
||||
import {isActionJson} from "./Action";
|
||||
import {getLogger} from "./Utils/loggerFactory";
|
||||
import {GetEnvVars} from 'env-cmd';
|
||||
import {operatorConfig} from "./Utils/CommandConfig";
|
||||
import merge from 'deepmerge';
|
||||
import * as process from "process";
|
||||
import {cacheOptDefaults, cacheTTLDefaults} from "./Common/defaults";
|
||||
import objectHash from "object-hash";
|
||||
|
||||
export interface ConfigBuilderOptions {
|
||||
logger?: Logger,
|
||||
logger: Logger,
|
||||
}
|
||||
|
||||
export const validateJson = (config: object, schema: Schema, logger: Logger): any => {
|
||||
const ajv = createAjvFactory(logger);
|
||||
const valid = ajv.validate(schema, config);
|
||||
if (valid) {
|
||||
return config;
|
||||
} else {
|
||||
logger.error('Json config was not valid. Please use schema to check validity.', {leaf: 'Config'});
|
||||
if (Array.isArray(ajv.errors)) {
|
||||
for (const err of ajv.errors) {
|
||||
let parts = [
|
||||
`At: ${err.dataPath}`,
|
||||
];
|
||||
let data;
|
||||
if (typeof err.data === 'string') {
|
||||
data = err.data;
|
||||
} else if (err.data !== null && typeof err.data === 'object' && (err.data as any).name !== undefined) {
|
||||
data = `Object named '${(err.data as any).name}'`;
|
||||
}
|
||||
if (data !== undefined) {
|
||||
parts.push(`Data: ${data}`);
|
||||
}
|
||||
let suffix = '';
|
||||
// @ts-ignore
|
||||
if (err.params.allowedValues !== undefined) {
|
||||
// @ts-ignore
|
||||
suffix = err.params.allowedValues.join(', ');
|
||||
suffix = ` [${suffix}]`;
|
||||
}
|
||||
parts.push(`${err.keyword}: ${err.schemaPath} => ${err.message}${suffix}`);
|
||||
|
||||
// if we have a reference in the description parse it out so we can log it here for context
|
||||
if (err.parentSchema !== undefined && err.parentSchema.description !== undefined) {
|
||||
const desc = err.parentSchema.description as string;
|
||||
const seeIndex = desc.indexOf('[See]');
|
||||
if (seeIndex !== -1) {
|
||||
let newLineIndex: number | undefined = desc.indexOf('\n', seeIndex);
|
||||
if (newLineIndex === -1) {
|
||||
newLineIndex = undefined;
|
||||
}
|
||||
const seeFragment = desc.slice(seeIndex + 5, newLineIndex);
|
||||
parts.push(`See:${seeFragment}`);
|
||||
}
|
||||
}
|
||||
|
||||
logger.error(`Schema Error:\r\n${parts.join('\r\n')}`, {leaf: 'Config'});
|
||||
}
|
||||
}
|
||||
throw new LoggedError('Config schema validity failure');
|
||||
}
|
||||
}
|
||||
|
||||
export class ConfigBuilder {
|
||||
configLogger: Logger;
|
||||
logger: Logger;
|
||||
|
||||
constructor(options: ConfigBuilderOptions) {
|
||||
|
||||
if (options.logger !== undefined) {
|
||||
this.logger = options.logger.child(loggerMetaShuffle(options.logger, 'Config'), mergeArr);
|
||||
} else {
|
||||
this.logger = createLabelledLogger(`Config`, `Config`);
|
||||
}
|
||||
this.configLogger = options.logger.child({leaf: 'Config'}, mergeArr);
|
||||
this.logger = options.logger;
|
||||
}
|
||||
|
||||
buildFromJson(config: object): [Array<SubmissionCheck>,Array<CommentCheck>,ManagerOptions] {
|
||||
const commentChecks: Array<CommentCheck> = [];
|
||||
const subChecks: Array<SubmissionCheck> = [];
|
||||
const valid = ajv.validate(schema, config);
|
||||
let managerOptions: ManagerOptions = {};
|
||||
if(valid) {
|
||||
const validConfig = config as JSONConfig;
|
||||
const {checks = [], ...rest} = validConfig;
|
||||
managerOptions = rest;
|
||||
for (const jCheck of checks) {
|
||||
if (jCheck.kind === 'comment') {
|
||||
commentChecks.push(new CommentCheck({...jCheck, logger: this.logger}));
|
||||
} else if (jCheck.kind === 'submission') {
|
||||
subChecks.push(new SubmissionCheck({...jCheck, logger: this.logger}));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
this.logger.error('Json config was not valid. Please use schema to check validity.', ajv.errors);
|
||||
this.logger.error(ajv.errors);
|
||||
throw new LoggedError();
|
||||
validateJson(config: object): JSONConfig {
|
||||
const validConfig = validateJson(config, appSchema, this.logger);
|
||||
return validConfig as JSONConfig;
|
||||
}
|
||||
|
||||
parseToStructured(config: JSONConfig): CheckStructuredJson[] {
|
||||
let namedRules: Map<string, RuleObjectJson> = new Map();
|
||||
let namedActions: Map<string, ActionObjectJson> = new Map();
|
||||
const {checks = []} = config;
|
||||
for (const c of checks) {
|
||||
const {rules = []} = c;
|
||||
namedRules = extractNamedRules(rules, namedRules);
|
||||
namedActions = extractNamedActions(c.actions, namedActions);
|
||||
}
|
||||
|
||||
return [subChecks, commentChecks, managerOptions];
|
||||
const structuredChecks: CheckStructuredJson[] = [];
|
||||
for (const c of checks) {
|
||||
const {rules = []} = c;
|
||||
const strongRules = insertNamedRules(rules, namedRules);
|
||||
const strongActions = insertNamedActions(c.actions, namedActions);
|
||||
const strongCheck = {...c, rules: strongRules, actions: strongActions} as CheckStructuredJson;
|
||||
structuredChecks.push(strongCheck);
|
||||
}
|
||||
|
||||
return structuredChecks;
|
||||
}
|
||||
}
|
||||
|
||||
export const buildPollingOptions = (values: (string | PollingOptions)[]): PollingOptionsStrong[] => {
|
||||
let opts: PollingOptionsStrong[] = [];
|
||||
for (const v of values) {
|
||||
if (typeof v === 'string') {
|
||||
opts.push({
|
||||
pollOn: v as PollOn,
|
||||
interval: DEFAULT_POLLING_INTERVAL,
|
||||
limit: DEFAULT_POLLING_LIMIT,
|
||||
clearProcessed: {
|
||||
size: DEFAULT_POLLING_LIMIT,
|
||||
retain: DEFAULT_POLLING_LIMIT,
|
||||
}
|
||||
});
|
||||
} else {
|
||||
const {
|
||||
pollOn: p,
|
||||
interval = DEFAULT_POLLING_INTERVAL,
|
||||
limit = DEFAULT_POLLING_LIMIT,
|
||||
delayUntil,
|
||||
clearProcessed = {size: limit, retain: limit},
|
||||
} = v;
|
||||
opts.push({
|
||||
pollOn: p as PollOn,
|
||||
interval,
|
||||
limit,
|
||||
delayUntil,
|
||||
clearProcessed
|
||||
});
|
||||
}
|
||||
}
|
||||
return opts;
|
||||
}
|
||||
|
||||
export const extractNamedRules = (rules: Array<RuleSetJson | RuleJson>, namedRules: Map<string, RuleObjectJson> = new Map()): Map<string, RuleObjectJson> => {
|
||||
//const namedRules = new Map();
|
||||
for (const r of rules) {
|
||||
let rulesToAdd: RuleObjectJson[] = [];
|
||||
if ((typeof r === 'object')) {
|
||||
if ((r as RuleObjectJson).kind !== undefined) {
|
||||
// it's a Rule
|
||||
const rule = r as RuleObjectJson;
|
||||
if (rule.name !== undefined) {
|
||||
rulesToAdd.push(rule);
|
||||
}
|
||||
} else {
|
||||
const ruleSet = r as RuleSetJson;
|
||||
const nestedNamed = extractNamedRules(ruleSet.rules);
|
||||
rulesToAdd = [...nestedNamed.values()];
|
||||
}
|
||||
for (const rule of rulesToAdd) {
|
||||
const name = rule.name as string;
|
||||
const normalName = normalizeName(name);
|
||||
const {name: n, ...rest} = rule;
|
||||
const ruleNoName = {...rest};
|
||||
|
||||
if (namedRules.has(normalName)) {
|
||||
const {name: nn, ...ruleRest} = namedRules.get(normalName) as RuleObjectJson;
|
||||
if (!deepEqual(ruleRest, ruleNoName)) {
|
||||
throw new Error(`Rule names must be unique (case-insensitive). Conflicting name: ${name}`);
|
||||
}
|
||||
} else {
|
||||
namedRules.set(normalName, rule);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return namedRules;
|
||||
}
|
||||
|
||||
export const insertNamedRules = (rules: Array<RuleSetJson | RuleJson>, namedRules: Map<string, RuleObjectJson> = new Map()): Array<RuleSetObjectJson | RuleObjectJson> => {
|
||||
const strongRules: Array<RuleSetObjectJson | RuleObjectJson> = [];
|
||||
for (const r of rules) {
|
||||
if (typeof r === 'string') {
|
||||
const foundRule = namedRules.get(r.toLowerCase());
|
||||
if (foundRule === undefined) {
|
||||
throw new Error(`No named Rule with the name ${r} was found`);
|
||||
}
|
||||
strongRules.push(foundRule);
|
||||
} else if (isRuleSetJSON(r)) {
|
||||
const {rules: sr, ...rest} = r;
|
||||
const setRules = insertNamedRules(sr, namedRules);
|
||||
const strongSet = {rules: setRules, ...rest} as RuleSetObjectJson;
|
||||
strongRules.push(strongSet);
|
||||
} else {
|
||||
strongRules.push(r);
|
||||
}
|
||||
}
|
||||
|
||||
return strongRules;
|
||||
}
|
||||
|
||||
export const extractNamedActions = (actions: Array<ActionJson>, namedActions: Map<string, ActionObjectJson> = new Map()): Map<string, ActionObjectJson> => {
|
||||
for (const a of actions) {
|
||||
if (!(typeof a === 'string')) {
|
||||
if (isActionJson(a) && a.name !== undefined) {
|
||||
const normalName = a.name.toLowerCase();
|
||||
const {name: n, ...rest} = a;
|
||||
const actionNoName = {...rest};
|
||||
if (namedActions.has(normalName)) {
|
||||
// @ts-ignore
|
||||
const {name: nn, ...aRest} = namedActions.get(normalName) as ActionObjectJson;
|
||||
if (!deepEqual(aRest, actionNoName)) {
|
||||
throw new Error(`Action names must be unique (case-insensitive). Conflicting name: ${a.name}`);
|
||||
}
|
||||
} else {
|
||||
namedActions.set(normalName, a);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return namedActions;
|
||||
}
|
||||
|
||||
export const insertNamedActions = (actions: Array<ActionJson>, namedActions: Map<string, ActionObjectJson> = new Map()): Array<ActionObjectJson> => {
|
||||
const strongActions: Array<ActionObjectJson> = [];
|
||||
for (const a of actions) {
|
||||
if (typeof a === 'string') {
|
||||
const foundAction = namedActions.get(a.toLowerCase());
|
||||
if (foundAction === undefined) {
|
||||
throw new Error(`No named Action with the name ${a} was found`);
|
||||
}
|
||||
strongActions.push(foundAction);
|
||||
} else {
|
||||
strongActions.push(a);
|
||||
}
|
||||
}
|
||||
|
||||
return strongActions;
|
||||
}
|
||||
|
||||
export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfig => {
|
||||
const {
|
||||
subreddits,
|
||||
clientId,
|
||||
clientSecret,
|
||||
accessToken,
|
||||
refreshToken,
|
||||
wikiConfig,
|
||||
dryRun,
|
||||
softLimit,
|
||||
heartbeat,
|
||||
hardLimit,
|
||||
authorTTL,
|
||||
snooProxy,
|
||||
snooDebug,
|
||||
sharedMod,
|
||||
caching,
|
||||
} = args || {};
|
||||
|
||||
const data = {
|
||||
credentials: {
|
||||
clientId,
|
||||
clientSecret,
|
||||
accessToken,
|
||||
refreshToken,
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: snooProxy,
|
||||
debug: snooDebug,
|
||||
},
|
||||
subreddits: {
|
||||
names: subreddits,
|
||||
wikiConfig,
|
||||
dryRun,
|
||||
heartbeatInterval: heartbeat,
|
||||
},
|
||||
polling: {
|
||||
sharedMod,
|
||||
},
|
||||
nanny: {
|
||||
softLimit,
|
||||
hardLimit
|
||||
}
|
||||
}
|
||||
return removeUndefinedKeys(data) as BotInstanceJsonConfig;
|
||||
}
|
||||
|
||||
export const parseOpConfigFromArgs = (args: any): OperatorJsonConfig => {
|
||||
const {
|
||||
clientId,
|
||||
clientSecret,
|
||||
redirectUri,
|
||||
operator,
|
||||
operatorDisplay,
|
||||
logLevel,
|
||||
logDir,
|
||||
port,
|
||||
sessionSecret,
|
||||
web,
|
||||
mode,
|
||||
caching,
|
||||
authorTTL,
|
||||
} = args || {};
|
||||
|
||||
const data = {
|
||||
mode,
|
||||
operator: {
|
||||
name: operator,
|
||||
display: operatorDisplay
|
||||
},
|
||||
logging: {
|
||||
level: logLevel,
|
||||
path: logDir === true ? `${process.cwd()}/logs` : undefined,
|
||||
},
|
||||
caching: {
|
||||
provider: caching,
|
||||
authorTTL
|
||||
},
|
||||
web: {
|
||||
enabled: web,
|
||||
port,
|
||||
session: {
|
||||
secret: sessionSecret
|
||||
},
|
||||
credentials: {
|
||||
clientId,
|
||||
clientSecret,
|
||||
redirectUri,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return removeUndefinedKeys(data) as OperatorJsonConfig;
|
||||
}
|
||||
|
||||
const parseListFromEnv = (val: string | undefined) => {
|
||||
let listVals: undefined | string[];
|
||||
if (val === undefined) {
|
||||
return listVals;
|
||||
}
|
||||
const trimmedVal = val.trim();
|
||||
if (trimmedVal.includes(',')) {
|
||||
// try to parse using comma
|
||||
listVals = trimmedVal.split(',').map(x => x.trim()).filter(x => x !== '');
|
||||
} else {
|
||||
// otherwise try spaces
|
||||
listVals = trimmedVal.split(' ')
|
||||
// remove any extraneous spaces
|
||||
.filter(x => x !== ' ' && x !== '');
|
||||
}
|
||||
if (listVals.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
return listVals;
|
||||
}
|
||||
|
||||
export const parseDefaultBotInstanceFromEnv = (): BotInstanceJsonConfig => {
|
||||
const data = {
|
||||
credentials: {
|
||||
clientId: process.env.CLIENT_ID,
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
accessToken: process.env.ACCESS_TOKEN,
|
||||
refreshToken: process.env.REFRESH_TOKEN,
|
||||
},
|
||||
subreddits: {
|
||||
names: parseListFromEnv(process.env.SUBREDDITS),
|
||||
wikiConfig: process.env.WIKI_CONFIG,
|
||||
dryRun: parseBool(process.env.DRYRUN, undefined),
|
||||
heartbeatInterval: process.env.HEARTBEAT !== undefined ? parseInt(process.env.HEARTBEAT) : undefined,
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: process.env.PROXY,
|
||||
debug: parseBool(process.env.SNOO_DEBUG, undefined),
|
||||
},
|
||||
polling: {
|
||||
sharedMod: parseBool(process.env.SHARE_MOD),
|
||||
},
|
||||
nanny: {
|
||||
softLimit: process.env.SOFT_LIMIT !== undefined ? parseInt(process.env.SOFT_LIMIT) : undefined,
|
||||
hardLimit: process.env.HARD_LIMIT !== undefined ? parseInt(process.env.HARD_LIMIT) : undefined
|
||||
},
|
||||
};
|
||||
return removeUndefinedKeys(data) as BotInstanceJsonConfig;
|
||||
}
|
||||
|
||||
export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
|
||||
const data = {
|
||||
mode: process.env.MODE !== undefined ? process.env.MODE as ('all' | 'server' | 'client') : undefined,
|
||||
operator: {
|
||||
name: parseListFromEnv(process.env.OPERATOR),
|
||||
display: process.env.OPERATOR_DISPLAY
|
||||
},
|
||||
logging: {
|
||||
// @ts-ignore
|
||||
level: process.env.LOG_LEVEL,
|
||||
path: process.env.LOG_DIR === 'true' ? `${process.cwd()}/logs` : undefined,
|
||||
},
|
||||
caching: {
|
||||
provider: {
|
||||
// @ts-ignore
|
||||
store: process.env.CACHING as (CacheProvider | undefined)
|
||||
},
|
||||
authorTTL: process.env.AUTHOR_TTL !== undefined ? parseInt(process.env.AUTHOR_TTL) : undefined
|
||||
},
|
||||
web: {
|
||||
port: process.env.PORT !== undefined ? parseInt(process.env.PORT) : undefined,
|
||||
session: {
|
||||
provider: process.env.SESSION_PROVIDER,
|
||||
secret: process.env.SESSION_SECRET
|
||||
},
|
||||
credentials: {
|
||||
clientId: process.env.CLIENT_ID,
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
redirectUri: process.env.REDIRECT_URI,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return removeUndefinedKeys(data) as OperatorJsonConfig;
|
||||
}
|
||||
|
||||
// Hierarchy (lower level overwrites above)
|
||||
//
|
||||
// .env file
|
||||
// Actual ENVs (from environment)
|
||||
// json config
|
||||
// args from cli
|
||||
export const parseOperatorConfigFromSources = async (args: any): Promise<OperatorJsonConfig> => {
|
||||
const {logLevel = process.env.LOG_LEVEL, logDir = process.env.LOG_DIR || false} = args || {};
|
||||
const envPath = process.env.OPERATOR_ENV;
|
||||
|
||||
// create a pre config logger to help with debugging
|
||||
const initLogger = getLogger({logLevel, logDir: logDir === true ? `${process.cwd()}/logs` : logDir}, 'init');
|
||||
|
||||
try {
|
||||
const vars = await GetEnvVars({
|
||||
envFile: {
|
||||
filePath: envPath,
|
||||
fallback: true
|
||||
}
|
||||
});
|
||||
// if we found variables in the file or at a fallback path then add them in before we do main arg parsing
|
||||
for (const [k, v] of Object.entries(vars)) {
|
||||
// don't override existing
|
||||
if (process.env[k] === undefined) {
|
||||
process.env[k] = v;
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
let msg = 'No .env file found at default location (./.env)';
|
||||
if (envPath !== undefined) {
|
||||
msg = `${msg} or OPERATOR_ENV path (${envPath})`;
|
||||
}
|
||||
initLogger.warn(`${msg} -- this may be normal if neither was provided.`);
|
||||
// mimicking --silent from env-cmd
|
||||
//swallow silently for now 😬
|
||||
}
|
||||
|
||||
const {operatorConfig = process.env.OPERATOR_CONFIG} = args;
|
||||
let configFromFile: OperatorJsonConfig = {};
|
||||
if (operatorConfig !== undefined) {
|
||||
let rawConfig;
|
||||
try {
|
||||
rawConfig = await readConfigFile(operatorConfig, {log: initLogger}) as object;
|
||||
} catch (err) {
|
||||
initLogger.error('Cannot continue app startup because operator config file was not parseable.');
|
||||
err.logged = true;
|
||||
throw err;
|
||||
}
|
||||
try {
|
||||
configFromFile = validateJson(rawConfig, operatorSchema, initLogger) as OperatorJsonConfig;
|
||||
} catch (err) {
|
||||
initLogger.error('Cannot continue app startup because operator config file was not valid.');
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
const opConfigFromArgs = parseOpConfigFromArgs(args);
|
||||
const opConfigFromEnv = parseOpConfigFromEnv();
|
||||
|
||||
const defaultBotInstanceFromArgs = parseDefaultBotInstanceFromArgs(args);
|
||||
const defaultBotInstanceFromEnv = parseDefaultBotInstanceFromEnv();
|
||||
const {bots: botInstancesFromFile = [], ...restConfigFile} = configFromFile;
|
||||
|
||||
const mergedConfig = merge.all([opConfigFromEnv, restConfigFile, opConfigFromArgs], {
|
||||
arrayMerge: overwriteMerge,
|
||||
});
|
||||
|
||||
const defaultBotInstance = merge.all([defaultBotInstanceFromEnv, defaultBotInstanceFromArgs], {
|
||||
arrayMerge: overwriteMerge,
|
||||
}) as BotInstanceJsonConfig;
|
||||
|
||||
if (configFromFile.caching !== undefined) {
|
||||
defaultBotInstance.caching = configFromFile.caching;
|
||||
}
|
||||
|
||||
let botInstances = [];
|
||||
if (botInstancesFromFile.length === 0) {
|
||||
botInstances = [defaultBotInstance];
|
||||
} else {
|
||||
botInstances = botInstancesFromFile.map(x => merge.all([defaultBotInstance, x], {arrayMerge: overwriteMerge}));
|
||||
}
|
||||
|
||||
return removeUndefinedKeys({...mergedConfig, bots: botInstances}) as OperatorJsonConfig;
|
||||
}
|
||||
|
||||
export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): OperatorConfig => {
|
||||
const {
|
||||
mode = 'all',
|
||||
operator: {
|
||||
name = [],
|
||||
display = 'Anonymous',
|
||||
} = {},
|
||||
logging: {
|
||||
level = 'verbose',
|
||||
path,
|
||||
} = {},
|
||||
caching: opCache,
|
||||
web: {
|
||||
port = 8085,
|
||||
maxLogs = 200,
|
||||
caching: webCaching = {},
|
||||
session: {
|
||||
secret = randomId(),
|
||||
maxAge: sessionMaxAge = 86400,
|
||||
} = {},
|
||||
invites: {
|
||||
maxAge: inviteMaxAge = 0,
|
||||
} = {},
|
||||
clients,
|
||||
credentials: webCredentials,
|
||||
operators,
|
||||
} = {},
|
||||
api: {
|
||||
port: apiPort = 8095,
|
||||
secret: apiSecret = randomId(),
|
||||
friendly,
|
||||
} = {},
|
||||
bots = [],
|
||||
} = data;
|
||||
|
||||
let cache: StrongCache;
|
||||
let defaultProvider: CacheOptions;
|
||||
let opActionedEventsMax: number | undefined;
|
||||
let opActionedEventsDefault: number = 25;
|
||||
|
||||
if (opCache === undefined) {
|
||||
defaultProvider = {
|
||||
store: 'memory',
|
||||
...cacheOptDefaults
|
||||
};
|
||||
cache = {
|
||||
...cacheTTLDefaults,
|
||||
provider: defaultProvider,
|
||||
actionedEventsDefault: opActionedEventsDefault,
|
||||
};
|
||||
|
||||
} else {
|
||||
const {provider, actionedEventsMax, actionedEventsDefault = opActionedEventsDefault, ...restConfig} = opCache;
|
||||
|
||||
if (actionedEventsMax !== undefined && actionedEventsMax !== null) {
|
||||
opActionedEventsMax = actionedEventsMax;
|
||||
opActionedEventsDefault = Math.min(actionedEventsDefault, actionedEventsMax);
|
||||
}
|
||||
|
||||
if (typeof provider === 'string') {
|
||||
defaultProvider = {
|
||||
store: provider as CacheProvider,
|
||||
...cacheOptDefaults
|
||||
};
|
||||
} else {
|
||||
const {ttl = 60, max = 500, store = 'memory', ...rest} = provider || {};
|
||||
defaultProvider = {
|
||||
store,
|
||||
...cacheOptDefaults,
|
||||
...rest,
|
||||
};
|
||||
}
|
||||
cache = {
|
||||
...cacheTTLDefaults,
|
||||
...restConfig,
|
||||
actionedEventsMax: opActionedEventsMax,
|
||||
actionedEventsDefault: opActionedEventsDefault,
|
||||
provider: defaultProvider,
|
||||
}
|
||||
}
|
||||
|
||||
let hydratedBots: BotInstanceConfig[] = bots.map(x => {
|
||||
const {
|
||||
name: botName,
|
||||
polling: {
|
||||
sharedMod = false,
|
||||
stagger,
|
||||
limit = 100,
|
||||
interval = 30,
|
||||
} = {},
|
||||
queue: {
|
||||
maxWorkers = 1,
|
||||
} = {},
|
||||
caching,
|
||||
nanny: {
|
||||
softLimit = 250,
|
||||
hardLimit = 50
|
||||
} = {},
|
||||
snoowrap = {},
|
||||
credentials: {
|
||||
clientId: ci,
|
||||
clientSecret: cs,
|
||||
...restCred
|
||||
} = {},
|
||||
subreddits: {
|
||||
names = [],
|
||||
exclude = [],
|
||||
wikiConfig = 'botconfig/contextbot',
|
||||
dryRun,
|
||||
heartbeatInterval = 300,
|
||||
} = {},
|
||||
} = x;
|
||||
|
||||
|
||||
let botCache: StrongCache;
|
||||
let botActionedEventsDefault: number;
|
||||
|
||||
if (caching === undefined) {
|
||||
|
||||
botCache = {
|
||||
...cacheTTLDefaults,
|
||||
actionedEventsDefault: opActionedEventsDefault,
|
||||
actionedEventsMax: opActionedEventsMax,
|
||||
provider: {...defaultProvider}
|
||||
};
|
||||
} else {
|
||||
const {
|
||||
provider,
|
||||
actionedEventsMax = opActionedEventsMax,
|
||||
actionedEventsDefault = opActionedEventsDefault,
|
||||
...restConfig
|
||||
} = caching;
|
||||
|
||||
botActionedEventsDefault = actionedEventsDefault;
|
||||
if (actionedEventsMax !== undefined) {
|
||||
botActionedEventsDefault = Math.min(actionedEventsDefault, actionedEventsMax);
|
||||
}
|
||||
|
||||
if (typeof provider === 'string') {
|
||||
botCache = {
|
||||
...cacheTTLDefaults,
|
||||
...restConfig,
|
||||
actionedEventsDefault: botActionedEventsDefault,
|
||||
provider: {
|
||||
store: provider as CacheProvider,
|
||||
...cacheOptDefaults
|
||||
}
|
||||
}
|
||||
} else {
|
||||
const {ttl = 60, max = 500, store = 'memory', ...rest} = provider || {};
|
||||
botCache = {
|
||||
...cacheTTLDefaults,
|
||||
...restConfig,
|
||||
actionedEventsDefault: botActionedEventsDefault,
|
||||
actionedEventsMax,
|
||||
provider: {
|
||||
store,
|
||||
...cacheOptDefaults,
|
||||
...rest,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const botCreds = {
|
||||
clientId: (ci as string),
|
||||
clientSecret: (cs as string),
|
||||
...restCred,
|
||||
};
|
||||
if (botCache.provider.prefix === undefined || botCache.provider.prefix === defaultProvider.prefix) {
|
||||
// need to provide unique prefix to bot
|
||||
botCache.provider.prefix = buildCachePrefix([botCache.provider.prefix, 'bot', (botName || objectHash.sha1(botCreds))]);
|
||||
}
|
||||
|
||||
return {
|
||||
name: botName,
|
||||
snoowrap,
|
||||
subreddits: {
|
||||
names,
|
||||
exclude,
|
||||
wikiConfig,
|
||||
heartbeatInterval,
|
||||
dryRun,
|
||||
},
|
||||
credentials: botCreds,
|
||||
caching: botCache,
|
||||
polling: {
|
||||
sharedMod,
|
||||
stagger,
|
||||
limit,
|
||||
interval,
|
||||
},
|
||||
queue: {
|
||||
maxWorkers,
|
||||
},
|
||||
nanny: {
|
||||
softLimit,
|
||||
hardLimit
|
||||
}
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
const defaultOperators = typeof name === 'string' ? [name] : name;
|
||||
|
||||
const config: OperatorConfig = {
|
||||
mode,
|
||||
operator: {
|
||||
name: defaultOperators,
|
||||
display,
|
||||
},
|
||||
logging: {
|
||||
level,
|
||||
path
|
||||
},
|
||||
caching: cache,
|
||||
web: {
|
||||
port,
|
||||
caching: {
|
||||
...defaultProvider,
|
||||
...webCaching
|
||||
},
|
||||
invites: {
|
||||
maxAge: inviteMaxAge,
|
||||
},
|
||||
session: {
|
||||
secret,
|
||||
maxAge: sessionMaxAge,
|
||||
},
|
||||
maxLogs,
|
||||
clients: clients === undefined ? [{host: 'localhost:8095', secret: apiSecret}] : clients,
|
||||
credentials: webCredentials as RequiredWebRedditCredentials,
|
||||
operators: operators || defaultOperators,
|
||||
},
|
||||
api: {
|
||||
port: apiPort,
|
||||
secret: apiSecret,
|
||||
friendly
|
||||
},
|
||||
bots: hydratedBots,
|
||||
};
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
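To connect the functions above, a hedged sketch of the expected startup flow (the module path is an assumption): configuration sources are merged in the documented precedence order and then hydrated with defaults.

import {parseOperatorConfigFromSources, buildOperatorConfigWithDefaults} from "./src/ConfigBuilder"; // assumed path

const bootstrapConfig = async (cliArgs: any) => {
    // merges .env file, process env, the operator config file, and CLI args (lowest to highest precedence)
    const opJson = await parseOperatorConfigFromSources(cliArgs);
    // fills in defaults: ports, cache providers/TTLs, per-bot cache prefixes, etc.
    return buildOperatorConfigWithDefaults(opJson);
};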
@@ -1,3 +0,0 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=JsonConfig.js.map
@@ -1 +0,0 @@
{"version":3,"file":"JsonConfig.js","sourceRoot":"","sources":["JsonConfig.ts"],"names":[],"mappings":""}
@@ -1,7 +1,7 @@
import {CheckJSONConfig} from "./Check";
import {PollingOptions} from "./Common/interfaces";
import {CheckJson, CommentCheckJson, SubmissionCheckJson} from "./Check";
import {ManagerOptions} from "./Common/interfaces";

export interface JSONConfig {
export interface JSONConfig extends ManagerOptions {
    /**
     * A list of all the checks that should be run for a subreddit.
     *
@@ -12,6 +12,5 @@ export interface JSONConfig {
     * When a check "passes", and actions are performed, then all subsequent checks are skipped.
     * @minItems 1
     * */
    checks: CheckJSONConfig[]
    polling?: PollingOptions
    checks: Array<SubmissionCheckJson|CommentCheckJson>
}
src/Notification/DiscordNotifier.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
import webhook from 'webhook-discord';
import {NotificationContent} from "../Common/interfaces";

class DiscordNotifier {
    name: string
    botName: string
    type: string = 'Discord';
    url: string;

    constructor(name: string, botName: string, url: string) {
        this.name = name;
        this.url = url;
        this.botName = botName;
    }

    async handle(val: NotificationContent) {
        const h = new webhook.Webhook(this.url);

        const hook = new webhook.MessageBuilder();

        const {logLevel, title, footer, body = ''} = val;

        hook.setName(this.botName === 'ContextMod' ? 'ContextMod' : `(ContextMod) ${this.botName}`)
            .setTitle(title)
            .setDescription(body);

        if (footer !== undefined) {
            // @ts-ignore
            hook.setFooter(footer, false);
        }

        switch (logLevel) {
            case 'error':
                hook.setColor("#ff0000");
                break;
            case 'warn':
                hook.setColor("#ffe900");
                break;
            default:
                hook.setColor("#00fffa");
                break;
        }

        await h.send(hook);
    }
}

export default DiscordNotifier;
src/Notification/NotificationManager.ts (new file, 122 lines)
@@ -0,0 +1,122 @@
import {
    NotificationConfig,
    NotificationEventConfig,
    NotificationEvents,
    NotificationEventType,
    Notifier
} from "../Common/interfaces";
import DiscordNotifier from "./DiscordNotifier";
import {Logger} from "winston";
import {mergeArr} from "../util";
import Subreddit from "snoowrap/dist/objects/Subreddit";

class NotificationManager {
    notifiers: Notifier[] = [];
    events: NotificationEvents = [];
    logger: Logger;
    subreddit: Subreddit;
    name: string;

    constructor(logger: Logger, subreddit: Subreddit, displayName: string, botName: string, config?: NotificationConfig) {
        this.logger = logger.child({leaf: 'Notifications'}, mergeArr);
        this.subreddit = subreddit;
        this.name = displayName;
        if (config !== undefined) {
            const {events = [], providers = []} = config;
            this.events = events;
            for (const p of providers) {
                switch (p.type) {
                    case 'discord':
                        this.notifiers.push(new DiscordNotifier(p.name, botName, p.url));
                        break;
                    default:
                        this.logger.warn(`Notification provider type of ${p.type} not recognized.`);
                        break;
                }
            }
            if (this.events.length > 0 && this.notifiers.length === 0) {
                this.logger.warn(`Config specified ${this.events.length} event hooks but no notification providers were set up!`);
            }
        }
    }

    getStats() {
        let notifiers: string[] = [];
        if (this.notifiers.length > 0) {
            notifiers = this.notifiers.map(x => `${x.name} (${x.type})`);
        }
        let events: string[] = [];
        if (this.events.length > 0) {
            events = this.events.reduce((acc: string[], curr) => {
                const e = Array.isArray(curr) ? curr : curr.types;
                for (const ev of e) {
                    if (!acc.includes(ev)) {
                        acc.push(ev);
                    }
                }
                return acc;
            }, []);
        }

        return {
            notifiers,
            events,
        }
    }

    async handle(name: NotificationEventType, title: string, body?: string, causedBy?: string, logLevel?: string) {

        if (this.notifiers.length === 0 || this.events.length === 0) {
            return;
        }

        let notifiers: Notifier[] = [];
        for (const e of this.events) {
            // array of event NotificationEventType
            if (Array.isArray(e)) {
                const ev = e as NotificationEventType[];
                for (const v of ev) {
                    if (v === name) {
                        // if we find the event here then we want to send the event to all configured notifiers
                        notifiers = notifiers.concat(this.notifiers);
                    }
                }
            } else {
                // e is a NotificationEventConfig
                const ev = e as NotificationEventConfig;
                const hasEvent = ev.types.some(x => x === name);
                if (hasEvent) {
                    const p = ev.providers.map(y => y.toLowerCase());
                    const validNotifiers = this.notifiers.filter(x => p.includes(x.name.toLowerCase()));
                    notifiers = notifiers.concat(validNotifiers);
                }
            }
        }
        // remove dups
        notifiers = notifiers.reduce((acc: Notifier[], curr: Notifier) => {
            if (!acc.some(x => x.name === curr.name)) {
                return acc.concat(curr);
            }
            return acc;
        }, []);

        let footer = [];
        if (causedBy !== undefined) {
            footer.push(`* Performed by "${causedBy}"`);
        }
        footer.push(`* Notification triggered by "${name}"`);

        this.logger.info(`Sending notification for ${name} to providers: ${notifiers.map(x => `${x.name} (${x.type})`).join(', ')}`);

        for (const n of notifiers) {
            await n.handle({
                title: `${title} (${this.name})`,
                body: body || '',
                footer: footer.length > 0 ? footer.join('\n') : undefined,
                logLevel
            });
        }
    }
}

export default NotificationManager;
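For reference, a hedged sketch of the configuration shape the constructor above consumes (the names, URL, and event type strings are placeholders, not values confirmed by the diff): providers declares Discord webhooks, and events maps event types to providers.

// Placeholder values throughout; the strings in "types" stand in for real NotificationEventType values.
const notificationConfig = {
    providers: [
        {type: 'discord' as const, name: 'mod-alerts', url: 'https://discord.com/api/webhooks/<id>/<token>'}
    ],
    events: [
        // object form: send only to the named providers; a plain array of event types would go to all providers
        {types: ['<someEventType>'], providers: ['mod-alerts']}
    ]
};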
src/Rule/AttributionRule.ts (new file, 479 lines)
@@ -0,0 +1,479 @@
import {SubmissionRule, SubmissionRuleJSONConfig} from "./SubmissionRule";
|
||||
import {ActivityWindowType, CommentState, DomainInfo, ReferenceSubmission, SubmissionState} from "../Common/interfaces";
|
||||
import {Rule, RuleOptions, RuleResult} from "./index";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {getAttributionIdentifier} from "../Utils/SnoowrapUtils";
|
||||
import dayjs from "dayjs";
|
||||
import {
|
||||
asSubmission,
|
||||
comparisonTextOp, convertSubredditsRawToStrong,
|
||||
FAIL,
|
||||
formatNumber, getActivitySubredditName, isSubmission,
|
||||
parseGenericValueOrPercentComparison,
|
||||
parseSubredditName,
|
||||
PASS
|
||||
} from "../util";
|
||||
import { Comment } from "snoowrap/dist/objects";
|
||||
import SimpleError from "../Utils/SimpleError";
|
||||
import as from "async";
|
||||
|
||||
|
||||
export interface AttributionCriteria {
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare comments against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign]`
|
||||
*
|
||||
* * EX `> 12` => greater than 12 activities originate from same attribution
|
||||
* * EX `<= 10%` => less than 10% of all Activities have the same attribution
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* @default "> 10%"
|
||||
* */
|
||||
threshold: string
|
||||
window: ActivityWindowType
|
||||
/**
|
||||
* What activities to use for total count when determining what percentage an attribution comprises
|
||||
*
|
||||
* EX:
|
||||
*
|
||||
* Author has 100 activities, 40 are submissions and 60 are comments
|
||||
*
|
||||
* * If `submissions` then if 10 submissions are for Youtube Channel A then percentage => 10/40 = 25%
* * If `all` then if 10 submissions are for Youtube Channel A then percentage => 10/100 = 10%
|
||||
*
|
||||
* @default all
|
||||
**/
|
||||
thresholdOn?: 'submissions' | 'all'
|
||||
/**
|
||||
* The minimum number of activities that must exist for this criteria to run
|
||||
* @default 5
|
||||
* */
|
||||
minActivityCount?: number
|
||||
|
||||
/**
|
||||
* A list of domains whose Activities will be tested against `threshold`.
|
||||
*
|
||||
* The values are tested as partial strings so you do not need to include full URLs, just the part that matters.
|
||||
*
|
||||
* EX `["youtube"]` will match submissions with the domain `https://youtube.com/c/aChannel`
|
||||
* EX `["youtube.com/c/bChannel"]` will NOT match submissions with the domain `https://youtube.com/c/aChannel`
|
||||
*
|
||||
* If you wish to aggregate on self-posts for a subreddit use the syntax `self.[subreddit]` EX `self.AskReddit`
|
||||
*
|
||||
* **If this Rule is part of a Check for a Submission and you wish to aggregate on the domain of the Submission use the special string `AGG:SELF`**
|
||||
*
|
||||
* If nothing is specified or list is empty (default) aggregate using `aggregateOn`
|
||||
*
|
||||
* @default [[]]
|
||||
* */
|
||||
domains?: string[],
|
||||
/**
|
||||
* Set to `true` if you wish to combine all of the Activities from `domains` to test against `threshold` instead of testing each `domain` individually
|
||||
*
|
||||
* @default false
|
||||
* @examples [false]
|
||||
* */
|
||||
domainsCombined?: boolean,
|
||||
|
||||
/**
|
||||
* When present, Activities WILL ONLY be counted if they are found in this list of Subreddits
|
||||
*
|
||||
* Each value in the list can be either:
|
||||
*
|
||||
* * string (name of subreddit)
|
||||
* * regular expression to run on the subreddit name
|
||||
* * `SubredditState`
|
||||
*
|
||||
* EX `["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]`
|
||||
* @examples [["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]]
|
||||
* */
|
||||
include?: string[],
|
||||
/**
|
||||
* When present, Activities WILL NOT be counted if they are found in this list of Subreddits
|
||||
*
|
||||
* Each value in the list can be either:
|
||||
*
|
||||
* * string (name of subreddit)
|
||||
* * regular expression to run on the subreddit name
|
||||
* * `SubredditState`
|
||||
*
|
||||
* EX `["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]`
|
||||
* @examples [["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]]
|
||||
* */
|
||||
exclude?: string[],
|
||||
|
||||
/**
|
||||
* When present, Submissions from `window` will only be counted if they meet this criteria
|
||||
* */
|
||||
submissionState?: SubmissionState
|
||||
/**
|
||||
* When present, Comments from `window` will only be counted if they meet this criteria
|
||||
* */
|
||||
commentState?: CommentState
|
||||
|
||||
/**
|
||||
* This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`
|
||||
*
|
||||
* * If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)
|
||||
* * If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)
|
||||
* * If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`
|
||||
* * If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit
|
||||
*
|
||||
* If nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)
|
||||
*
|
||||
* @default undefined
|
||||
* @examples [[]]
|
||||
* */
|
||||
aggregateOn?: ('media' | 'redditMedia' | 'self' | 'link')[],
|
||||
|
||||
/**
|
||||
* Should the criteria consolidate recognized media domains into the parent domain?
|
||||
*
|
||||
* Submissions to major media domains (youtube, vimeo) can be identified by individual Channel/Author...
|
||||
*
|
||||
* * If `false` then domains will be aggregated at the channel level IE Youtube Channel A (2 counts), Youtube Channel B (3 counts)
|
||||
* * If `true` then media domains will be consolidated at the domain level and then aggregated IE youtube.com (5 counts)
|
||||
*
|
||||
* @default false
|
||||
* @examples [false]
|
||||
* */
|
||||
consolidateMediaDomains?: boolean
|
||||
|
||||
name?: string
|
||||
}
|
||||
|
||||
const SUBMISSION_DOMAIN = 'AGG:SELF';
|
||||
|
||||
const defaultCriteria = [{threshold: '10%', window: 100}];
|
||||
|
||||
interface DomainAgg {
|
||||
info: DomainInfo,
|
||||
count: number
|
||||
}
|
||||
|
||||
export class AttributionRule extends Rule {
|
||||
criteria: AttributionCriteria[];
|
||||
criteriaJoin: 'AND' | 'OR';
|
||||
|
||||
constructor(options: AttributionOptions) {
|
||||
super(options);
|
||||
const {
|
||||
criteria = defaultCriteria,
|
||||
criteriaJoin = 'OR',
|
||||
} = options || {};
|
||||
|
||||
this.criteria = criteria;
|
||||
this.criteriaJoin = criteriaJoin;
|
||||
if (this.criteria.length === 0) {
|
||||
throw new Error('Must provide at least one AttributionCriteria');
|
||||
}
|
||||
}
|
||||
|
||||
getKind(): string {
|
||||
return "Attr";
|
||||
}
|
||||
|
||||
protected getSpecificPremise(): object {
|
||||
return {
|
||||
criteria: this.criteria,
|
||||
criteriaJoin: this.criteriaJoin,
|
||||
}
|
||||
}
|
||||
|
||||
protected async process(item: Comment | Submission): Promise<[boolean, RuleResult]> {
|
||||
let criteriaResults = [];
|
||||
|
||||
for (const criteria of this.criteria) {
|
||||
|
||||
const {
|
||||
threshold = '> 10%',
|
||||
window,
|
||||
thresholdOn = 'all',
|
||||
minActivityCount = 10,
|
||||
aggregateOn = ['link','media'],
|
||||
consolidateMediaDomains = false,
|
||||
domains = [],
|
||||
domainsCombined = false,
|
||||
include = [],
|
||||
exclude = [],
|
||||
commentState,
|
||||
submissionState,
|
||||
} = criteria;
|
||||
|
||||
const {operator, value, isPercent, extra = ''} = parseGenericValueOrPercentComparison(threshold);
|
||||
|
||||
let activities = thresholdOn === 'submissions' ? await this.resources.getAuthorSubmissions(item.author, {window: window}) : await this.resources.getAuthorActivities(item.author, {window: window});
|
||||
|
||||
if(include.length > 0 || exclude.length > 0) {
|
||||
const defaultOpts = {
|
||||
defaultFlags: 'i',
|
||||
generateDescription: true
|
||||
};
|
||||
if(include.length > 0) {
|
||||
const subStates = include.map(x => convertSubredditsRawToStrong(x, defaultOpts));
|
||||
activities = await this.resources.batchTestSubredditCriteria(activities, subStates);
|
||||
} else {
|
||||
const subStates = exclude.map(x => convertSubredditsRawToStrong(x, defaultOpts));
|
||||
const toExclude = (await this.resources.batchTestSubredditCriteria(activities, subStates)).map(x => x.id);
|
||||
activities = activities.filter(x => !toExclude.includes(x.id));
|
||||
}
|
||||
}
|
||||
|
||||
activities = await as.filter(activities, async (activity) => {
|
||||
if (asSubmission(activity) && submissionState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [submissionState]);
|
||||
} else if (commentState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [commentState]);
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
let activityTotal = 0;
|
||||
let firstActivity, lastActivity;
|
||||
|
||||
if(activities.length === 0) {
|
||||
this.logger.debug(`No activities retrieved for criteria`);
|
||||
continue;
|
||||
}
|
||||
|
||||
activityTotal = activities.length;
|
||||
firstActivity = activities[0];
|
||||
lastActivity = activities[activities.length - 1];
|
||||
|
||||
const activityTotalWindow = dayjs.duration(dayjs(firstActivity.created_utc * 1000).diff(dayjs(lastActivity.created_utc * 1000)));
|
||||
|
||||
if (activities.length < minActivityCount) {
|
||||
criteriaResults.push({criteria, activityTotal, activityTotalWindow, triggered: false, aggDomains: [], minCountMet: false});
|
||||
this.logger.debug(`${activities.length } activities retrieved was less than min activities required to run criteria (${minActivityCount})`);
|
||||
continue;
|
||||
}
|
||||
|
||||
const realDomains: DomainInfo[] = domains.map(x => {
|
||||
if(x === SUBMISSION_DOMAIN) {
|
||||
if(!(asSubmission(item))) {
|
||||
throw new SimpleError('Cannot run Attribution Rule with the domain AGG:SELF on a Comment');
|
||||
}
|
||||
return getAttributionIdentifier(item, consolidateMediaDomains);
|
||||
}
|
||||
return {display: x, domain: x, aliases: [x]};
|
||||
});
|
||||
const realDomainIdents = realDomains.map(x => x.aliases).flat(1).map(x => x.toLowerCase());
|
||||
|
||||
const submissions: Submission[] = thresholdOn === 'submissions' ? activities as Submission[] : activities.filter(x => isSubmission(x)) as Submission[];
|
||||
const aggregatedSubmissions = submissions.reduce((acc: Map<string, DomainAgg>, sub) => {
|
||||
const domainInfo = getAttributionIdentifier(sub, consolidateMediaDomains)
|
||||
|
||||
let domainType = 'link';
|
||||
if(sub.is_video || ['i.redd.it','v.redd.it'].includes(sub.domain)
|
||||
// @ts-ignore
|
||||
|| sub.gallery_data !== undefined) {
|
||||
domainType = 'redditMedia';
|
||||
} else if(sub.is_self || sub.domain === 'reddit.com') {
|
||||
domainType = 'self';
|
||||
} else if(sub.secure_media !== undefined && sub.secure_media !== null) {
|
||||
domainType = 'media';
|
||||
}
|
||||
|
||||
if(aggregateOn.length !== 0) {
|
||||
if(domainType === 'media' && !aggregateOn.includes('media')) {
|
||||
return acc;
|
||||
}
|
||||
if(domainType === 'redditMedia' && !aggregateOn.includes('redditMedia')) {
|
||||
return acc;
|
||||
}
|
||||
if(domainType === 'self' && !aggregateOn.includes('self')) {
|
||||
return acc;
|
||||
}
|
||||
if(domainType === 'link' && !aggregateOn.includes('link')) {
|
||||
return acc;
|
||||
}
|
||||
}
|
||||
|
||||
if(realDomains.length > 0) {
|
||||
if(domainInfo.aliases.map(x => x.toLowerCase()).some(x => realDomainIdents.includes(x))) {
|
||||
const domainAgg = acc.get(domainInfo.display) || {info: domainInfo, count: 0};
|
||||
acc.set(domainInfo.display, {...domainAgg, count: domainAgg.count + 1});
|
||||
}
|
||||
} else {
|
||||
const domainAgg = acc.get(domainInfo.display) || {info: domainInfo, count: 0};
|
||||
acc.set(domainInfo.display, {...domainAgg, count: domainAgg.count + 1});
|
||||
}
|
||||
|
||||
return acc;
|
||||
}, new Map());
|
||||
|
||||
let aggDomains = [];
|
||||
|
||||
if(domainsCombined) {
|
||||
let combinedCount = 0;
|
||||
let domains = [];
|
||||
let triggered = false;
|
||||
for (const [domain, dAgg] of aggregatedSubmissions) {
|
||||
domains.push(domain);
|
||||
combinedCount += dAgg.count;
|
||||
}
|
||||
if(isPercent) {
|
||||
triggered = comparisonTextOp(combinedCount / activityTotal, operator, (value/100));
|
||||
}
|
||||
else {
|
||||
triggered = comparisonTextOp(combinedCount, operator, value);
|
||||
}
|
||||
const combinedDomain = Array.from(aggregatedSubmissions.values()).map(x => x.info.domain).join(' and ');
|
||||
const combinedDisplay = Array.from(aggregatedSubmissions.values()).map(x => `${x.info.display}${x.info.provider !== undefined ? ` (${x.info.provider})` : ''}`).join(' and ');
|
||||
aggDomains.push({
|
||||
domain: {display: combinedDisplay, domain: combinedDomain, aliases: [combinedDomain]},
|
||||
count: combinedCount,
|
||||
percent: Math.round((combinedCount / activityTotal) * 100),
|
||||
triggered,
|
||||
});
|
||||
|
||||
} else {
|
||||
for (const [domain, dAgg] of aggregatedSubmissions) {
|
||||
let triggered = false;
|
||||
if(isPercent) {
|
||||
triggered = comparisonTextOp(dAgg.count / activityTotal, operator, (value/100));
|
||||
}
|
||||
else {
|
||||
triggered = comparisonTextOp(dAgg.count, operator, value);
|
||||
}
|
||||
|
||||
aggDomains.push({
|
||||
domain: dAgg.info,
|
||||
count: dAgg.count,
|
||||
percent: Math.round((dAgg.count / activityTotal) * 100),
|
||||
triggered,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
criteriaResults.push({criteria, activityTotal, activityTotalWindow, aggDomains, minCountMet: true});
|
||||
}
|
||||
|
||||
let criteriaMeta = false;
|
||||
if (this.criteriaJoin === 'OR') {
|
||||
criteriaMeta = criteriaResults.some(x => x.aggDomains.length > 0 && x.aggDomains.some(y => y.triggered === true));
|
||||
} else {
|
||||
criteriaMeta = criteriaResults.every(x => x.aggDomains.length > 0 && x.aggDomains.some(y => y.triggered === true));
|
||||
}
|
||||
|
||||
let usableCriteria = criteriaResults.filter(x => x.aggDomains.length > 0 && x.aggDomains.some(y => y.triggered === true));
|
||||
if (usableCriteria.length === 0) {
|
||||
usableCriteria = criteriaResults.filter(x => x.aggDomains.length > 0)
|
||||
}
|
||||
// probably none hit min count then
|
||||
if(criteriaResults.every(x => x.minCountMet === false)) {
|
||||
const result = `${FAIL} No criteria had their min activity count met`;
|
||||
this.logger.verbose(result);
|
||||
return Promise.resolve([false, this.getResult(false, {result})]);
|
||||
}
|
||||
|
||||
let result;
|
||||
const refCriteriaResults = usableCriteria.find(x => x !== undefined);
|
||||
if(refCriteriaResults === undefined) {
|
||||
result = `${FAIL} No criteria results found??`;
|
||||
return Promise.resolve([false, this.getResult(false, {result})])
|
||||
}
|
||||
|
||||
const {
|
||||
aggDomains = [],
|
||||
activityTotal,
|
||||
activityTotalWindow,
|
||||
criteria: {threshold, window}
|
||||
} = refCriteriaResults;
|
||||
|
||||
const largestCount = aggDomains.reduce((acc, curr) => Math.max(acc, curr.count), 0);
|
||||
const largestPercent = aggDomains.reduce((acc, curr) => Math.max(acc, curr.percent), 0);
|
||||
const smallestCount = aggDomains.reduce((acc, curr) => Math.min(acc, curr.count), aggDomains[0].count);
|
||||
const smallestPercent = aggDomains.reduce((acc, curr) => Math.min(acc, curr.percent), aggDomains[0].percent);
|
||||
const windowText = typeof window === 'number' ? `${activityTotal} Items` : activityTotalWindow.humanize();
|
||||
const countRange = smallestCount === largestCount ? largestCount : `${smallestCount} - ${largestCount}`
|
||||
const percentRange = formatNumber(smallestPercent, {toFixed: 0}) === formatNumber(largestPercent, {toFixed: 0}) ? `${largestPercent}%` : `${smallestPercent}% - ${largestPercent}%`
|
||||
|
||||
let data: any = {};
|
||||
const resultAgnostic = `met the threshold of ${threshold}, with ${countRange} (${percentRange}) of ${activityTotal} Total -- window: ${windowText}`;
|
||||
|
||||
if(criteriaMeta) {
|
||||
result = `${PASS} ${aggDomains.length} Attribution(s) ${resultAgnostic}`;
|
||||
data = {
|
||||
triggeredDomainCount: aggDomains.length,
|
||||
activityTotal,
|
||||
largestCount,
|
||||
largestPercent: `${largestPercent}%`,
|
||||
smallestCount,
|
||||
smallestPercent: `${smallestPercent}%`,
|
||||
countRange,
|
||||
percentRange,
|
||||
domains: aggDomains.map(x => x.domain.domain),
|
||||
domainsDelim: aggDomains.map(x => x.domain.domain).join(', '),
|
||||
titles: aggDomains.map(x => `${x.domain.display}${x.domain.provider !== undefined ? ` (${x.domain.provider})` :''}`),
|
||||
titlesDelim: aggDomains.map(x => `${x.domain.display}${x.domain.provider !== undefined ? ` (${x.domain.provider})` :''}`).join(', '),
|
||||
threshold: threshold,
|
||||
window: windowText
|
||||
};
|
||||
} else {
|
||||
result = `${FAIL} No Attributions ${resultAgnostic}`;
|
||||
}
|
||||
|
||||
this.logger.verbose(result);
|
||||
return Promise.resolve([criteriaMeta, this.getResult(criteriaMeta, {
|
||||
result,
|
||||
data,
|
||||
})]);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
interface AttributionConfig {

/**
* A list of threshold-window values to test attribution against
*
* If none is provided the default set used is:
*
* ```
* threshold: 10%
* window: 100
* ```
*
* @minItems 1
* */
criteria?: AttributionCriteria[]

/**
* * If `OR` then any set of AttributionCriteria that produces an Attribution over the threshold will trigger the rule.
* * If `AND` then all AttributionCriteria sets must produce an Attribution over the threshold to trigger the rule.
* */
criteriaJoin?: 'AND' | 'OR'
}

export interface AttributionOptions extends AttributionConfig, RuleOptions {

}
/**
* Aggregates all of the domain/media accounts attributed to an author's Submission history. If any domain is over the threshold the rule is triggered
*
* Available data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):
*
* ```
* triggeredDomainCount => Number of domains that met the threshold
* activityTotal => Number of Activities considered from window
* window => The date range of the Activities considered
* largestCount => The count from the largest aggregated domain
* largestPercent => The percentage of Activities the largest aggregated domain comprises
* smallestCount => The count from the smallest aggregated domain
* smallestPercent => The percentage of Activities the smallest aggregated domain comprises
* countRange => A convenience string displaying "smallestCount - largestCount" or just one number if both are the same
* percentRange => A convenience string displaying "smallestPercent - largestPercent" or just one percentage if both are the same
* domains => An array of all the domain URLs that met the threshold
* domainsDelim => A comma-delimited string of all the domain URLs that met the threshold
* titles => The friendly-name of the domain if one is present, otherwise the URL (IE youtube.com/c/34ldfa343 => "My Youtube Channel Title")
* titlesDelim => A comma-delimited string of all the domain friendly-names
* threshold => The threshold you configured for this Rule to trigger
* url => Url of the submission that triggered the rule
* ```
* */
export interface AttributionJSONConfig extends AttributionConfig, SubmissionRuleJSONConfig {
kind: 'attribution'
}
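For reference, a minimal sketch of an `attribution` rule config built from the interfaces above (values are illustrative and base fields inherited from SubmissionRuleJSONConfig are omitted):

```
// Illustrative only: same shape as the documented default (threshold: 10%, window: 100),
// with the threshold raised to 20% of the author's last 100 activities
const attributionExample: AttributionJSONConfig = {
    kind: 'attribution',
    criteriaJoin: 'OR',
    criteria: [
        {
            threshold: '20%',
            window: 100
        }
    ]
};
```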
@@ -1,7 +1,7 @@
|
||||
import {Author, AuthorOptions, AuthorCriteria, Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
|
||||
import {Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
|
||||
import {Comment} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {testAuthorCriteria} from "../Utils/SnoowrapUtils";
|
||||
import {Author, AuthorCriteria} from "../Author/Author";
|
||||
|
||||
/**
|
||||
* Checks the author of the Activity against AuthorCriteria. This differs from a Rule's AuthorOptions as this is a full Rule and will only pass/fail, not skip.
|
||||
@@ -12,11 +12,11 @@ export interface AuthorRuleConfig {
|
||||
/**
|
||||
* Will "pass" if any set of AuthorCriteria passes
|
||||
* */
|
||||
include: AuthorCriteria[];
|
||||
include?: AuthorCriteria[];
|
||||
/**
|
||||
* Only runs if include is not present. Will "pass" if any of set of the AuthorCriteria does not pass
|
||||
* */
|
||||
exclude: AuthorCriteria[];
|
||||
exclude?: AuthorCriteria[];
|
||||
}
|
||||
|
||||
export interface AuthorRuleOptions extends AuthorRuleConfig, RuleOptions {
|
||||
@@ -34,8 +34,13 @@ export class AuthorRule extends Rule {
|
||||
constructor(options: AuthorRuleOptions) {
|
||||
super(options);
|
||||
|
||||
this.include = options.include.map(x => new Author(x));
|
||||
this.exclude = options.exclude.map(x => new Author(x));
|
||||
const {
|
||||
include,
|
||||
exclude,
|
||||
} = options;
|
||||
|
||||
this.include = include !== undefined ? include.map(x => new Author(x)) : [];
|
||||
this.exclude = exclude !== undefined ? exclude.map(x => new Author(x)) : [];
|
||||
|
||||
if(this.include.length === 0 && this.exclude.length === 0) {
|
||||
throw new Error('At least one of the properties [include,exclude] on Author Rule must not be empty');
|
||||
@@ -43,7 +48,7 @@ export class AuthorRule extends Rule {
|
||||
}
|
||||
|
||||
getKind(): string {
|
||||
return "author";
|
||||
return "Author";
|
||||
}
|
||||
|
||||
protected getSpecificPremise(): object {
|
||||
@@ -53,21 +58,21 @@ export class AuthorRule extends Rule {
|
||||
};
|
||||
}
|
||||
|
||||
protected async process(item: Comment | Submission): Promise<[boolean, RuleResult[]]> {
|
||||
protected async process(item: Comment | Submission): Promise<[boolean, RuleResult]> {
|
||||
if (this.include.length > 0) {
|
||||
for (const auth of this.include) {
|
||||
if (await testAuthorCriteria(item, auth)) {
|
||||
return Promise.resolve([true, [this.getResult(true)]]);
|
||||
if (await this.resources.testAuthorCriteria(item, auth)) {
|
||||
return Promise.resolve([true, this.getResult(true)]);
|
||||
}
|
||||
}
|
||||
return Promise.resolve([false, [this.getResult(false)]]);
|
||||
return Promise.resolve([false, this.getResult(false)]);
|
||||
}
|
||||
for (const auth of this.exclude) {
|
||||
if (await testAuthorCriteria(item, auth, false)) {
|
||||
return Promise.resolve([true, [this.getResult(true)]]);
|
||||
if (await this.resources.testAuthorCriteria(item, auth, false)) {
|
||||
return Promise.resolve([true, this.getResult(true)]);
|
||||
}
|
||||
}
|
||||
return Promise.resolve([false, [this.getResult(false)]]);
|
||||
return Promise.resolve([false, this.getResult(false)]);
|
||||
}
|
||||
}
|
||||
|
src/Rule/HistoryRule.ts (new file, 456 lines)
@@ -0,0 +1,456 @@
|
||||
|
||||
import {ActivityWindowType, CompareValueOrPercent, SubredditState, ThresholdCriteria} from "../Common/interfaces";
|
||||
import {Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {getAuthorActivities} from "../Utils/SnoowrapUtils";
|
||||
import dayjs from "dayjs";
|
||||
import {
|
||||
asSubmission,
|
||||
comparisonTextOp,
|
||||
FAIL,
|
||||
formatNumber, getActivitySubredditName, isSubmission,
|
||||
parseGenericValueOrPercentComparison, parseSubredditName,
|
||||
PASS,
|
||||
percentFromString, toStrongSubredditState
|
||||
} from "../util";
|
||||
import {Comment} from "snoowrap";
|
||||
|
||||
export interface CommentThresholdCriteria extends ThresholdCriteria {
|
||||
/**
|
||||
* If `true` then when threshold...
|
||||
*
|
||||
* * is `number` it will be number of comments where author is OP
|
||||
* * is `percent` it will be **percent of total comments where author is OP**
|
||||
* */
|
||||
asOp?: boolean
|
||||
}
|
||||
/**
|
||||
* Criteria will only trigger if ALL present thresholds (comment, submission, total) are met
|
||||
* */
|
||||
export interface HistoryCriteria {
|
||||
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare **filtered** (using `include` or `exclude`, if present) submissions against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign]`
|
||||
*
|
||||
* * EX `> 100` => greater than 100 filtered submissions
|
||||
* * EX `<= 75%` => filtered submissions are equal to or less than 75% of unfiltered Activities
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
submission?: CompareValueOrPercent
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare **filtered** (using `include` or `exclude`, if present) comments against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign] [OP]`
|
||||
*
|
||||
* * EX `> 100` => greater than 100 comments
|
||||
* * EX `<= 75%` => comments are equal to or less than 75% of unfiltered Activities
|
||||
*
|
||||
* If your string also contains the text `OP` somewhere **after** `<number>[percent sign]`...:
|
||||
*
|
||||
* * EX `> 100 OP` => greater than 100 filtered comments as OP
|
||||
* * EX `<= 25% as OP` => **Filtered** comments as OP were less than or equal to 25% of **unfiltered Comments**
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
comment?: CompareValueOrPercent
|
||||
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare **filtered** (using `include` or `exclude`) activities against
|
||||
*
|
||||
* **Note:** This is only useful if using `include` or `exclude` otherwise percent will always be 100% and total === activityTotal
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign] [OP]`
|
||||
*
|
||||
* * EX `> 100` => greater than 100 filtered activities
|
||||
* * EX `<= 75%` => filtered activities are equal to or less than 75% of all Activities
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
total?: CompareValueOrPercent
|
||||
|
||||
window: ActivityWindowType
|
||||
|
||||
/**
|
||||
* The minimum number of **filtered** activities that must exist from the `window` results for this criteria to run
|
||||
* @default 5
|
||||
* */
|
||||
minActivityCount?: number
|
||||
name?: string
|
||||
}
|
||||
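As an illustrative sketch of the comparison syntax described above (all values are made up), a HistoryCriteria that checks comments made as OP:

```
const opHeavyCommenter: HistoryCriteria = {
    // comments as OP must be >= 40% -- the trailing "OP" switches the comparison to opTotal
    comment: '>= 40% OP',
    // only evaluate if at least 10 filtered activities were found
    minActivityCount: 10,
    // look at the author's last 100 activities
    window: 100,
    name: 'Mostly OP comments'
};
```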
|
||||
export class HistoryRule extends Rule {
|
||||
criteria: HistoryCriteria[];
|
||||
condition: 'AND' | 'OR';
|
||||
include: (string | SubredditState)[];
|
||||
exclude: (string | SubredditState)[];
|
||||
activityFilterFunc: (x: Submission|Comment) => Promise<boolean> = async (x) => true;
|
||||
|
||||
constructor(options: HistoryOptions) {
|
||||
super(options);
|
||||
const {
|
||||
criteria,
|
||||
condition = 'OR',
|
||||
include = [],
|
||||
exclude = [],
|
||||
} = options || {};
|
||||
|
||||
this.criteria = criteria;
|
||||
this.condition = condition;
|
||||
if (this.criteria.length === 0) {
|
||||
throw new Error('Must provide at least one HistoryCriteria');
|
||||
}
|
||||
|
||||
this.include = include;
|
||||
this.exclude = exclude;
|
||||
|
||||
if(this.include.length > 0) {
|
||||
const subStates = include.map((x) => {
|
||||
if(typeof x === 'string') {
|
||||
return toStrongSubredditState({name: x, stateDescription: x}, {defaultFlags: 'i', generateDescription: true});
|
||||
}
|
||||
return toStrongSubredditState(x, {defaultFlags: 'i', generateDescription: true});
|
||||
});
|
||||
this.activityFilterFunc = async (x: Submission|Comment) => {
|
||||
for(const ss of subStates) {
|
||||
if(await this.resources.testSubredditCriteria(x, ss)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
};
|
||||
} else if(this.exclude.length > 0) {
|
||||
const subStates = exclude.map((x) => {
|
||||
if(typeof x === 'string') {
|
||||
return toStrongSubredditState({name: x, stateDescription: x}, {defaultFlags: 'i', generateDescription: true});
|
||||
}
|
||||
return toStrongSubredditState(x, {defaultFlags: 'i', generateDescription: true});
|
||||
});
|
||||
this.activityFilterFunc = async (x: Submission|Comment) => {
|
||||
for(const ss of subStates) {
|
||||
if(await this.resources.testSubredditCriteria(x, ss)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
getKind(): string {
|
||||
return "History";
|
||||
}
|
||||
|
||||
protected getSpecificPremise(): object {
|
||||
return {
|
||||
criteria: this.criteria,
|
||||
include: this.include,
|
||||
exclude: this.exclude,
|
||||
}
|
||||
}
|
||||
|
||||
protected async process(item: Submission): Promise<[boolean, RuleResult]> {
|
||||
|
||||
let criteriaResults = [];
|
||||
|
||||
for (const criteria of this.criteria) {
|
||||
|
||||
const {comment, window, submission, total, minActivityCount = 5} = criteria;
|
||||
|
||||
let activities = await this.resources.getAuthorActivities(item.author, {window: window});
|
||||
const filteredActivities = [];
|
||||
for(const a of activities) {
|
||||
if(await this.activityFilterFunc(a)) {
|
||||
filteredActivities.push(a);
|
||||
}
|
||||
}
|
||||
|
||||
if (filteredActivities.length < minActivityCount) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const activityTotal = activities.length;
|
||||
const {submissionTotal, commentTotal, opTotal} = activities.reduce((acc, act) => {
|
||||
if(asSubmission(act)) {
|
||||
return {...acc, submissionTotal: acc.submissionTotal + 1};
|
||||
}
|
||||
let a = {...acc, commentTotal: acc.commentTotal + 1};
|
||||
if(act.is_submitter) {
|
||||
a.opTotal = a.opTotal + 1;
|
||||
}
|
||||
return a;
|
||||
},{submissionTotal: 0, commentTotal: 0, opTotal: 0});
|
||||
let fSubmissionTotal = submissionTotal;
|
||||
let fCommentTotal = commentTotal;
|
||||
let fOpTotal = opTotal;
|
||||
if(activities.length !== filteredActivities.length) {
|
||||
const filteredCounts = filteredActivities.reduce((acc, act) => {
|
||||
if(asSubmission(act)) {
|
||||
return {...acc, submissionTotal: acc.submissionTotal + 1};
|
||||
}
|
||||
let a = {...acc, commentTotal: acc.commentTotal + 1};
|
||||
if(act.is_submitter) {
|
||||
a.opTotal = a.opTotal + 1;
|
||||
}
|
||||
return a;
|
||||
},{submissionTotal: 0, commentTotal: 0, opTotal: 0});
|
||||
fSubmissionTotal = filteredCounts.submissionTotal;
|
||||
fCommentTotal = filteredCounts.commentTotal;
|
||||
fOpTotal = filteredCounts.opTotal;
|
||||
}
|
||||
|
||||
let commentTrigger = undefined;
|
||||
if(comment !== undefined) {
|
||||
const {operator, value, isPercent, extra = ''} = parseGenericValueOrPercentComparison(comment);
|
||||
const asOp = extra.toLowerCase().includes('op');
|
||||
if(isPercent) {
|
||||
const per = value / 100;
|
||||
if(asOp) {
|
||||
commentTrigger = comparisonTextOp(fOpTotal / commentTotal, operator, per);
|
||||
} else {
|
||||
commentTrigger = comparisonTextOp(fCommentTotal / activityTotal, operator, per);
|
||||
}
|
||||
} else {
|
||||
if(asOp) {
|
||||
commentTrigger = comparisonTextOp(fOpTotal, operator, value);
|
||||
} else {
|
||||
commentTrigger = comparisonTextOp(fCommentTotal, operator, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let submissionTrigger = undefined;
|
||||
if(submission !== undefined) {
|
||||
const {operator, value, isPercent} = parseGenericValueOrPercentComparison(submission);
|
||||
if(isPercent) {
|
||||
const per = value / 100;
|
||||
submissionTrigger = comparisonTextOp(fSubmissionTotal / activityTotal, operator, per);
|
||||
} else {
|
||||
submissionTrigger = comparisonTextOp(fSubmissionTotal, operator, value);
|
||||
}
|
||||
}
|
||||
|
||||
let totalTrigger = undefined;
|
||||
if(total !== undefined) {
|
||||
const {operator, value, isPercent} = parseGenericValueOrPercentComparison(total);
|
||||
if(isPercent) {
|
||||
const per = value / 100;
|
||||
totalTrigger = comparisonTextOp(filteredActivities.length / activityTotal, operator, per);
|
||||
} else {
|
||||
totalTrigger = comparisonTextOp(filteredActivities.length, operator, value);
|
||||
}
|
||||
}
|
||||
|
||||
const firstActivity = activities[0];
|
||||
const lastActivity = activities[activities.length - 1];
|
||||
|
||||
const activityTotalWindow = activities.length === 0 ? dayjs.duration(0, 's') : dayjs.duration(dayjs(firstActivity.created_utc * 1000).diff(dayjs(lastActivity.created_utc * 1000)));
|
||||
|
||||
criteriaResults.push({
|
||||
criteria,
|
||||
activityTotal,
|
||||
activityTotalWindow,
|
||||
submissionTotal: fSubmissionTotal,
|
||||
commentTotal: fCommentTotal,
|
||||
opTotal: fOpTotal,
|
||||
filteredTotal: filteredActivities.length,
|
||||
submissionTrigger,
|
||||
commentTrigger,
|
||||
totalTrigger,
|
||||
triggered: (submissionTrigger === undefined || submissionTrigger === true) && (commentTrigger === undefined || commentTrigger === true) && (totalTrigger === undefined || totalTrigger === true)
|
||||
});
|
||||
}
|
||||
|
||||
let criteriaMet = false;
|
||||
let failCriteriaResult: string = '';
|
||||
if (this.condition === 'OR') {
|
||||
criteriaMet = criteriaResults.some(x => x.triggered);
|
||||
if(!criteriaMet) {
|
||||
failCriteriaResult = `${FAIL} No criteria was met`;
|
||||
}
|
||||
} else {
|
||||
criteriaMet = criteriaResults.every(x => x.triggered);
|
||||
if(!criteriaMet) {
|
||||
if(criteriaResults.some(x => x.triggered)) {
|
||||
const met = criteriaResults.filter(x => x.triggered);
|
||||
failCriteriaResult = `${FAIL} ${met.length} out of ${criteriaResults.length} criteria met but Rule required all be met. Set log level to debug to see individual results`;
|
||||
const results = criteriaResults.map(x => this.generateResultDataFromCriteria(x, true));
|
||||
this.logger.debug(`\r\n ${results.map(x => x.result).join('\r\n')}`);
|
||||
} else {
|
||||
failCriteriaResult = `${FAIL} No criteria was met`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(criteriaMet) {
|
||||
// use first triggered criteria found
|
||||
const refCriteriaResults = criteriaResults.find(x => x.triggered);
|
||||
const resultData = this.generateResultDataFromCriteria(refCriteriaResults);
|
||||
|
||||
this.logger.verbose(`${PASS} ${resultData.result}`);
|
||||
return Promise.resolve([true, this.getResult(true, resultData)]);
|
||||
}
|
||||
|
||||
return Promise.resolve([false, this.getResult(false, {result: failCriteriaResult})]);
|
||||
}
|
||||
|
||||
protected generateResultDataFromCriteria(results: any, includePassFailSymbols = false) {
|
||||
const {
|
||||
activityTotal,
|
||||
activityTotalWindow,
|
||||
submissionTotal,
|
||||
commentTotal,
|
||||
filteredTotal,
|
||||
opTotal,
|
||||
criteria: {
|
||||
comment,
|
||||
submission,
|
||||
total,
|
||||
window,
|
||||
},
|
||||
criteria,
|
||||
triggered,
|
||||
submissionTrigger,
|
||||
commentTrigger,
|
||||
totalTrigger,
|
||||
} = results;
|
||||
|
||||
const data: any = {
|
||||
activityTotal,
|
||||
submissionTotal,
|
||||
commentTotal,
|
||||
filteredTotal,
|
||||
opTotal,
|
||||
commentPercent: formatNumber((commentTotal/activityTotal)*100),
|
||||
submissionPercent: formatNumber((submissionTotal/activityTotal)*100),
|
||||
opPercent: formatNumber((opTotal/commentTotal)*100),
|
||||
filteredPercent: formatNumber((filteredTotal/activityTotal)*100),
|
||||
criteria,
|
||||
window: typeof window === 'number' || activityTotal === 0 ? `${activityTotal} Items` : activityTotalWindow.humanize(true),
|
||||
triggered,
|
||||
submissionTrigger,
|
||||
commentTrigger,
|
||||
totalTrigger,
|
||||
};
|
||||
|
||||
let thresholdSummary = [];
|
||||
let totalSummary;
|
||||
let submissionSummary;
|
||||
let commentSummary;
|
||||
if(total !== undefined) {
|
||||
const {operator, value, isPercent, displayText} = parseGenericValueOrPercentComparison(total);
|
||||
const suffix = !isPercent ? 'Items' : `(${formatNumber((filteredTotal/activityTotal)*100)}%) of ${activityTotal} Total`;
|
||||
totalSummary = `${includePassFailSymbols ? `${totalTrigger ? PASS : FAIL} ` : ''}Filtered Activities (${filteredTotal}) were${totalTrigger ? '' : ' not'} ${displayText} ${suffix}`;
|
||||
data.totalSummary = totalSummary;
|
||||
thresholdSummary.push(totalSummary);
|
||||
}
|
||||
if(submission !== undefined) {
|
||||
const {operator, value, isPercent, displayText} = parseGenericValueOrPercentComparison(submission);
|
||||
const suffix = !isPercent ? 'Items' : `(${formatNumber((submissionTotal/activityTotal)*100)}%) of ${activityTotal} Total`;
|
||||
submissionSummary = `${includePassFailSymbols ? `${submissionTrigger ? PASS : FAIL} ` : ''}Submissions (${submissionTotal}) were${submissionTrigger ? '' : ' not'} ${displayText} ${suffix}`;
|
||||
data.submissionSummary = submissionSummary;
|
||||
thresholdSummary.push(submissionSummary);
|
||||
}
|
||||
if(comment !== undefined) {
|
||||
const {operator, value, isPercent, displayText, extra = ''} = parseGenericValueOrPercentComparison(comment);
|
||||
const asOp = extra.toLowerCase().includes('op');
|
||||
const totalType = asOp ? 'Comments' : 'Activities'
|
||||
const countType = asOp ? 'Comments as OP' : 'Comments';
|
||||
const suffix = !isPercent ? 'Items' : `(${asOp ? formatNumber((opTotal/commentTotal)*100) : formatNumber((commentTotal/activityTotal)*100)}%) of ${activityTotal} Total ${totalType}`;
|
||||
commentSummary = `${includePassFailSymbols ? `${commentTrigger ? PASS : FAIL} ` : ''}${countType} (${asOp ? opTotal : commentTotal}) were${commentTrigger ? '' : ' not'} ${displayText} ${suffix}`;
|
||||
data.commentSummary = commentSummary;
|
||||
thresholdSummary.push(commentSummary);
|
||||
}
|
||||
|
||||
data.thresholdSummary = thresholdSummary.join(' and ');
|
||||
|
||||
const result = `${data.thresholdSummary} (${data.window})`;
|
||||
|
||||
return {result, data};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
export default HistoryRule;
|
||||
|
||||
interface HistoryConfig {
|
||||
|
||||
/**
|
||||
* A list of threshold-window values to test activities against.
|
||||
*
|
||||
* @minItems 1
|
||||
* */
|
||||
criteria: HistoryCriteria[]
|
||||
|
||||
/**
|
||||
* * If `OR` then any set of Criteria that pass will trigger the Rule
|
||||
* * If `AND` then all Criteria sets must pass to trigger the Rule
|
||||
* */
|
||||
condition?: 'AND' | 'OR'
|
||||
|
||||
/**
|
||||
* If present, activities will be counted only if they are found in this list of Subreddits.
|
||||
*
|
||||
* Each value in the list can be either:
|
||||
*
|
||||
* * string (name of subreddit)
|
||||
* * regular expression to run on the subreddit name
|
||||
* * `SubredditState`
|
||||
*
|
||||
* EX `["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]`
|
||||
*
|
||||
* **Note:** This affects **post-window retrieval** activities. So that:
|
||||
*
|
||||
* * `activityTotal` is number of activities retrieved from `window` -- NOT post-filtering
|
||||
* * all comparisons using **percentages** will compare **post-filtering** results against **activity count from window**
|
||||
* * -- to have the rule consider only included/excluded activities (so `activityTotal` reflects the filter), use include/exclude inside `window` instead
|
||||
*
|
||||
* @examples [["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]]
|
||||
* */
|
||||
include?: (string | SubredditState)[],
|
||||
/**
|
||||
* If present, activities will be counted only if they are **NOT** found in this list of Subreddits
|
||||
*
|
||||
* Each value in the list can be either:
|
||||
*
|
||||
* * string (name of subreddit)
|
||||
* * regular expression to run on the subreddit name
|
||||
* * `SubredditState`
|
||||
*
|
||||
* EX `["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]`
|
||||
*
|
||||
* **Note:** This affects **post-window retrieval** activities. So that:
|
||||
*
|
||||
* * `activityTotal` is number of activities retrieved from `window` -- NOT post-filtering
|
||||
* * all comparisons using **percentages** will compare **post-filtering** results against **activity count from window**
|
||||
* * -- to have the rule consider only included/excluded activities (so `activityTotal` reflects the filter), use include/exclude inside `window` instead
|
||||
*
|
||||
* @examples [["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]]
|
||||
* */
|
||||
exclude?: (string | SubredditState)[],
|
||||
}
|
||||
|
||||
export interface HistoryOptions extends HistoryConfig, RuleOptions {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Aggregates an Author's submission and comment history. Rule can be triggered on count/percent of total (for either or both comment/sub totals) as well as comment OP total.
|
||||
*
|
||||
* Available data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):
|
||||
*
|
||||
* ```
|
||||
* activityTotal => Total number of activities
|
||||
* submissionTotal => Total number of submissions
|
||||
* commentTotal => Total number of comments
|
||||
* opTotal => Total number of comments as OP
|
||||
* thresholdSummary => A text summary of the first Criteria triggered with totals/percentages
|
||||
* criteria => The ThresholdCriteria object
|
||||
* window => A text summary of the range of Activities considered (# of Items if number, time range if Duration)
|
||||
* ```
|
||||
* */
|
||||
export interface HistoryJSONConfig extends HistoryConfig, RuleJSONConfig {
|
||||
kind: 'history'
|
||||
}
|
||||
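Tying the above together, a minimal sketch of a `history` rule config (illustrative values; base RuleJSONConfig fields are omitted):

```
const historyExample: HistoryJSONConfig = {
    kind: 'history',
    condition: 'OR',
    // only count activities made in these subreddits (post-window filtering)
    include: ['mealtimevideos', 'askscience'],
    criteria: [
        {
            // filtered submissions must be >= 70% of all retrieved activities
            submission: '>= 70%',
            window: 100
        }
    ]
};
```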
@@ -1,22 +1,50 @@
|
||||
import {Rule, RuleJSONConfig, RuleOptions, RulePremise, RuleResult} from "./index";
|
||||
import {Comment, VoteableContent} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {getAuthorActivities, getAuthorComments, getAuthorSubmissions} from "../Utils/SnoowrapUtils";
|
||||
import {parseUsableLinkIdentifier} from "../util";
|
||||
import as from 'async';
|
||||
import pMap from 'p-map';
|
||||
// @ts-ignore
|
||||
import subImageMatch from 'matches-subimage';
|
||||
import {
|
||||
activityWindowText,
|
||||
asSubmission, bitsToHexLength,
|
||||
// blockHashImage,
|
||||
compareImages,
|
||||
comparisonTextOp, convertSubredditsRawToStrong,
|
||||
FAIL,
|
||||
formatNumber,
|
||||
getActivitySubredditName, imageCompareMaxConcurrencyGuess,
|
||||
//getImageDataFromUrl,
|
||||
isSubmission,
|
||||
isValidImageURL,
|
||||
objectToStringSummary,
|
||||
parseGenericValueOrPercentComparison,
|
||||
parseStringToRegex,
|
||||
parseSubredditName,
|
||||
parseUsableLinkIdentifier,
|
||||
PASS, sleep,
|
||||
toStrongSubredditState
|
||||
} from "../util";
|
||||
import {
|
||||
ActivityWindow,
|
||||
ActivityWindowCriteria,
|
||||
ActivityWindowType,
|
||||
ReferenceSubmission,
|
||||
SubredditCriteria
|
||||
ActivityWindowType, CommentState,
|
||||
//ImageData,
|
||||
ImageDetection,
|
||||
ReferenceSubmission, StrongImageDetection, StrongSubredditState, SubmissionState,
|
||||
SubredditCriteria, SubredditState
|
||||
} from "../Common/interfaces";
|
||||
import ImageData from "../Common/ImageData";
|
||||
import {blockhash, hammingDistance} from "../Common/blockhash/blockhash";
|
||||
import leven from "leven";
|
||||
|
||||
const parseLink = parseUsableLinkIdentifier();
|
||||
|
||||
export class RecentActivityRule extends Rule {
|
||||
window: ActivityWindowType;
|
||||
thresholds: SubThreshold[];
|
||||
thresholds: ActivityThreshold[];
|
||||
useSubmissionAsReference: boolean;
|
||||
imageDetection: StrongImageDetection
|
||||
lookAt?: 'comments' | 'submissions';
|
||||
|
||||
constructor(options: RecentActivityRuleOptions) {
|
||||
@@ -24,8 +52,47 @@ export class RecentActivityRule extends Rule {
|
||||
const {
|
||||
window = 15,
|
||||
useSubmissionAsReference = true,
|
||||
imageDetection,
|
||||
lookAt,
|
||||
} = options || {};
|
||||
|
||||
const {
|
||||
enable = false,
|
||||
fetchBehavior = 'extension',
|
||||
threshold = 5,
|
||||
hash = {},
|
||||
pixel = {},
|
||||
} = imageDetection || {};
|
||||
|
||||
const {
|
||||
enable: hEnable = true,
|
||||
bits = 16,
|
||||
ttl = 60,
|
||||
hardThreshold = threshold,
|
||||
softThreshold
|
||||
} = hash || {};
|
||||
|
||||
const {
|
||||
enable: pEnable = true,
|
||||
threshold: pThreshold = threshold,
|
||||
} = pixel || {};
|
||||
|
||||
this.imageDetection = {
|
||||
enable,
|
||||
fetchBehavior,
|
||||
threshold,
|
||||
hash: {
|
||||
enable: hEnable,
|
||||
hardThreshold,
|
||||
softThreshold,
|
||||
bits,
|
||||
ttl,
|
||||
},
|
||||
pixel: {
|
||||
enable: pEnable,
|
||||
threshold: pThreshold
|
||||
}
|
||||
};
|
||||
this.lookAt = lookAt;
|
||||
this.useSubmissionAsReference = useSubmissionAsReference;
|
||||
this.window = window;
|
||||
@@ -33,7 +100,7 @@ export class RecentActivityRule extends Rule {
|
||||
}
|
||||
|
||||
getKind(): string {
|
||||
return 'Recent Activity';
|
||||
return 'Recent';
|
||||
}
|
||||
|
||||
getSpecificPremise(): object {
|
||||
@@ -45,95 +112,395 @@ export class RecentActivityRule extends Rule {
|
||||
}
|
||||
}
|
||||
|
||||
async process(item: Submission | Comment): Promise<[boolean, RuleResult[]]> {
|
||||
async process(item: Submission | Comment): Promise<[boolean, RuleResult]> {
|
||||
let activities;
|
||||
|
||||
switch (this.lookAt) {
|
||||
case 'comments':
|
||||
activities = await getAuthorComments(item.author, {window: this.window});
|
||||
activities = await this.resources.getAuthorComments(item.author, {window: this.window});
|
||||
break;
|
||||
case 'submissions':
|
||||
activities = await getAuthorSubmissions(item.author, {window: this.window});
|
||||
activities = await this.resources.getAuthorSubmissions(item.author, {window: this.window});
|
||||
break;
|
||||
default:
|
||||
activities = await getAuthorActivities(item.author, {window: this.window});
|
||||
activities = await this.resources.getAuthorActivities(item.author, {window: this.window});
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
let viableActivity = activities;
|
||||
if (this.useSubmissionAsReference) {
|
||||
if (!(item instanceof Submission)) {
|
||||
this.logger.debug('Cannot use post as reference because triggered item is not a Submission');
|
||||
if (!asSubmission(item)) {
|
||||
this.logger.warn('Cannot use post as reference because triggered item is not a Submission');
|
||||
} else if (item.is_self) {
|
||||
this.logger.debug('Cannot use post as reference because triggered Submission is not a link type');
|
||||
this.logger.warn('Cannot use post as reference because triggered Submission is not a link type');
|
||||
} else {
|
||||
const usableUrl = parseLink(await item.url);
|
||||
viableActivity = viableActivity.filter((x) => {
|
||||
if (!(x instanceof Submission)) {
|
||||
return false;
|
||||
const itemId = item.id;
|
||||
const referenceUrl = await item.url;
|
||||
const usableUrl = parseLink(referenceUrl);
|
||||
let filteredActivity: (Submission|Comment)[] = [];
|
||||
let analysisTimes: number[] = [];
|
||||
let referenceImage: ImageData | undefined;
|
||||
if (this.imageDetection.enable) {
|
||||
try {
|
||||
referenceImage = ImageData.fromSubmission(item);
|
||||
referenceImage.setPreferredResolutionByWidth(800);
|
||||
if(this.imageDetection.hash.enable) {
|
||||
let refHash: string | undefined;
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
refHash = await this.resources.getImageHash(referenceImage);
|
||||
if(refHash === undefined) {
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
await this.resources.setImageHash(referenceImage, refHash, this.imageDetection.hash.ttl);
|
||||
} else if(refHash.length !== bitsToHexLength(this.imageDetection.hash.bits)) {
|
||||
this.logger.warn('Reference image hash length did not correspond to bits specified in config. Recomputing...');
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
await this.resources.setImageHash(referenceImage, refHash, this.imageDetection.hash.ttl);
|
||||
}
|
||||
} else {
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
}
|
||||
}
|
||||
//await referenceImage.sharp();
|
||||
// await referenceImage.hash();
|
||||
// if (referenceImage.preferredResolution !== undefined) {
|
||||
// await (referenceImage.getSimilarResolutionVariant(...referenceImage.preferredResolution) as ImageData).sharp();
|
||||
// }
|
||||
} catch (err) {
|
||||
this.logger.verbose(err.message);
|
||||
}
|
||||
}
|
||||
let longRun;
|
||||
if (referenceImage !== undefined) {
|
||||
const l = this.logger;
|
||||
longRun = setTimeout(() => {
|
||||
l.verbose('FYI: Image processing is causing rule to take longer than normal');
|
||||
}, 2500);
|
||||
}
|
||||
// @ts-ignore
|
||||
const ci = async (x: (Submission|Comment)) => {
|
||||
if (!asSubmission(x) || x.id === itemId) {
|
||||
return null;
|
||||
}
|
||||
if (x.url === undefined) {
|
||||
return false;
|
||||
return null;
|
||||
}
|
||||
return parseLink(x.url) === usableUrl;
|
||||
});
|
||||
}
|
||||
}
|
||||
const groupedActivity = viableActivity.reduce((grouped, activity) => {
|
||||
const s = activity.subreddit.display_name.toLowerCase();
|
||||
grouped[s] = (grouped[s] || []).concat(activity);
|
||||
return grouped;
|
||||
}, {} as Record<string, (Submission | Comment)[]>);
|
||||
const triggeredOn = [];
|
||||
for (const triggerSet of this.thresholds) {
|
||||
const {count: threshold = 1, subreddits = []} = triggerSet;
|
||||
for (const sub of subreddits) {
|
||||
const isub = sub.toLowerCase();
|
||||
const {[isub]: tSub = []} = groupedActivity;
|
||||
if (tSub.length >= threshold) {
|
||||
triggeredOn.push({subreddit: sub, count: tSub.length});
|
||||
if (parseLink(x.url) === usableUrl) {
|
||||
return x;
|
||||
}
|
||||
// only do image detection if regular URL comparison and other conditions fail first
|
||||
// to reduce CPU/bandwidth usage
|
||||
if (referenceImage !== undefined) {
|
||||
try {
|
||||
let imgData = ImageData.fromSubmission(x);
|
||||
imgData.setPreferredResolutionByWidth(800);
|
||||
if(this.imageDetection.hash.enable) {
|
||||
let compareHash: string | undefined;
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
compareHash = await this.resources.getImageHash(imgData);
|
||||
}
|
||||
if(compareHash === undefined)
|
||||
{
|
||||
compareHash = await imgData.hash(this.imageDetection.hash.bits);
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
await this.resources.setImageHash(imgData, compareHash, this.imageDetection.hash.ttl);
|
||||
}
|
||||
}
|
||||
const refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
if(refHash.length !== compareHash.length) {
|
||||
this.logger.debug(`Hash lengths were not the same! Will need to recompute compare hash to match reference.\n\nReference: ${referenceImage.baseUrl} hash is ${refHash.length} chars long | Comparing: ${imgData.baseUrl} hash is ${compareHash.length} chars long`);
|
||||
compareHash = await imgData.hash(this.imageDetection.hash.bits)
|
||||
}
|
||||
const distance = leven(refHash, compareHash);
|
||||
const diff = (distance/refHash.length)*100;
|
||||
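// Worked example (assuming the default 16-bit blockhash, which yields a 64-character hex string):
// a Levenshtein distance of 3 between the two hashes gives diff = (3 / 64) * 100 ≈ 4.7,
// which falls under the default hardThreshold of 5 and is treated as the same image.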
|
||||
|
||||
// return image if hard is defined and diff is less
|
||||
if(null !== this.imageDetection.hash.hardThreshold && diff <= this.imageDetection.hash.hardThreshold) {
|
||||
return x;
|
||||
}
|
||||
// hard is either not defined or diff was greater than hard
|
||||
|
||||
// if soft is defined
|
||||
if (this.imageDetection.hash.softThreshold !== undefined) {
|
||||
// and diff is greater than soft allowance
|
||||
if(diff > this.imageDetection.hash.softThreshold) {
|
||||
// not similar enough
|
||||
return null;
|
||||
}
|
||||
// similar enough, will continue on to pixel (if enabled!)
|
||||
} else {
|
||||
// only hard was defined and did not pass
|
||||
return null;
|
||||
}
|
||||
}
|
||||
// at this point either hash was not enabled or it was and we hit soft threshold but not hard
|
||||
if(this.imageDetection.pixel.enable) {
|
||||
try {
|
||||
const [compareResult, sameImage] = await compareImages(referenceImage, imgData, this.imageDetection.pixel.threshold / 100);
|
||||
analysisTimes.push(compareResult.analysisTime);
|
||||
if (sameImage) {
|
||||
return x;
|
||||
}
|
||||
} catch (err) {
|
||||
this.logger.warn(`Unexpected error encountered while pixel-comparing images, will skip comparison => ${err.message}`);
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
if(!err.message.includes('did not end with a valid image extension')) {
|
||||
this.logger.warn(`Will not compare image from Submission ${x.id} due to error while parsing image URL => ${err.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (triggeredOn.length > 0) {
|
||||
const friendlyText = triggeredOn.map(x => `${x.subreddit}(${x.count})`).join(', ');
|
||||
const friendly = `Triggered by: ${friendlyText}`;
|
||||
this.logger.debug(friendly);
|
||||
return Promise.resolve([true, [this.getResult(true, {
|
||||
result: friendly,
|
||||
data: {
|
||||
triggeredOn,
|
||||
summary: friendlyText,
|
||||
subCount: triggeredOn.length,
|
||||
totalCount: triggeredOn.reduce((cnt, data) => cnt + data.count, 0)
|
||||
// parallel all the things
|
||||
this.logger.profile('asyncCompare');
|
||||
const results = await pMap(viableActivity, ci, {concurrency: imageCompareMaxConcurrencyGuess});
|
||||
this.logger.profile('asyncCompare', {level: 'debug', message: 'Total time for image comparison (incl download/cache calls)'});
|
||||
const totalAnalysisTime = analysisTimes.reduce((acc, x) => acc + x,0);
|
||||
if(analysisTimes.length > 0) {
|
||||
this.logger.debug(`Reference image pixel-compared ${analysisTimes.length} times. Timings: Avg ${formatNumber(totalAnalysisTime / analysisTimes.length, {toFixed: 0})}ms | Max: ${Math.max(...analysisTimes)}ms | Min: ${Math.min(...analysisTimes)}ms | Total: ${totalAnalysisTime}ms (${formatNumber(totalAnalysisTime/1000)}s)`);
|
||||
}
|
||||
})]]);
|
||||
filteredActivity = filteredActivity.concat(results.filter(x => x !== null));
|
||||
if (longRun !== undefined) {
|
||||
clearTimeout(longRun);
|
||||
}
|
||||
viableActivity = filteredActivity;
|
||||
}
|
||||
}
|
||||
|
||||
return Promise.resolve([false, [this.getResult(false)]]);
|
||||
const summaries = [];
|
||||
let totalTriggeredOn;
|
||||
for (const triggerSet of this.thresholds) {
|
||||
let currCount = 0;
|
||||
const presentSubs: string[] = [];
|
||||
let combinedKarma = 0;
|
||||
const {
|
||||
threshold = '>= 1',
|
||||
subreddits = [],
|
||||
karma: karmaThreshold,
|
||||
commentState,
|
||||
submissionState,
|
||||
} = triggerSet;
|
||||
|
||||
// convert subreddits array into entirely StrongSubredditState
|
||||
const defaultOpts = {
|
||||
defaultFlags: 'i',
|
||||
generateDescription: true
|
||||
};
|
||||
const subStates: StrongSubredditState[] = subreddits.map((x) => convertSubredditsRawToStrong(x, defaultOpts));
|
||||
|
||||
let validActivity: (Comment | Submission)[] = await as.filter(viableActivity, async (activity) => {
|
||||
if (asSubmission(activity) && submissionState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [submissionState]);
|
||||
} else if (commentState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [commentState]);
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
validActivity = await this.resources.batchTestSubredditCriteria(validActivity, subStates);
|
||||
for (const activity of validActivity) {
|
||||
currCount++;
|
||||
// @ts-ignore
|
||||
combinedKarma += activity.score;
|
||||
const pSub = getActivitySubredditName(activity);
|
||||
if (!presentSubs.includes(pSub)) {
|
||||
presentSubs.push(pSub);
|
||||
}
|
||||
}
|
||||
|
||||
for (const activity of viableActivity) {
|
||||
if (asSubmission(activity) && submissionState !== undefined) {
|
||||
if (!(await this.resources.testItemCriteria(activity, [submissionState]))) {
|
||||
continue;
|
||||
}
|
||||
} else if (commentState !== undefined) {
|
||||
if (!(await this.resources.testItemCriteria(activity, [commentState]))) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let inSubreddits = false;
|
||||
for (const ss of subStates) {
|
||||
const res = await this.resources.testSubredditCriteria(activity, ss);
|
||||
if (res) {
|
||||
inSubreddits = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (inSubreddits) {
|
||||
currCount++;
|
||||
combinedKarma += activity.score;
|
||||
const pSub = getActivitySubredditName(activity);
|
||||
if (!presentSubs.includes(pSub)) {
|
||||
presentSubs.push(pSub);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const {operator, value, isPercent} = parseGenericValueOrPercentComparison(threshold);
|
||||
let sum = {
|
||||
subsWithActivity: presentSubs,
|
||||
combinedKarma,
|
||||
karmaThreshold,
|
||||
subreddits: subStates.map(x => x.stateDescription),
|
||||
count: currCount,
|
||||
threshold,
|
||||
triggered: false,
|
||||
testValue: currCount.toString()
|
||||
};
|
||||
if (isPercent) {
|
||||
sum.testValue = `${formatNumber((currCount / viableActivity.length) * 100)}%`;
|
||||
if (comparisonTextOp(currCount / viableActivity.length, operator, value / 100)) {
|
||||
sum.triggered = true;
|
||||
totalTriggeredOn = sum;
|
||||
}
|
||||
} else if (comparisonTextOp(currCount, operator, value)) {
|
||||
sum.triggered = true;
|
||||
totalTriggeredOn = sum;
|
||||
}
|
||||
// if we would trigger on threshold need to also test for karma
|
||||
if (totalTriggeredOn !== undefined && karmaThreshold !== undefined) {
|
||||
const {operator: opKarma, value: valueKarma} = parseGenericValueOrPercentComparison(karmaThreshold);
|
||||
if (!comparisonTextOp(combinedKarma, opKarma, valueKarma)) {
|
||||
sum.triggered = false;
|
||||
totalTriggeredOn = undefined;
|
||||
}
|
||||
}
|
||||
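// e.g. a threshold of '>= 3' met with 4 activities but a karma threshold of '> 50' not met
// (combined score of 20) un-triggers this set, and the next threshold set (if any) is evaluated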
|
||||
summaries.push(sum);
|
||||
// if either trigger condition is hit end the iteration early
|
||||
if (totalTriggeredOn !== undefined) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
let result = '';
|
||||
if (totalTriggeredOn !== undefined) {
|
||||
const resultData = this.generateResultData(totalTriggeredOn, viableActivity);
|
||||
result = `${PASS} ${resultData.result}`;
|
||||
this.logger.verbose(result);
|
||||
return Promise.resolve([true, this.getResult(true, resultData)]);
|
||||
} else if (summaries.length === 1) {
|
||||
// can display result if it's only one summary otherwise need to log to debug
|
||||
const res = this.generateResultData(summaries[0], viableActivity);
|
||||
result = `${FAIL} ${res.result}`;
|
||||
} else {
|
||||
result = `${FAIL} No criteria was met. Use 'debug' to see individual results`;
|
||||
this.logger.debug(`\r\n ${summaries.map(x => this.generateResultData(x, viableActivity).result).join('\r\n')}`);
|
||||
}
|
||||
|
||||
this.logger.verbose(result);
|
||||
|
||||
return Promise.resolve([false, this.getResult(false, {result})]);
|
||||
}
|
||||
|
||||
generateResultData(summary: any, activities: (Submission | Comment)[] = []) {
|
||||
const {
|
||||
count,
|
||||
testValue,
|
||||
subreddits = [],
|
||||
subsWithActivity = [],
|
||||
threshold,
|
||||
triggered,
|
||||
combinedKarma,
|
||||
karmaThreshold,
|
||||
} = summary;
|
||||
const relevantSubs = subsWithActivity.length === 0 ? subreddits : subsWithActivity;
|
||||
let totalSummary = `${testValue} activities over ${relevantSubs.length} subreddits${karmaThreshold !== undefined ? ` with ${combinedKarma} combined karma` : ''} ${triggered ? 'met' : 'did not meet'} threshold of ${threshold}${karmaThreshold !== undefined ? ` and ${karmaThreshold} combined karma` : ''}`;
|
||||
if (triggered && subsWithActivity.length > 0) {
|
||||
totalSummary = `${totalSummary} -- subreddits: ${subsWithActivity.join(', ')}`;
|
||||
}
|
||||
return {
|
||||
result: totalSummary,
|
||||
data: {
|
||||
window: typeof this.window === 'number' ? `${activities.length} Items` : activityWindowText(activities),
|
||||
summary: totalSummary,
|
||||
subSummary: relevantSubs.join(', '),
|
||||
subCount: relevantSubs.length,
|
||||
totalCount: count,
|
||||
threshold,
|
||||
testValue,
|
||||
karmaThreshold,
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export interface SubThreshold extends SubredditCriteria {
|
||||
/**
|
||||
* At least one count property must be present. If both are present then either can trigger the rule
|
||||
*
|
||||
* @minProperties 1
|
||||
* @additionalProperties false
|
||||
* */
|
||||
export interface ActivityThreshold {
|
||||
/**
|
||||
* The number of activities in each subreddit from the list that will trigger this rule
|
||||
* @default 1
|
||||
* @minimum 1
|
||||
* When present, a Submission will only be counted if it meets this criteria
|
||||
* */
|
||||
count?: number,
|
||||
submissionState?: SubmissionState
|
||||
/**
|
||||
* When present, a Comment will only be counted if it meets this criteria
|
||||
* */
|
||||
commentState?: CommentState
|
||||
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare recent activities against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign]`
|
||||
*
|
||||
* * EX `> 3` => greater than 3 activities found in the listed subreddits
|
||||
* * EX `<= 75%` => number of Activities in the subreddits listed are equal to or less than 75% of all Activities
|
||||
*
|
||||
* **Note:** If you use percentage comparison here as well as `useSubmissionAsReference` then "all Activities" only pertains to Activities that shared the Link of the Submission, rather than all Activities from this window.
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* @default ">= 1"
|
||||
* @examples [">= 1"]
|
||||
* */
|
||||
threshold?: string
|
||||
|
||||
/**
|
||||
* Test the **combined karma** from Activities found in the specified subreddits
|
||||
*
|
||||
* Value is a string containing a comparison operator and a number of **combined karma** to compare against
|
||||
*
|
||||
* If specified then both `threshold` and `karma` must be met for this `SubThreshold` to be satisfied
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>`
|
||||
*
|
||||
* * EX `> 50` => greater than 50 combined karma for all found Activities in specified subreddits
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
karma?: string
|
||||
|
||||
/**
|
||||
* Activities will be counted if they are found in this list of Subreddits
|
||||
*
|
||||
* Each value in the list can be either:
|
||||
*
|
||||
* * string (name of subreddit)
|
||||
* * regular expression to run on the subreddit name
|
||||
* * `SubredditState`
|
||||
*
|
||||
* EX `["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]`
|
||||
* @examples [["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]]
|
||||
* */
|
||||
subreddits?: (string | SubredditState)[]
|
||||
}
|
||||
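A sketch of a single ActivityThreshold entry using the properties documented above (subreddit names and numbers are illustrative):

```
const karmaFarmThreshold: ActivityThreshold = {
    subreddits: ['freekarma4u', 'freekarma4you'],
    // satisfied when 3 or more of the author's recent activities are in the listed subreddits...
    threshold: '>= 3',
    // ...and those activities have more than 50 combined karma
    karma: '> 50'
};
```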
|
||||
interface RecentActivityConfig extends ActivityWindow, ReferenceSubmission {
|
||||
/**
|
||||
* If present restricts the activities that are considered for count from SubThreshold
|
||||
* @examples ["submissions","comments"]
|
||||
* */
|
||||
lookAt?: 'comments' | 'submissions',
|
||||
/**
|
||||
* A list of subreddits/count criteria that may trigger this rule. ANY SubThreshold will trigger this rule.
|
||||
* @minItems 1
|
||||
* */
|
||||
thresholds: SubThreshold[],
|
||||
thresholds: ActivityThreshold[],
|
||||
|
||||
imageDetection?: ImageDetection
|
||||
}
|
||||
|
||||
export interface RecentActivityRuleOptions extends RecentActivityConfig, RuleOptions {
|
||||
@@ -142,7 +509,7 @@ export interface RecentActivityRuleOptions extends RecentActivityConfig, RuleOpt
|
||||
/**
|
||||
* Checks a user's history for any Activity (Submission/Comment) in the subreddits specified in thresholds
|
||||
*
|
||||
* Available data for [Action templating](https://github.com/FoxxMD/reddit-context-bot#action-templating):
|
||||
* Available data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):
|
||||
*
|
||||
* ```
|
||||
* summary => comma-delimited list of subreddits that hit the threshold and their count EX subredditA(1), subredditB(4),...
|
||||
@@ -151,6 +518,9 @@ export interface RecentActivityRuleOptions extends RecentActivityConfig, RuleOpt
|
||||
* ```
|
||||
* */
|
||||
export interface RecentActivityRuleJSONConfig extends RecentActivityConfig, RuleJSONConfig {
|
||||
/**
|
||||
* @examples ["recentActivity"]
|
||||
* */
|
||||
kind: 'recentActivity'
|
||||
}
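
Putting the pieces together, a hedged sketch of a full `recentActivity` rule config (illustrative values; the imageDetection shape follows the constructor defaults above and other base fields are omitted):

```
const recentActivityExample: RecentActivityRuleJSONConfig = {
    kind: 'recentActivity',
    // compare the triggering Submission's link against the author's history
    useSubmissionAsReference: true,
    window: 15,
    thresholds: [
        {
            subreddits: ['mealtimevideos'],
            threshold: '>= 2'
        }
    ],
    imageDetection: {
        enable: true,
        // percent difference at or below which two images are considered the same
        threshold: 5,
        hash: {
            bits: 16,
            ttl: 60
        }
    }
};
```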