mirror of
https://github.com/FoxxMD/context-mod.git
synced 2026-01-14 07:57:57 -05:00
Compare commits
143 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d6954533a0 | ||
|
|
04b8762926 | ||
|
|
dcc5f87c30 | ||
|
|
66d9c0b2a7 | ||
|
|
00e7cad423 | ||
|
|
bc541d00d4 | ||
|
|
ba53233640 | ||
|
|
ede86d285b | ||
|
|
52f6aabb69 | ||
|
|
18175f3662 | ||
|
|
68a272d305 | ||
|
|
3dac91fafc | ||
|
|
e5bb8c2a38 | ||
|
|
61e0baf3fd | ||
|
|
37e9d1fcc2 | ||
|
|
5e70ca1cb6 | ||
|
|
7f7ed18927 | ||
|
|
efed3381fd | ||
|
|
5ac5d65a28 | ||
|
|
1ac7ad4724 | ||
|
|
0ae74fdce1 | ||
|
|
845173822c | ||
|
|
edb3036957 | ||
|
|
3790f0e061 | ||
|
|
e3e4e4abff | ||
|
|
fd9b83437b | ||
|
|
05694f115c | ||
|
|
70ee157198 | ||
|
|
bbb4ec3c2d | ||
|
|
acb72551ec | ||
|
|
bf6affe592 | ||
|
|
8c2cb02a46 | ||
|
|
73e2af2100 | ||
|
|
ba4c4af5a7 | ||
|
|
9ad21ee2dd | ||
|
|
b32c4f213c | ||
|
|
7e01c8d1f8 | ||
|
|
aee158ecc9 | ||
|
|
8cd2243c2d | ||
|
|
4969789532 | ||
|
|
1dcfdc14d1 | ||
|
|
f1c9b64f64 | ||
|
|
2e5a61566b | ||
|
|
85761fa662 | ||
|
|
0b1a6bd77b | ||
|
|
51e299ca99 | ||
|
|
7696f3c2ff | ||
|
|
1c9ed41e70 | ||
|
|
2d67f9f57d | ||
|
|
975bcb6ad7 | ||
|
|
2a282a0d6f | ||
|
|
0d087521a7 | ||
|
|
fb5fc961cc | ||
|
|
c04b305881 | ||
|
|
5c5e9a26aa | ||
|
|
477d1a10ae | ||
|
|
bbee92699c | ||
|
|
7f09043cdf | ||
|
|
768a199c40 | ||
|
|
6e4b0c7719 | ||
|
|
89b21e6073 | ||
|
|
da611c5894 | ||
|
|
2c90a260c0 | ||
|
|
f081598da6 | ||
|
|
55f45163a4 | ||
|
|
e4dfa9dde3 | ||
|
|
0e395792db | ||
|
|
dcbeb784e8 | ||
|
|
aeaeb6ce27 | ||
|
|
d6a29c5914 | ||
|
|
c1224121d4 | ||
|
|
9790e681ea | ||
|
|
a48a850c98 | ||
|
|
b8369a9e9f | ||
|
|
0c31bdf25e | ||
|
|
4b14e581dd | ||
|
|
b2846efd2b | ||
|
|
a787e4515b | ||
|
|
f63e2a0ec4 | ||
|
|
9d0e098db1 | ||
|
|
181390f0eb | ||
|
|
a8c7b1dac9 | ||
|
|
fd5a92758d | ||
|
|
027199d788 | ||
|
|
2a9f01b928 | ||
|
|
cf54502f0d | ||
|
|
2a3663ccc9 | ||
|
|
dc2eeffcb5 | ||
|
|
39daa11f2d | ||
|
|
93de38a845 | ||
|
|
43caaca1f2 | ||
|
|
7bcc0195fe | ||
|
|
dac6541e28 | ||
|
|
2504a34a34 | ||
|
|
e19639ad0d | ||
|
|
b8084e02b5 | ||
|
|
97906281e6 | ||
|
|
2cea119657 | ||
|
|
6f16d289dd | ||
|
|
a96575c6b3 | ||
|
|
0a82e83352 | ||
|
|
d5e1cdec61 | ||
|
|
ef40c25b09 | ||
|
|
6370a2976a | ||
|
|
d8180299ea | ||
|
|
ac409dce3d | ||
|
|
56c007c20d | ||
|
|
487f13f704 | ||
|
|
00b9d87cdc | ||
|
|
2c797e0b9b | ||
|
|
4a2b27bfbf | ||
|
|
463a4dc0eb | ||
|
|
4b3bea661d | ||
|
|
976f310f51 | ||
|
|
4d8d3dc266 | ||
|
|
ce9e678c4c | ||
|
|
8cf30b6b7d | ||
|
|
2b6d08f8a5 | ||
|
|
f8fc63991f | ||
|
|
d96a1f677c | ||
|
|
b14689791c | ||
|
|
b70c877e44 | ||
|
|
041655376a | ||
|
|
e1eab7696b | ||
|
|
65d1d36d53 | ||
|
|
120d776fc2 | ||
|
|
425e16295b | ||
|
|
dd7e9d72cc | ||
|
|
55535ddd62 | ||
|
|
631e21452c | ||
|
|
be6fa4dd50 | ||
|
|
0d7a82836f | ||
|
|
d9a59b6824 | ||
|
|
ddbf8c3189 | ||
|
|
8393c471b2 | ||
|
|
fe66a2e8f7 | ||
|
|
4b0284102d | ||
|
|
95529f14a8 | ||
|
|
26af2c4e4d | ||
|
|
044c293f34 | ||
|
|
a082c9e593 | ||
|
|
4f3685a1f5 | ||
|
|
e242c36c09 |
@@ -1,8 +1,8 @@
|
||||
node_modules
|
||||
Dockerfile
|
||||
.dockerignore
|
||||
.gitignore
|
||||
.git
|
||||
src/logs
|
||||
/docs
|
||||
.github
|
||||
/docs/
|
||||
/node_modules/
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -381,4 +381,5 @@ dist
|
||||
.pnp.*
|
||||
|
||||
**/src/**/*.js
|
||||
!src/Web/assets/public/yaml/*
|
||||
**/src/**/*.map
|
||||
|
||||
16
Dockerfile
16
Dockerfile
@@ -1,13 +1,17 @@
|
||||
FROM node:16-alpine3.12
|
||||
FROM node:16-alpine3.14 as base
|
||||
|
||||
ENV TZ=Etc/GMT
|
||||
|
||||
RUN apk update
|
||||
# vips required to run sharp library for image comparison
|
||||
RUN echo "http://dl-4.alpinelinux.org/alpine/v3.14/community" >> /etc/apk/repositories \
|
||||
&& apk --no-cache add vips
|
||||
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
WORKDIR /usr/app
|
||||
|
||||
FROM base as build
|
||||
|
||||
COPY package*.json ./
|
||||
COPY tsconfig.json .
|
||||
|
||||
@@ -15,7 +19,13 @@ RUN npm install
|
||||
|
||||
ADD . /usr/app
|
||||
|
||||
RUN npm run build
|
||||
RUN npm run build && rm -rf node_modules
|
||||
|
||||
FROM base as app
|
||||
|
||||
COPY --from=build /usr/app /usr/app
|
||||
|
||||
RUN npm install --production
|
||||
|
||||
ENV NPM_CONFIG_LOGLEVEL debug
|
||||
|
||||
|
||||
10
README.md
10
README.md
@@ -19,13 +19,15 @@ Some feature highlights:
|
||||
* Default/no configuration runs "All In One" behavior
|
||||
* Additional configuration allows web interface to connect to multiple servers
|
||||
* Each server instance can run multiple reddit accounts as bots
|
||||
* **Per-subreddit configuration** is handled by JSON stored in the subreddit wiki
|
||||
* Any text-based actions (comment, submission, message, usernotes, ban, etc...) can be configured via a wiki page or raw text in JSON and support [mustache](https://mustache.github.io) [templating](/docs/actionTemplating.md)
|
||||
* **Per-subreddit configuration** is handled by YAML (**like automoderator!**) or JSON stored in the subreddit wiki
|
||||
* Any text-based actions (comment, submission, message, usernotes, ban, etc...) can be configured via a wiki page or raw text and supports [mustache](https://mustache.github.io) [templating](/docs/actionTemplating.md)
|
||||
* History-based rules support multiple "valid window" types -- [ISO 8601 Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations), [Day.js Durations](https://day.js.org/docs/en/durations/creating), and submission/comment count limits.
|
||||
* Support Activity skipping based on:
|
||||
* Author criteria (name, css flair/text, age, karma, moderator status, and [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes))
|
||||
* Activity state (removed, locked, distinguished, etc.)
|
||||
* Rules and Actions support named references (write once, reference anywhere)
|
||||
* [**Image Comparisons**](/docs/imageComparison.md) via fingerprinting and/or pixel differences
|
||||
* [**Repost detection**](/docs/examples/repost) with support for external services (youtube, etc...)
|
||||
* Global/subreddit-level **API caching**
|
||||
* Support for [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes) as criteria or Actions (writing notes)
|
||||
* Docker container support
|
||||
@@ -83,7 +85,7 @@ See the [Moderator's Getting Started Guide](/docs/gettingStartedMod.md)
|
||||
|
||||
## Configuration and Documentation
|
||||
|
||||
Context Bot's configuration can be written in JSON, [JSON5](https://json5.org/) or YAML. Its schema conforms to [JSON Schema Draft 7](https://json-schema.org/). Additionally, many **operator** settings can be passed via command line or environmental variables.
|
||||
Context Bot's configuration can be written in YAML (like automoderator) or [JSON5](https://json5.org/). Its schema conforms to [JSON Schema Draft 7](https://json-schema.org/). Additionally, many **operator** settings can be passed via command line or environmental variables.
|
||||
|
||||
* For **operators** (running the bot instance) see the [Operator Configuration](/docs/operatorConfiguration.md) guide
|
||||
* For **moderators** consult the [app schema and examples folder](/docs/#configuration-and-usage)
|
||||
@@ -124,7 +126,7 @@ Moderator view/invite and authorization:
|
||||
|
||||
A built-in editor using [monaco-editor](https://microsoft.github.io/monaco-editor/) makes editing configurations easy:
|
||||
|
||||
* Automatic JSON syntax validation and formatting
|
||||
* Automatic JSON or YAML syntax validation and formatting
|
||||
* Automatic Schema (subreddit or operator) validation
|
||||
* All properties are annotated via hover popups
|
||||
* Unauthenticated view via `yourdomain.com/config`
|
||||
|
||||
14
app.json
14
app.json
@@ -17,12 +17,22 @@
|
||||
"REFRESH_TOKEN": {
|
||||
"description": "Refresh token retrieved from authenticating an account with your Reddit Application",
|
||||
"value": "",
|
||||
"required": true
|
||||
"required": false
|
||||
},
|
||||
"ACCESS_TOKEN": {
|
||||
"description": "Access token retrieved from authenticating an account with your Reddit Application",
|
||||
"value": "",
|
||||
"required": true
|
||||
"required": false
|
||||
},
|
||||
"REDIRECT_URI": {
|
||||
"description": "Redirect URI you specified when creating your Reddit Application. Required if you want to use the web interface. In the provided example replace 'your-heroku-app-name' with the name of your HEROKU app.",
|
||||
"value": "https://your-heroku-6app-name.herokuapp.com/callback",
|
||||
"required": false
|
||||
},
|
||||
"OPERATOR": {
|
||||
"description": "Your reddit username WITHOUT any prefixes EXAMPLE /u/FoxxMD => FoxxMD. Specified user will be recognized as an admin.",
|
||||
"value": "",
|
||||
"required": false
|
||||
},
|
||||
"WIKI_CONFIG": {
|
||||
"description": "Relative url to contextbot wiki page EX https://reddit.com/r/subreddit/wiki/<path>",
|
||||
|
||||
@@ -62,6 +62,6 @@ commit_parsers = [
|
||||
# filter out the commits that are not matched by commit parsers
|
||||
filter_commits = false
|
||||
# glob pattern for matching git tags
|
||||
tag_pattern = "v[0-9]*"
|
||||
tag_pattern = "[0-9]*"
|
||||
# regex for skipping tags
|
||||
skip_tags = "v0.1.0-beta.1"
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
* [Activities `window`](#activities-window)
|
||||
* [Comparisons](#thresholds-and-comparisons)
|
||||
* [Activity Templating](/docs/actionTemplating.md)
|
||||
* [Image Comparisons](#image-comparisons)
|
||||
* [Best Practices](#best-practices)
|
||||
* [Named Rules](#named-rules)
|
||||
* [Rule Order](#rule-order)
|
||||
@@ -101,6 +102,7 @@ Find detailed descriptions of all the Rules, with examples, below:
|
||||
* [History](/docs/examples/history)
|
||||
* [Author](/docs/examples/author)
|
||||
* [Regex](/docs/examples/regex)
|
||||
* [Repost](/docs/examples/repost)
|
||||
|
||||
### Rule Set
|
||||
|
||||
@@ -118,6 +120,15 @@ It consists of:
|
||||
* **rules** -- The **Rules** for the Rule Set.
|
||||
|
||||
Example
|
||||
|
||||
YAML
|
||||
```yaml
|
||||
condition: AND
|
||||
# rules are an array
|
||||
rules:
|
||||
- aRule
|
||||
```
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"condition": "AND",
|
||||
@@ -268,6 +279,12 @@ The duration value compares a time range from **now** to `duration value` time i
|
||||
|
||||
Refer to [duration values in activity window documentation](/docs/activitiesWindow.md#duration-values) as well as the individual rule/criteria schema to see what this duration is comparing against.
|
||||
|
||||
### Image Comparisons
|
||||
|
||||
ContextMod implements two methods for comparing **image content**, perceptual hashing and pixel-to-pixel comparisons. Comparisons can be used to filter activities in some activities.
|
||||
|
||||
See [image comparison documentation](/docs/imageComparison.md) for a full reference.
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Named Rules
|
||||
|
||||
@@ -17,7 +17,28 @@ Examples of all of the above
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
# count, last 100 activities
|
||||
window: 100
|
||||
|
||||
# duration, last 10 days
|
||||
window: 10 days
|
||||
|
||||
# duration object, last 2 months and 5 days
|
||||
window:
|
||||
months: 2
|
||||
days: 5
|
||||
|
||||
# iso 8601 string, last 15 minutes
|
||||
window: PT15M
|
||||
|
||||
# ActivityWindowCriteria, last 100 activities or 6 weeks of activities (whichever is found first)
|
||||
window:
|
||||
count: 100
|
||||
duration: 6 weeks
|
||||
```
|
||||
|
||||
```json5
|
||||
// count, last 100 activities
|
||||
{
|
||||
"window": 100
|
||||
@@ -49,6 +70,7 @@ Examples of all of the above
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## Types of Ranges
|
||||
@@ -95,6 +117,7 @@ If you need to specify multiple units of time for your duration you can instead
|
||||
|
||||
Example
|
||||
|
||||
JSON
|
||||
```json
|
||||
{
|
||||
"days": 4,
|
||||
@@ -102,6 +125,13 @@ Example
|
||||
"minutes": 20
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
window:
|
||||
days: 4
|
||||
hours: 6
|
||||
minutes: 20
|
||||
```
|
||||
|
||||
##### An ISO 8601 duration string
|
||||
|
||||
@@ -119,6 +149,7 @@ This is an object that lets you specify more granular conditions for your range.
|
||||
|
||||
The full object looks like this:
|
||||
|
||||
JSON
|
||||
```json
|
||||
{
|
||||
"count": 100,
|
||||
@@ -130,6 +161,19 @@ The full object looks like this:
|
||||
}
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
window:
|
||||
count: 100
|
||||
duration: 10 days
|
||||
satisfyOn: any
|
||||
subreddits:
|
||||
include:
|
||||
- mealtimevideos
|
||||
- pooptimevideos
|
||||
exclude:
|
||||
- videos
|
||||
```
|
||||
|
||||
### Specifying Range
|
||||
|
||||
@@ -142,7 +186,9 @@ If both range properties are specified then the value `satisfyOn` determines how
|
||||
|
||||
If **any** then Activities will be retrieved until one of the range properties is met, **whichever occurs first.**
|
||||
|
||||
Example
|
||||
Example
|
||||
|
||||
JSON
|
||||
```json
|
||||
{
|
||||
"count": 80,
|
||||
@@ -150,6 +196,13 @@ Example
|
||||
"satisfyOn": "any"
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
window:
|
||||
count: 80
|
||||
duration: 90 days
|
||||
satisfyOn: any
|
||||
```
|
||||
Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)
|
||||
|
||||
* If 90 days of activities returns only 40 activities => returns 40 activities
|
||||
@@ -160,6 +213,8 @@ Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)
|
||||
If **all** then both ranges must be satisfied. Effectively, whichever range produces the most Activities will be the one that is used.
|
||||
|
||||
Example
|
||||
|
||||
JSON
|
||||
```json
|
||||
{
|
||||
"count": 100,
|
||||
@@ -167,6 +222,13 @@ Example
|
||||
"satisfyOn": "all"
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
window:
|
||||
count: 100
|
||||
duration: 90 days
|
||||
satisfyOn: all
|
||||
```
|
||||
Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)
|
||||
|
||||
* If at 90 days of activities => 40 activities retrieved
|
||||
@@ -187,6 +249,8 @@ You may filter retrieved Activities using an array of subreddits.
|
||||
Use **include** to specify which subreddits should be included from results
|
||||
|
||||
Example where only activities from /r/mealtimevideos and /r/modsupport will be returned
|
||||
|
||||
JSON
|
||||
```json
|
||||
{
|
||||
"count": 100,
|
||||
@@ -196,7 +260,17 @@ Example where only activities from /r/mealtimevideos and /r/modsupport will be r
|
||||
"include": ["mealtimevideos","modsupport"]
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
window:
|
||||
count: 100
|
||||
duruation: 90 days
|
||||
satisfyOn: any
|
||||
subreddits:
|
||||
include:
|
||||
- mealtimevideos
|
||||
- modsupport
|
||||
```
|
||||
|
||||
#### Exclude
|
||||
@@ -204,6 +278,8 @@ Example where only activities from /r/mealtimevideos and /r/modsupport will be r
|
||||
Use **exclude** to specify which subreddits should NOT be in the results
|
||||
|
||||
Example where activities from /r/mealtimevideos and /r/modsupport will not be returned in results
|
||||
|
||||
JSON
|
||||
```json
|
||||
{
|
||||
"count": 100,
|
||||
@@ -214,4 +290,15 @@ Example where activities from /r/mealtimevideos and /r/modsupport will not be re
|
||||
}
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
window:
|
||||
count: 100
|
||||
duruation: 90 days
|
||||
satisfyOn: any
|
||||
subreddits:
|
||||
exclude:
|
||||
- mealtimevideos
|
||||
- modsupport
|
||||
```
|
||||
**Note:** `exclude` will be ignored if `include` is also present.
|
||||
|
||||
@@ -17,6 +17,8 @@ This directory contains example of valid, ready-to-go configurations for Context
|
||||
* [History](/docs/examples/history)
|
||||
* [Author](/docs/examples/author)
|
||||
* [Regex](/docs/examples/regex)
|
||||
* [Repost](/docs/examples/repost)
|
||||
* [Author and post flairs](/docs/examples/onlyfansFlair)
|
||||
* [Toolbox User Notes](/docs/examples/userNotes)
|
||||
* [Advanced Concepts](/docs/examples/advancedConcepts)
|
||||
* [Rule Sets](/docs/examples/advancedConcepts/ruleSets.json5)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
### Named Rules
|
||||
|
||||
See [ruleNameReuse.json5](/docs/examples/advancedConcepts/ruleNameReuse.json5)
|
||||
See **Rule Name Reuse Examples [YAML](/docs/examples/advancedConcepts/ruleNameReuse.yaml) | [JSON](/docs/examples/advancedConcepts/ruleNameReuse.json5)**
|
||||
|
||||
### Check Order
|
||||
|
||||
@@ -23,7 +23,7 @@ The `rules` array on a `Checks` can contain both `Rule` objects and `RuleSet` ob
|
||||
|
||||
A **Rule Set** is a "nested" set of `Rule` objects with a passing condition specified. These allow you to create more complex trigger behavior by combining multiple rules.
|
||||
|
||||
See **[ruleSets.json5](/docs/examples/advancedConcepts/ruleSets.json5)** for a complete example as well as consulting the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRuleSetJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json).
|
||||
See **ruleSets [YAML](/docs/examples/advancedConcepts/ruleSets.yaml) | [JSON](/docs/examples/advancedConcepts/ruleSets.json5)** for a complete example as well as consulting the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRuleSetJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json).
|
||||
|
||||
### Rule Order
|
||||
|
||||
|
||||
52
docs/examples/advancedConcepts/ruleNameReuse.yaml
Normal file
52
docs/examples/advancedConcepts/ruleNameReuse.yaml
Normal file
@@ -0,0 +1,52 @@
|
||||
checks:
|
||||
- name: Auto Remove SP Karma
|
||||
description: >-
|
||||
Remove submission because author has self-promo >10% and posted in karma
|
||||
subs recently
|
||||
kind: submission
|
||||
rules:
|
||||
# named rules can be referenced at any point in the configuration (where they occur does not matter)
|
||||
# and can be used in any Check
|
||||
# Note: rules do not transfer between subreddit configurations
|
||||
- freekarmasub
|
||||
- name: attr10all
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
window: 100
|
||||
actions:
|
||||
- kind: remove
|
||||
- kind: comment
|
||||
content: >-
|
||||
Your submission was removed because you are over reddit's threshold
|
||||
for self-promotion and recently posted this content in a karma sub
|
||||
- name: Free Karma On Submission Alert
|
||||
description: Check if author has posted this submission in 'freekarma' subreddits
|
||||
kind: submission
|
||||
rules:
|
||||
# rules can be re-used throughout a configuration by referencing them by name
|
||||
#
|
||||
# The rule name itself can only contain spaces, hyphens and underscores
|
||||
# The value used to reference it will have all of these removed, and lower-cased
|
||||
#
|
||||
# so to reference this rule use the value 'freekarmasub'
|
||||
- name: Free_Karma-SUB
|
||||
kind: recentActivity
|
||||
lookAt: submissions
|
||||
useSubmissionAsReference: true
|
||||
thresholds:
|
||||
- threshold: '>= 1'
|
||||
subreddits:
|
||||
- DeFreeKarma
|
||||
- FreeKarma4U
|
||||
- FreeKarma4You
|
||||
- upvote
|
||||
window: 7 days
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
Submission posted {{rules.freekarmasub.totalCount}} times in karma
|
||||
{{rules.freekarmasub.subCount}} subs over
|
||||
{{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}
|
||||
53
docs/examples/advancedConcepts/ruleSets.yaml
Normal file
53
docs/examples/advancedConcepts/ruleSets.yaml
Normal file
@@ -0,0 +1,53 @@
|
||||
checks:
|
||||
- name: Self Promo All or low comment
|
||||
description: >-
|
||||
SP >10% of all activities or >10% of submissions with low comment
|
||||
engagement
|
||||
kind: submission
|
||||
rules:
|
||||
# this attribution rule is looking at all activities
|
||||
#
|
||||
# we want want this one rule to trigger the check because >10% of all activity (submission AND comments) is a good requirement
|
||||
- name: attr10all
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
window: 100
|
||||
# this is a RULE SET
|
||||
#
|
||||
# it is made up of "nested" rules with a pass condition (AND/OR)
|
||||
# if the nested rules pass the condition then the Rule Set triggers the Check
|
||||
#
|
||||
# AND = all nested rules must be triggered to make the Rule Set trigger
|
||||
# AND = any of the nested Rules will be the Rule Set trigger
|
||||
- condition: AND
|
||||
# in this check we use an Attribution >10% on ONLY submissions, which is a lower requirement then the above attribution rule
|
||||
# and combine it with a History rule looking for low comment engagement
|
||||
# to make a "higher" requirement Rule Set our of two low requirement Rules
|
||||
rules:
|
||||
- name: attr20sub
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
thresholdOn: submissions
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
thresholdOn: submissions
|
||||
window: 100
|
||||
lookAt: media
|
||||
- name: lowOrOpComm
|
||||
kind: history
|
||||
criteriaJoin: OR
|
||||
criteria:
|
||||
- window: 90 days
|
||||
comment: < 50%
|
||||
- window: 90 days
|
||||
comment: '> 40% OP'
|
||||
actions:
|
||||
- kind: remove
|
||||
- kind: comment
|
||||
content: >-
|
||||
Your submission was removed because you are over reddit's threshold
|
||||
for self-promotion or exhibit low comment engagement
|
||||
@@ -10,5 +10,5 @@ Consult the [schema](https://json-schema.app/view/%23/%23%2Fdefinitions%2FCheckJ
|
||||
|
||||
### Examples
|
||||
|
||||
* [Self Promotion as percentage of all Activities](/docs/examples/attribution/redditSelfPromoAll.json5) - Check if Author is submitting much more than they comment.
|
||||
* [Self Promotion as percentage of Submissions](/docs/examplesm/attribution/redditSelfPromoSubmissionsOnly.json5) - Check if any of Author's aggregated submission origins are >10% of their submissions
|
||||
* Self Promotion as percentage of all Activities [YAML](/docs/examples/attribution/redditSelfPromoAll.yaml) | [JSON](/docs/examples/attribution/redditSelfPromoAll.json5) - Check if Author is submitting much more than they comment.
|
||||
* Self Promotion as percentage of Submissions [YAML](/docs/examples/attribution/redditSelfPromoSubmissionsOnly.yaml) | [JSON](/docs/examplesm/attribution/redditSelfPromoSubmissionsOnly.json5) - Check if any of Author's aggregated submission origins are >10% of their submissions
|
||||
|
||||
27
docs/examples/attribution/redditSelfPromoAll.yaml
Normal file
27
docs/examples/attribution/redditSelfPromoAll.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
checks:
|
||||
- name: Self Promo Activities
|
||||
description: >-
|
||||
Check if any of Author's aggregated submission origins are >10% of entire
|
||||
history
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: attr10all
|
||||
kind: attribution
|
||||
# criteria defaults to OR -- so either of these criteria will trigger the rule
|
||||
criteria:
|
||||
- threshold: '> 10%' # threshold can be a percent or an absolute number
|
||||
# The default is "all" -- calculate percentage of entire history (submissions & comments)
|
||||
#thresholdOn: all
|
||||
#
|
||||
# look at last 90 days of Author's activities (comments and submissions)
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
# look at Author's last 100 activities (comments and submissions)
|
||||
window: 100
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
{{rules.attr10all.largestPercent}}% of
|
||||
{{rules.attr10all.activityTotal}} items over
|
||||
{{rules.attr10all.window}}
|
||||
24
docs/examples/attribution/redditSelfPromoSubmissionOnly.yaml
Normal file
24
docs/examples/attribution/redditSelfPromoSubmissionOnly.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
checks:
|
||||
- name: Self Promo Submissions
|
||||
description: >-
|
||||
Check if any of Author's aggregated submission origins are >10% of their
|
||||
submissions
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: attr10sub
|
||||
kind: attribution
|
||||
# criteria defaults to OR -- so either of these criteria will trigger the rule
|
||||
criteria:
|
||||
- threshold: '> 10%' # threshold can be a percent or an absolute number
|
||||
thresholdOn: submissions # calculate percentage of submissions, rather than entire history (submissions & comments)
|
||||
window: 90 days # look at last 90 days of Author's activities (comments and submissions)
|
||||
- threshold: '> 10%'
|
||||
thresholdOn: submissions
|
||||
window: 100 # look at Author's last 100 activities (comments and submissions)
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
{{rules.attr10sub.largestPercent}}% of
|
||||
{{rules.attr10sub.activityTotal}} items over
|
||||
{{rules.attr10sub.window}}
|
||||
@@ -18,10 +18,10 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorRule
|
||||
### Examples
|
||||
|
||||
* Basic examples
|
||||
* [Flair new user Submission](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does not have the `vet` flair then flair the Submission with `New User`
|
||||
* [Flair vetted user Submission](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does have the `vet` flair then flair the Submission with `Vetted`
|
||||
* Flair new user Submission [YAML](/docs/examples/author/flairNewUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does not have the `vet` flair then flair the Submission with `New User`
|
||||
* Flair vetted user Submission [YAML](/docs/examples/author/flairNewUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does have the `vet` flair then flair the Submission with `Vetted`
|
||||
* Used with other Rules
|
||||
* [Ignore vetted user](/docs/examples/author/flairNewUserSubmission.json5) - Short-circuit the Check if the Author has the `vet` flair
|
||||
* Ignore vetted user [YAML](/docs/examples/author/flairNewUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - Short-circuit the Check if the Author has the `vet` flair
|
||||
|
||||
## Filter
|
||||
|
||||
@@ -35,4 +35,4 @@ All **Rules** and **Checks** have an optional `authorIs` property that takes an
|
||||
|
||||
### Examples
|
||||
|
||||
* [Skip recent activity check based on author](/docs/examples/author/authorFilter.json5) - Skip a Recent Activity check for a set of subreddits if the Author of the Submission has any set of flairs.
|
||||
* Skip recent activity check based on author [YAML](/docs/examples/author/authorFilter.yaml) | [JSON](/docs/examples/author/authorFilter.json5) - Skip a Recent Activity check for a set of subreddits if the Author of the Submission has any set of flairs.
|
||||
|
||||
48
docs/examples/author/authorFilter.yaml
Normal file
48
docs/examples/author/authorFilter.yaml
Normal file
@@ -0,0 +1,48 @@
|
||||
checks:
|
||||
- name: Karma/Meme Sub Activity
|
||||
description: Report on karma sub activity or meme sub activity if user isn't a memelord
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: freekarma
|
||||
kind: recentActivity
|
||||
lookAt: submissions
|
||||
thresholds:
|
||||
- threshold: '>= 1'
|
||||
subreddits:
|
||||
- DeFreeKarma
|
||||
- FreeKarma4U
|
||||
window: 7 days
|
||||
- name: noobmemer
|
||||
kind: recentActivity
|
||||
# authors filter will be checked before a rule is run. If anything passes then the Rule is skipped -- it is not failed or triggered.
|
||||
# if *all* Rules for a Check are skipped due to authors filter then the Check will fail
|
||||
authorIs:
|
||||
# each property (include/exclude) can contain multiple AuthorCriteria
|
||||
# if any AuthorCriteria passes its test the Rule is skipped
|
||||
#
|
||||
# for an AuthorCriteria to pass all properties present on it must pass
|
||||
#
|
||||
# if include is present it will always run and exclude will be skipped
|
||||
#-include:
|
||||
exclude:
|
||||
# for this to pass the Author of the Submission must not have the flair "Supreme Memer" and have the name "user1" or "user2"
|
||||
- flairText:
|
||||
- Supreme Memer
|
||||
names:
|
||||
- user1
|
||||
- user2
|
||||
# for this to pass the Author of the Submission must not have the flair "Decent Memer"
|
||||
- flairText:
|
||||
- Decent Memer
|
||||
lookAt: submissions
|
||||
thresholds:
|
||||
- threshold: '>= 1'
|
||||
subreddits:
|
||||
- dankmemes
|
||||
window: 7 days
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
Author has posted in free karma sub, or in /r/dankmemes and does not
|
||||
have meme flair in this subreddit
|
||||
16
docs/examples/author/flairNewUserSubmission.yaml
Normal file
16
docs/examples/author/flairNewUserSubmission.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
checks:
|
||||
- name: Flair New User Sub
|
||||
description: Flair submission as sketchy if user does not have vet flair
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: newflair
|
||||
kind: author
|
||||
# rule will trigger if Author does not have "vet" flair text
|
||||
exclude:
|
||||
- flairText:
|
||||
- vet
|
||||
actions:
|
||||
- kind: flair
|
||||
text: New User
|
||||
css: orange
|
||||
16
docs/examples/author/flairVettedUserSubmission.yaml
Normal file
16
docs/examples/author/flairVettedUserSubmission.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
checks:
|
||||
- name: Flair Vetted User Submission
|
||||
description: Flair submission as Approved if user has vet flair
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: newflair
|
||||
kind: author
|
||||
# rule will trigger if Author has "vet" flair text
|
||||
include:
|
||||
- flairText:
|
||||
- vet
|
||||
actions:
|
||||
- kind: flair
|
||||
text: Vetted
|
||||
css: green
|
||||
45
docs/examples/author/ignoreVettedUser.yaml
Normal file
45
docs/examples/author/ignoreVettedUser.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
checks:
|
||||
- name: non-vetted karma/meme activity
|
||||
description: >-
|
||||
Report if Author has SP and has recent karma/meme sub activity and isn't
|
||||
vetted
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
# The Author Rule is best used in conjunction with other Rules --
|
||||
# instead of having to write an AuthorFilter for every Rule where you want to skip it based on Author criteria
|
||||
# you can write one Author Rule and make it fail on the required criteria
|
||||
# so that the check fails and Actions don't run
|
||||
- name: nonvet
|
||||
kind: author
|
||||
exclude:
|
||||
- flairText:
|
||||
- vet
|
||||
- name: attr10
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
window: 100
|
||||
- name: freekarma
|
||||
kind: recentActivity
|
||||
lookAt: submissions
|
||||
thresholds:
|
||||
- threshold: '>= 1'
|
||||
subreddits:
|
||||
- DeFreeKarma
|
||||
- FreeKarma4U
|
||||
window: 7 days
|
||||
- name: memes
|
||||
kind: recentActivity
|
||||
lookAt: submissions
|
||||
thresholds:
|
||||
- threshold: '>= 3'
|
||||
subreddits:
|
||||
- dankmemes
|
||||
window: 7 days
|
||||
# will NOT run if the Author for this Submission has the flair "vet"
|
||||
actions:
|
||||
- kind: report
|
||||
content: Author has posted in free karma or meme subs recently
|
||||
@@ -9,5 +9,5 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FHistoryJSO
|
||||
|
||||
### Examples
|
||||
|
||||
* [Low Comment Engagement](/docs/examples/history/lowEngagement.json5) - Check if Author is submitting much more than they comment.
|
||||
* [OP Comment Engagement](/docs/examples/history/opOnlyEngagement.json5) - Check if Author is mostly engaging only in their own content
|
||||
* Low Comment Engagement [YAML](/docs/examples/history/lowEngagement.yaml) | [JSON](/docs/examples/history/lowEngagement.json5) - Check if Author is submitting much more than they comment.
|
||||
* OP Comment Engagement [YAML](/docs/examples/history/opOnlyEngagement.yaml) | [JSON](/docs/examples/history/opOnlyEngagement.json5) - Check if Author is mostly engaging only in their own content
|
||||
|
||||
21
docs/examples/history/lowEngagement.yaml
Normal file
21
docs/examples/history/lowEngagement.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
checks:
|
||||
- name: Low Comment Engagement
|
||||
description: Check if Author is submitting much more than they comment
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: lowComm
|
||||
kind: history
|
||||
criteria:
|
||||
- comment: '< 30%'
|
||||
window:
|
||||
# get author's last 90 days of activities or 100 activities, whichever is less
|
||||
duration: 90 days
|
||||
count: 100
|
||||
# trigger if less than 30% of their activities in this time period are comments
|
||||
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
Low engagement: comments were {{rules.lowcomm.commentPercent}} of
|
||||
{{rules.lowcomm.activityTotal}} over {{rules.lowcomm.window}}
|
||||
22
docs/examples/history/opOnlyEngagement.yaml
Normal file
22
docs/examples/history/opOnlyEngagement.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
checks:
|
||||
- name: Engaging Own Content Only
|
||||
description: Check if Author is mostly engaging in their own content only
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: opOnly
|
||||
kind: history
|
||||
criteria:
|
||||
# trigger if more than 60% of their activities in this time period are comments as OP
|
||||
- comment: '> 60% OP'
|
||||
window:
|
||||
# get author's last 90 days of activities or 100 activities, whichever is less
|
||||
duration: 90 days
|
||||
count: 100
|
||||
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
Selfish OP: {{rules.oponly.opPercent}} of
|
||||
{{rules.oponly.commentTotal}} comments over {{rules.oponly.window}}
|
||||
are as OP
|
||||
9
docs/examples/onlyfansFlair/README.md
Normal file
9
docs/examples/onlyfansFlair/README.md
Normal file
@@ -0,0 +1,9 @@
|
||||
# Flair users and submissions
|
||||
|
||||
Flair users and submissions based on certain keywords from submitter's profile.
|
||||
|
||||
Consult [User Flair schema](https://json-schema.app/view/%23%2Fdefinitions%2FUserFlairActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) and [Submission Flair schema](https://json-schema.app/view/%23%2Fdefinitions%2FFlairActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the rule's properties.
|
||||
|
||||
### Examples
|
||||
|
||||
* OnlyFans submissions [YAML](/docs/examples/onlyfansFlair/onlyfansFlair.yaml) | [JSON](/docs/examples/onlyfansFlair/onlyfansFlair.json5) - Check whether submitter has typical OF keywords in their profile and flair both author + submission accordingly.
|
||||
68
docs/examples/onlyfansFlair/onlyfansFlair.json5
Normal file
68
docs/examples/onlyfansFlair/onlyfansFlair.json5
Normal file
@@ -0,0 +1,68 @@
|
||||
{
|
||||
"checks": [
|
||||
{
|
||||
"name": "Flair OF submitters",
|
||||
"description": "Flair submission as OF if user does not have Verified flair and has certain keywords in their profile",
|
||||
"kind": "submission",
|
||||
"authorIs": {
|
||||
"exclude": [
|
||||
{
|
||||
"flairCssClass": ["verified"]
|
||||
}
|
||||
]
|
||||
},
|
||||
"rules": [
|
||||
{
|
||||
"name": "OnlyFans strings in description",
|
||||
"kind": "author",
|
||||
"include": [
|
||||
{
|
||||
"description": [
|
||||
"/(cashapp|allmylinks|linktr|onlyfans\\.com)/i",
|
||||
"/(see|check|my|view) (out|of|onlyfans|kik|skype|insta|ig|profile|links)/i",
|
||||
"my links",
|
||||
"$"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"name": "Set OnlyFans user flair",
|
||||
"kind": "userflair",
|
||||
"flair_template_id": "put-your-onlyfans-user-flair-id-here"
|
||||
},
|
||||
{
|
||||
"name":"Set OF Creator SUBMISSION flair",
|
||||
"kind": "flair",
|
||||
"flair_template_id": "put-your-onlyfans-post-flair-id-here"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"name": "Flair posts of OF submitters",
|
||||
"description": "Flair submission as OnlyFans if submitter has OnlyFans userflair (override post flair set by submitter)",
|
||||
"kind": "submission",
|
||||
"rules": [
|
||||
{
|
||||
"name": "Include OF submitters",
|
||||
"kind": "author",
|
||||
"include": [
|
||||
{
|
||||
"flairCssClass": ["onlyfans"]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"name":"Set OF Creator SUBMISSION flair",
|
||||
"kind": "flair",
|
||||
"flair_template_id": "put-your-onlyfans-post-flair-id-here"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
38
docs/examples/onlyfansFlair/onlyfansFlair.yaml
Normal file
38
docs/examples/onlyfansFlair/onlyfansFlair.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
checks:
|
||||
- name: Flair OF submitters
|
||||
description: Flair submission as OF if user does not have Verified flair and has
|
||||
certain keywords in their profile
|
||||
kind: submission
|
||||
authorIs:
|
||||
exclude:
|
||||
- flairCssClass:
|
||||
- verified
|
||||
rules:
|
||||
- name: OnlyFans strings in description
|
||||
kind: author
|
||||
include:
|
||||
- description:
|
||||
- '/(cashapp|allmylinks|linktr|onlyfans\.com)/i'
|
||||
- '/(see|check|my|view) (out|of|onlyfans|kik|skype|insta|ig|profile|links)/i'
|
||||
- my links
|
||||
- "$"
|
||||
actions:
|
||||
- name: Set OnlyFans user flair
|
||||
kind: userflair
|
||||
flair_template_id: put-your-onlyfans-user-flair-id-here
|
||||
- name: Set OF Creator SUBMISSION flair
|
||||
kind: flair
|
||||
flair_template_id: put-your-onlyfans-post-flair-id-here
|
||||
- name: Flair posts of OF submitters
|
||||
description: Flair submission as OnlyFans if submitter has OnlyFans userflair (override post flair set by submitter)
|
||||
kind: submission
|
||||
rules:
|
||||
- name: Include OF submitters
|
||||
kind: author
|
||||
include:
|
||||
- flairCssClass:
|
||||
- onlyfans
|
||||
actions:
|
||||
- name: Set OF Creator SUBMISSION flair
|
||||
kind: flair
|
||||
flair_template_id: put-your-onlyfans-post-flair-id-here
|
||||
@@ -6,5 +6,5 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRecentActi
|
||||
|
||||
### Examples
|
||||
|
||||
* [Free Karma Subreddits](/docs/examples/recentActivity/freeKarma.json5) - Check if the Author has recently posted in any "free karma" subreddits
|
||||
* [Submission in Free Karma Subreddits](/docs/examples/recentActivity/freeKarmaOnSubmission.json5) - Check if the Author has posted the Submission this check is running on in any "free karma" subreddits recently
|
||||
* Free Karma Subreddits [YAML](/docs/examples/recentActivity/freeKarma.yaml) | [JSON](/docs/examples/recentActivity/freeKarma.json5) - Check if the Author has recently posted in any "free karma" subreddits
|
||||
* Submission in Free Karma Subreddits [YAML](/docs/examples/recentActivity/freeKarmaOnSubmission.yaml) | [JSON](/docs/examples/recentActivity/freeKarmaOnSubmission.json5) - Check if the Author has posted the Submission this check is running on in any "free karma" subreddits recently
|
||||
|
||||
27
docs/examples/recentActivity/freeKarma.yaml
Normal file
27
docs/examples/recentActivity/freeKarma.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
checks:
|
||||
- name: Free Karma Alert
|
||||
description: Check if author has posted in 'freekarma' subreddits
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: freekarma
|
||||
kind: recentActivity
|
||||
# when lookAt is not present this rule will look for submissions and comments
|
||||
#lookAt: comments
|
||||
useSubmissionAsReference: false
|
||||
thresholds:
|
||||
# if the number of activities (sub/comment) found CUMULATIVELY in the subreddits listed is
|
||||
# equal to or greater than 1 then the rule is triggered
|
||||
- threshold: '>= 1'
|
||||
subreddits:
|
||||
- DeFreeKarma
|
||||
- FreeKarma4U
|
||||
- FreeKarma4You
|
||||
- upvote
|
||||
window: 7 days
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
{{rules.freekarma.totalCount}} activities in karma
|
||||
{{rules.freekarma.subCount}} subs over {{rules.freekarma.window}}:
|
||||
{{rules.freekarma.subSummary}}
|
||||
26
docs/examples/recentActivity/freeKarmaOnSubmission.yaml
Normal file
26
docs/examples/recentActivity/freeKarmaOnSubmission.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
checks:
|
||||
- name: Free Karma On Submission Alert
|
||||
description: Check if author has posted this submission in 'freekarma' subreddits
|
||||
kind: submission
|
||||
rules:
|
||||
- name: freekarmasub
|
||||
kind: recentActivity
|
||||
# rule will only look at Author's submissions in these subreddits
|
||||
lookAt: submissions
|
||||
# rule will only look at Author's submissions in these subreddits that have the same content (link) as the submission this event was made on
|
||||
# In simpler terms -- rule will only check to see if the same link the author just posted is also posted in these subreddits
|
||||
useSubmissionAsReference: true
|
||||
thresholds:
|
||||
- threshold: '>= 1'
|
||||
subreddits:
|
||||
- DeFreeKarma
|
||||
- FreeKarma4U
|
||||
- FreeKarma4You
|
||||
- upvote
|
||||
window: 7 days
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
Submission posted {{rules.freekarmasub.totalCount}} times in karma
|
||||
{{rules.freekarmasub.subCount}} subs over
|
||||
{{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}
|
||||
@@ -11,10 +11,12 @@ Which can then be used in conjunction with a [`window`](https://github.com/FoxxM
|
||||
|
||||
### Examples
|
||||
|
||||
* [Trigger if regex matches against the current activity](/docs/examples/regex/matchAnyCurrentActivity.json5)
|
||||
* [Trigger if regex matches 5 times against the current activity](/docs/examples/regex/matchThresholdCurrentActivity.json5)
|
||||
* [Trigger if regex matches against any part of a Submission](/docs/examples/regex/matchSubmissionParts.json5)
|
||||
* [Trigger if regex matches any of Author's last 10 activities](/docs/examples/regex/matchHistoryActivity.json5)
|
||||
* [Trigger if regex matches at least 3 of Author's last 10 activities](/docs/examples/regex/matchActivityThresholdHistory.json5)
|
||||
* [Trigger if there are 5 regex matches in the Author's last 10 activities](/docs/examples/regex/matchTotalHistoryActivity.json5)
|
||||
* [Trigger if there are 5 regex matches in the Author's last 10 comments](/docs/examples/regex/matchSubsetHistoryActivity.json5)
|
||||
* Trigger if regex matches against the current activity - [YAML](/docs/examples/regex/matchAnyCurrentActivity.yaml) | [JSON](/docs/examples/regex/matchAnyCurrentActivity.json5)
|
||||
* Trigger if regex matches 5 times against the current activity - [YAML](/docs/examples/regex/matchThresholdCurrentActivity.yaml) | [JSON](/docs/examples/regex/matchThresholdCurrentActivity.json5)
|
||||
* Trigger if regex matches against any part of a Submission - [YAML](/docs/examples/regex/matchSubmissionParts.yaml) | [JSON](/docs/examples/regex/matchSubmissionParts.json5)
|
||||
* Trigger if regex matches any of Author's last 10 activities - [YAML](/docs/examples/regex/matchHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchHistoryActivity.json5)
|
||||
* Trigger if regex matches at least 3 of Author's last 10 activities - [YAML](/docs/examples/regex/matchActivityThresholdHistory.yaml) | [JSON](/docs/examples/regex/matchActivityThresholdHistory.json5)
|
||||
* Trigger if there are 5 regex matches in the Author's last 10 activities - [YAML](/docs/examples/regex/matchTotalHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchTotalHistoryActivity.json5)
|
||||
* Trigger if there are 5 regex matches in the Author's last 10 comments - [YAML](/docs/examples/regex/matchSubsetHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchSubsetHistoryActivity.json5)
|
||||
* Remove comments that are spamming discord links - [YAML](/docs/examples/regex/removeDiscordSpam.yaml) | [JSON](/docs/examples/regex/removeDiscordSpam.json5)
|
||||
* Differs from just using automod because this config can allow one-off/organic links from users who DO NOT spam discord links but will still remove the comment if the user is spamming them
|
||||
|
||||
13
docs/examples/regex/matchActivityThresholdHistory.yaml
Normal file
13
docs/examples/regex/matchActivityThresholdHistory.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
name: swear
|
||||
kind: regex
|
||||
criteria:
|
||||
# triggers if more than 3 activities in the last 10 match the regex
|
||||
- regex: '/fuck|shit|damn/'
|
||||
# this differs from "totalMatchThreshold"
|
||||
#
|
||||
# activityMatchThreshold => # of activities from window must match regex
|
||||
# totalMatchThreshold => # of matches across all activities from window must match regex
|
||||
activityMatchThreshold: '> 3'
|
||||
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
|
||||
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
|
||||
window: 10
|
||||
6
docs/examples/regex/matchAnyCurrentActivity.yaml
Normal file
6
docs/examples/regex/matchAnyCurrentActivity.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
name: swear
|
||||
kind: regex
|
||||
criteria:
|
||||
- regex: '/fuck|shit|damn/'
|
||||
# if "matchThreshold" is not specified it defaults to this -- default behavior is to trigger if there are any matches
|
||||
#matchThreshold: "> 0"
|
||||
8
docs/examples/regex/matchHistoryActivity.yaml
Normal file
8
docs/examples/regex/matchHistoryActivity.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
name: swear
|
||||
kind: regex
|
||||
criteria:
|
||||
# triggers if any activity in the last 10 (including current activity) match the regex
|
||||
- regex: '/fuck|shit|damn/'
|
||||
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
|
||||
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
|
||||
window: 10
|
||||
11
docs/examples/regex/matchSubmissionParts.yaml
Normal file
11
docs/examples/regex/matchSubmissionParts.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
name: swear
|
||||
kind: regex
|
||||
criteria:
|
||||
- regex: '/fuck|shit|damn/'
|
||||
# triggers if the current activity has more than 0 matches
|
||||
# if the activity is a submission then matches against title, body, and url
|
||||
# if "testOn" is not provided then `title, body` are the defaults
|
||||
testOn:
|
||||
- title
|
||||
- body
|
||||
- url
|
||||
16
docs/examples/regex/matchSubsetHistoryActivity.yaml
Normal file
16
docs/examples/regex/matchSubsetHistoryActivity.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
name: swear
|
||||
kind: regex
|
||||
criteria:
|
||||
# triggers if there are more than 5 regex matches in the last 10 activities (comments only)
|
||||
- regex: '/fuck|shit|damn/'
|
||||
# this differs from "activityMatchThreshold"
|
||||
#
|
||||
# activityMatchThreshold => # of activities from window must match regex
|
||||
# totalMatchThreshold => # of matches across all activities from window must match regex
|
||||
totalMatchThreshold: '> 5'
|
||||
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
|
||||
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
|
||||
window: 10
|
||||
# determines which activities from window to consider
|
||||
# defaults to "all" (submissions and comments)
|
||||
lookAt: comments
|
||||
6
docs/examples/regex/matchThresholdCurrentActivity.yaml
Normal file
6
docs/examples/regex/matchThresholdCurrentActivity.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
name: swear
|
||||
kind: regex
|
||||
criteria:
|
||||
- regex: '/fuck|shit|damn/'
|
||||
# triggers if current activity has greater than 5 matches
|
||||
matchThreshold: '> 5'
|
||||
13
docs/examples/regex/matchTotalHistoryActivity.yaml
Normal file
13
docs/examples/regex/matchTotalHistoryActivity.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
name: swear
|
||||
kind: regex
|
||||
criteria:
|
||||
# triggers if there are more than 5 regex matches in the last 10 activities (comments or submission)
|
||||
- regex: '/fuck|shit|damn/'
|
||||
# this differs from "activityMatchThreshold"
|
||||
#
|
||||
# activityMatchThreshold => # of activities from window must match regex
|
||||
# totalMatchThreshold => # of matches across all activities from window must match regex
|
||||
totalMatchThreshold: '> 5'
|
||||
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
|
||||
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
|
||||
window: 10
|
||||
73
docs/examples/regex/removeDiscordSpam.json5
Normal file
73
docs/examples/regex/removeDiscordSpam.json5
Normal file
@@ -0,0 +1,73 @@
|
||||
{
|
||||
"checks": [
|
||||
{
|
||||
"name": "remove discord spam",
|
||||
"notifyOnTrigger": true,
|
||||
"description": "remove comments from users who are spamming discord links",
|
||||
"kind": "comment",
|
||||
"authorIs": {
|
||||
"exclude": [
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"itemIs": [
|
||||
{
|
||||
"removed": false,
|
||||
"approved": false,
|
||||
}
|
||||
],
|
||||
"condition": "OR",
|
||||
"rules": [
|
||||
{
|
||||
// set to false if you want to allow comments with a discord link ONLY IF
|
||||
// the author doesn't have a history of spamming discord links
|
||||
// -- basically allows one-off/organic discord links
|
||||
"enable": true,
|
||||
"name": "linkOnlySpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "only link",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+)$/i",
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "linkAnywhereSpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "contains link anywhere",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "linkAnywhereHistoricalSpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "contains links anywhere historically",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
|
||||
"totalMatchThreshold": ">= 3",
|
||||
"lookAt": "comments",
|
||||
"window": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "remove"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
36
docs/examples/regex/removeDiscordSpam.yaml
Normal file
36
docs/examples/regex/removeDiscordSpam.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
checks:
|
||||
- name: remove discord spam
|
||||
notifyOnTrigger: true
|
||||
description: remove comments from users who are spamming discord links
|
||||
kind: comment
|
||||
authorIs:
|
||||
exclude:
|
||||
- isMod: true
|
||||
itemIs:
|
||||
- removed: false
|
||||
approved: false
|
||||
condition: OR
|
||||
rules:
|
||||
- enable: true
|
||||
name: linkOnlySpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: only link
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+)$/i'
|
||||
- condition: AND
|
||||
rules:
|
||||
- name: linkAnywhereSpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: contains link anywhere
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
|
||||
- name: linkAnywhereHistoricalSpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: contains links anywhere historically
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
|
||||
totalMatchThreshold: '>= 3'
|
||||
lookAt: comments
|
||||
window: 10
|
||||
actions:
|
||||
- kind: remove
|
||||
@@ -45,5 +45,5 @@ With only `gapAllowance: 2` this rule **would trigger** because the 1 and 2
|
||||
|
||||
## Examples
|
||||
|
||||
* [Crosspost Spamming](/docs/examples/repeatActivity/crosspostSpamming.json5) - Check if an Author is spamming their Submissions across multiple subreddits
|
||||
* [Burst-posting](/docs/examples/repeatActivity/burstPosting.json5) - Check if Author is crossposting their Submissions in short bursts
|
||||
* Crosspost Spamming [JSON](/docs/examples/repeatActivity/crosspostSpamming.json5) | [YAML](/docs/examples/repeatActivity/crosspostSpamming.yaml) - Check if an Author is spamming their Submissions across multiple subreddits
|
||||
* Burst-posting [JSON](/docs/examples/repeatActivity/burstPosting.json5) | [YAML](/docs/examples/repeatActivity/burstPosting.yaml) - Check if Author is crossposting their Submissions in short bursts
|
||||
|
||||
23
docs/examples/repeatActivity/burstPosting.yaml
Normal file
23
docs/examples/repeatActivity/burstPosting.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
checks:
|
||||
- name: Burstpost Spam
|
||||
description: Check if Author is crossposting in short bursts
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: burstpost
|
||||
kind: repeatActivity
|
||||
# will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
|
||||
useSubmissionAsReference: true
|
||||
# the number of non-repeat activities (submissions or comments) to ignore between repeat submissions
|
||||
gapAllowance: 3
|
||||
# if the Author has posted this Submission 6 times, ignoring 3 non-repeat activities between each repeat, then this rule will trigger
|
||||
threshold: '>= 6'
|
||||
# look at all of the Author's submissions in the last 7 days or 100 submissions
|
||||
window:
|
||||
duration: 7 days
|
||||
count: 100
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
Author has burst-posted this link {{rules.burstpost.largestRepeat}}
|
||||
times over {{rules.burstpost.window}}
|
||||
19
docs/examples/repeatActivity/crosspostSpamming.yaml
Normal file
19
docs/examples/repeatActivity/crosspostSpamming.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
checks:
|
||||
- name: Crosspost Spam
|
||||
description: Check if Author is spamming Submissions across subreddits
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: xpostspam
|
||||
kind: repeatActivity
|
||||
# will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
|
||||
useSubmissionAsReference: true
|
||||
# if the Author has posted this Submission 5 times consecutively then this rule will trigger
|
||||
threshold: '>= 5'
|
||||
# look at all of the Author's submissions in the last 7 days
|
||||
window: 7 days
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
Author has posted this link {{rules.xpostspam.largestRepeat}} times
|
||||
over {{rules.xpostspam.window}}
|
||||
927
docs/examples/repost/README.md
Normal file
927
docs/examples/repost/README.md
Normal file
@@ -0,0 +1,927 @@
|
||||
The **Repost** rule is used to find reposts for both **Submissions** and **Comments**, depending on what type of **Check** it is used on.
|
||||
|
||||
Note: This rule is for searching **all of Reddit** for reposts, as opposed to just the Author of the Activity being checked. If you only want to check for reposts by the Author of the Activity being checked you should use the [Repeat Activity](/docs/examples/repeatActivity) rule.
|
||||
|
||||
# TLDR
|
||||
|
||||
Out of the box CM generates a repost rule with sensible default behavior without any configuration. You do not need to configure any of below options (facets, modifiers, criteria) yourself in order to have a working repost rule. Default behavior is as follows...
|
||||
|
||||
* When looking for Submission reposts CM will find any Submissions with
|
||||
* a very similar title
|
||||
* or independent of title...
|
||||
* any crossposts/duplicates
|
||||
* any submissions with the exact URL
|
||||
* When looking for Comment reposts CM will do the above AND THEN
|
||||
* compare the top 50 most-upvoted comments from the top 10 most-upvoted Submissions against the comment being checked
|
||||
* compare any items found from external source (Youtube comments, etc...) against the comment being checked
|
||||
|
||||
# Configuration
|
||||
|
||||
## Search Facets
|
||||
|
||||
ContextMod has several ways to search for reposts -- all of which look at different elements of a Submission in order to find repost candidates. You can define any/all of these **Search Facets** you want to use to search Reddit inside the configuration for the Repost Rule in the `searchOn` property.
|
||||
|
||||
### Usage
|
||||
|
||||
Facets are specified in the `searchOn` array property within the rule's configuration.
|
||||
|
||||
**String**
|
||||
|
||||
Specify one or more types of facets as a string to use their default configurations
|
||||
|
||||
<details>
|
||||
|
||||
YAML
|
||||
```yaml
|
||||
kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
- url
|
||||
- crossposts
|
||||
```
|
||||
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// ...
|
||||
"searchOn": ["title", "url", "crossposts"],
|
||||
// ....
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
**Object**
|
||||
|
||||
**string** and object configurations can be mixed
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
- kind: url
|
||||
matchScore: 90
|
||||
- external
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// ...
|
||||
"searchOn": [
|
||||
"title",
|
||||
{
|
||||
"kind": "url",
|
||||
// could also specify multiple types to use the same config for all
|
||||
//"kind": ["url", "duplicates"]
|
||||
"matchScore": 90,
|
||||
//...
|
||||
},
|
||||
"external"
|
||||
],
|
||||
// ....
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Facet Types
|
||||
|
||||
* **title** -- search reddit for Submissions with a similar title
|
||||
* **url** -- search reddit for Submissions with the same URL
|
||||
* **duplicates** -- get all Submissions **reddit has identified** as duplicates that are **NOT** crossposts
|
||||
* these are found under *View discussions in other communities* (new reddit) or *other discussions* (old reddit) on the Submission
|
||||
* **crossposts** -- get all Submissions where the current Submission is the source of an **official** crosspost
|
||||
* this differs from duplicates in that crossposts use reddit's built-in crosspost functionality, respect subreddit crosspost rules, and link back to the original Submission
|
||||
* **external** -- get items from the Submission's link source that may be reposted (currently implemented for **Comment Checks** only)
|
||||
* When the Submission link is for...
|
||||
* **Youtube** -- get top comments on video by replies/like count
|
||||
* **NOTE:** An **API Key** for the [Youtube Data API](https://developers.google.com/youtube/v3) must be provided for this facet to work. This can be provided by the operator alongside [bot credentials](/docs/operatorConfiguration.md) or in the top-level `credentials` property for a [subreddit configuration.](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fmaster%2Fsrc%2FSchema%2FApp.json)
|
||||
|
||||
### Facet Modifiers
|
||||
|
||||
For all **Facets**, except for **external**, there are options that can be configured to determine whether a found Submission is a "valid" repost, i.e. filtering. These options can be configured **per facet**.
|
||||
|
||||
* **matchScore** -- The percentage, as a whole number, of a repost title that must match the title being checked in order to consider both a match
|
||||
* **minWordCount** -- The minimum number of words a title must have
|
||||
* **caseSensitive** -- If the match comparison should be case-sensitive (defaults to `false`)
|
||||
|
||||
Additionally, the current Activity's title and/or each repost's title can be transformed before matching:
|
||||
|
||||
* **transformations** -- An array of SearchAndReplace objects used to transform the repost's title
|
||||
* **transformationsActivity** -- An array of SearchAndReplace objects used to transform the current Activity's title
|
||||
|
||||
#### Modifier Defaults
|
||||
|
||||
To make facets easier to use without configuration sensible defaults are applied to each when no other configuration is defined...
|
||||
|
||||
* **title**
|
||||
* `matchScore: 85` -- The candidate repost's title must be at least 85% similar to the current Activity's title
|
||||
* `minWordCount: 2` -- The candidate repost's title must have at least 2 words
|
||||
|
||||
For `url`,`duplicates`, and `crossposts` the only default is `matchScore: 0` because the assumption is you want to treat any actual dups/x-posts or exact URLs as reposts, regardless of their title.
|
||||
|
||||
## Additional Criteria Properties
|
||||
|
||||
A **criteria** object may also specify some additional tests to run against the reposts found from searching.
|
||||
|
||||
### For Submissions and Comments
|
||||
|
||||
#### Occurrences
|
||||
|
||||
Define a set of criteria to test against the **number of reposts**, **time reposts were created**, or both.
|
||||
|
||||
##### Count
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
- url
|
||||
- crossposts
|
||||
occurrences:
|
||||
criteria:
|
||||
- count:
|
||||
condition: AND
|
||||
test:
|
||||
- '> 3'
|
||||
- <= 5
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// ...
|
||||
"searchOn": ["title", "url", "crossposts"],
|
||||
"occurrences": {
|
||||
"criteria": [
|
||||
{
|
||||
// passes if BOTH tests are true
|
||||
"count": {
|
||||
"condition": "AND", // default is AND
|
||||
"test": [
|
||||
"> 3", // TRUE if there are GREATER THAN 3 reposts found
|
||||
"<= 5" // TRUE if there are LESS THAN OR EQUAL TO 5 reposts found
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
##### Time
|
||||
|
||||
Define a test or array of tests to run against **when reposts were created**
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
- url
|
||||
- crossposts
|
||||
occurrences:
|
||||
criteria:
|
||||
- time:
|
||||
condition: AND
|
||||
test:
|
||||
- testOn: all
|
||||
condition: '> 3 months'
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// ...
|
||||
"searchOn": [
|
||||
"title",
|
||||
"url",
|
||||
"crossposts"
|
||||
],
|
||||
"occurrences": {
|
||||
"criteria": [
|
||||
{
|
||||
"time": {
|
||||
// how to test array of comparisons. AND => all must pass, OR => any must pass
|
||||
"condition": "AND",
|
||||
"test": [
|
||||
{
|
||||
// which of the found reposts to test the time comparison on
|
||||
//
|
||||
// "all" => ALL reposts must pass time comparison
|
||||
// "any" => ANY repost must pass time comparison
|
||||
// "newest" => The newest (closest in time to now) repost must pass time comparison
|
||||
// "oldest" => The oldest (furthest in time from now) repost must pass time comparison
|
||||
//
|
||||
"testOn": "all",
|
||||
// Tested items must be OLDER THAN 3 months
|
||||
"condition": "> 3 months"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
### For Comments
|
||||
|
||||
When the rule is run in a **Comment Check** you may specify text comparisons (like those found in Search Facets) to run on the contents of the repost comments *against* the contents of the comment being checked.
|
||||
|
||||
* **matchScore** -- The percentage, as a whole number, of a repost comment that must match the comment being checked in order to consider both a match (defaults to 85% IE `85`)
|
||||
* **minWordCount** -- The minimum number of words a comment must have
|
||||
* **caseSensitive** -- If the match comparison should be case-sensitive (defaults to `false`)
|
||||
|
||||
# Examples
|
||||
|
||||
Examples of a *full* CM configuration, including the Repost Rule, in various scenarios. In each scenario the parts of the configuration that affect the rule are indicated.
|
||||
|
||||
## Submissions
|
||||
|
||||
When the Repost Rule is run on a **Submission Check** IE the activity being checked is a Submission.
|
||||
|
||||
### Default Behavior (No configuration)
|
||||
|
||||
This is the same behavior described in the [TLDR](#TLDR) section above -- find any submissions with:
|
||||
|
||||
* a very similar title (85% or more the same)
|
||||
* or ignoring title...
|
||||
* any crossposts/duplicates
|
||||
* any submissions with the exact URL
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost"
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by Title Only
|
||||
|
||||
Find any submissions with:
|
||||
|
||||
* a very similar title (85% or more the same)
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only title to search on
|
||||
"searchOn": [
|
||||
"title" // uses default configuration since only string is specified
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by Title only and specify similarity percentage
|
||||
|
||||
* a very similar title (95% or more the same)
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- kind: title
|
||||
matchScore: '95'
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only title to search on
|
||||
"searchOn": [
|
||||
{
|
||||
"kind": "title",
|
||||
// titles must be 95% or more similar
|
||||
"matchScore": "95"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by Title, specify similarity percentage, AND any duplicates
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- duplicates
|
||||
- kind: title
|
||||
matchScore: '95'
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
"searchOn": [
|
||||
// look for duplicates (NON crossposts) using default configuration
|
||||
"duplicates",
|
||||
// search by title
|
||||
{
|
||||
"kind": "title",
|
||||
// titles must be 95% or more similar
|
||||
"matchScore": "95"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Approve Submission if not reposted in the last month, by title
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check there are no reposts with same title in the last month
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
occurrences:
|
||||
condition: OR
|
||||
criteria:
|
||||
- count:
|
||||
test:
|
||||
- < 1
|
||||
- time:
|
||||
test:
|
||||
- testOn: newest
|
||||
condition: '> 1 month'
|
||||
actions:
|
||||
- kind: approve
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check there are no reposts with same title in the last month",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
"searchOn": [
|
||||
"title"
|
||||
],
|
||||
"occurrences": {
|
||||
// if EITHER criteria is TRUE then it "passes"
|
||||
"condition": "OR",
|
||||
"criteria": [
|
||||
// first criteria:
|
||||
// TRUE if there are LESS THAN 1 reposts (no reposts found)
|
||||
{
|
||||
"count": {
|
||||
"test": ["< 1"]
|
||||
}
|
||||
},
|
||||
// second criteria:
|
||||
// TRUE if the newest repost is older than one month
|
||||
{
|
||||
"time": {
|
||||
"test": [
|
||||
{
|
||||
"testOn": "newest",
|
||||
"condition": "> 1 month"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
// approve this post since we know it is not a repost of anything within the last month
|
||||
"kind": "approve",
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
## Comments
|
||||
|
||||
### Default Behavior (No configuration)
|
||||
|
||||
This is the same behavior described in the [TLDR](#TLDR) section above -- find any submissions with:
|
||||
|
||||
* a very similar title (85% or more the same)
|
||||
* or ignoring title...
|
||||
* any crossposts/duplicates
|
||||
* any submissions with the exact URL
|
||||
* If comment being checked is on a Submission for Youtube then get top 50 comments on youtube video as well...
|
||||
|
||||
AND THEN
|
||||
|
||||
* sort submissions by votes
|
||||
* take top 20 (upvoted) comments from top 10 (upvoted) submissions
|
||||
* sort comments by votes, take top 50 + top 50 external items
|
||||
|
||||
FINALLY
|
||||
|
||||
* filter all gathered comments by default `matchScore: 85` to find very similar matches
|
||||
* rule is triggered if any are found
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted
|
||||
  kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
actions:
|
||||
- kind: report
|
||||
content: This comment was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted",
|
||||
// kind specifies this check is for COMMENTS
|
||||
"kind": "common",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost"
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by external (youtube) comments only
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted from youtube
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- external
|
||||
actions:
|
||||
- kind: report
|
||||
content: This comment was reposted from youtube
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted from youtube",
|
||||
      // kind specifies this check is for COMMENTS
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only external (youtube) to search on
|
||||
"searchOn": [
|
||||
"external"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted from youtube"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by external (youtube) comments only, with higher comment match percentage
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted from youtube
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- external
|
||||
matchScore: 95
|
||||
actions:
|
||||
- kind: report
|
||||
content: This comment was reposted from youtube
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted from youtube",
|
||||
      // kind specifies this check is for COMMENTS
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only external (youtube) to search on
|
||||
"searchOn": [
|
||||
"external"
|
||||
],
|
||||
"matchScore": 95 // matchScore for comments is on criteria instead of searchOn config...
|
||||
},
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted from youtube"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by external (youtube) comments and submission URL, with higher comment match percentage
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- external
|
||||
- url
|
||||
matchScore: 95
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
This comment was reposted from youtube or from submission with the
|
||||
same URL
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted",
|
||||
      // kind specifies this check is for COMMENTS
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only external (youtube) to search on
|
||||
"searchOn": [
|
||||
"external",
|
||||
// can specify any/all submission search facets to acquire comments from
|
||||
"url"
|
||||
],
|
||||
"matchScore": 95 // matchScore for comments is on criteria instead of searchOn config...
|
||||
},
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted from youtube or from submission with the same URL"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
@@ -11,21 +11,31 @@ All actions for these configurations are non-destructive in that:
|
||||
|
||||
**You will have to remove the `report` action and `dryRun` settings yourself.** This is to ensure that you understand the behavior the bot will be performing. If you are unsure of this you should leave them in place until you are certain the behavior the bot is performing is acceptable.
|
||||
|
||||
**YAML** is the same format as **automoderator**
|
||||
|
||||
## Submission-based Behavior
|
||||
|
||||
### [Remove submissions from users who have used 'freekarma' subs to bypass karma checks](/docs/examples/subredditReady/freekarma.json5)
|
||||
### Remove submissions from users who have used 'freekarma' subs to bypass karma checks
|
||||
|
||||
[YAML](/docs/examples/subredditReady/freekarma.yaml) | [JSON](/docs/examples/subredditReady/freekarma.json5)
|
||||
|
||||
If the user has any activity (comment/submission) in known freekarma subreddits in the past (50 activities or 6 months) then remove the submission.
|
||||
|
||||
### [Remove submissions from users who have crossposted the same submission 4 or more times](/docs/examples/subredditReady/crosspostSpam.json5)
|
||||
### Remove submissions from users who have crossposted the same submission 4 or more times
|
||||
|
||||
[YAML](/docs/examples/subredditReady/crosspostSpam.yaml) | [JSON](/docs/examples/subredditReady/crosspostSpam.json5)
|
||||
|
||||
If the user has crossposted the same submission in the past (50 activities or 6 months) 4 or more times in a row then remove the submission.
|
||||
|
||||
### [Remove submissions from users who have crossposted or used 'freekarma' subs](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5)
|
||||
### Remove submissions from users who have crossposted or used 'freekarma' subs
|
||||
|
||||
[YAML](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.yaml) | [JSON](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5)
|
||||
|
||||
Will remove submission if either of the above two behaviors is detected
|
||||
|
||||
### [Remove link submissions where the user's history is comprised of 10% or more of the same link](/docs/examples/subredditReady/selfPromo.json5)
|
||||
### Remove link submissions where the user's history is comprised of 10% or more of the same link
|
||||
|
||||
[YAML](/docs/examples/subredditReady/selfPromo.yaml) | [JSON](/docs/examples/subredditReady/selfPromo.json5)
|
||||
|
||||
If the link origin (youtube author, twitter author, etc. or regular domain for non-media links)
|
||||
|
||||
@@ -36,6 +46,33 @@ then remove the submission
|
||||
|
||||
## Comment-based behavior
|
||||
|
||||
### [Remove comment if the user has posted the same comment 4 or more times in a row](/docs/examples/subredditReady/commentSpam.json5)
|
||||
### Remove comment if the user has posted the same comment 4 or more times in a row
|
||||
|
||||
[YAML](/docs/examples/subredditReady/commentSpam.yaml) | [JSON](/docs/examples/subredditReady/commentSpam.json5)
|
||||
|
||||
If the user made the same comment (with some fuzzy matching) 4 or more times in a row in the past (50 activities or 6 months) then remove the comment.
|
||||
|
||||
### Remove comment if it is discord invite link spam
|
||||
|
||||
[YAML](/docs/examples/subredditReady/discordSpam.yaml) | [JSON](/docs/examples/subredditReady/discordSpam.json5)
|
||||
|
||||
This rule goes a step further than automod can by being more discretionary about how it handles this type of spam.
|
||||
|
||||
* Remove the comment and **ban a user** if:
|
||||
* Comment being checked contains **only** a discord link (no other text) AND
|
||||
* Discord links appear **anywhere** in three or more of the last 10 comments the Author has made
|
||||
|
||||
otherwise...
|
||||
|
||||
* Remove the comment if:
|
||||
* Comment being checked contains **only** a discord link (no other text) OR
|
||||
* Comment contains a discord link **anywhere** AND
|
||||
* Discord links appear **anywhere** in three or more of the last 10 comments the Author has made
|
||||
|
||||
Using these checks ContextMod can more easily distinguish between these use cases for a user commenting with a discord link:
|
||||
|
||||
* actual spammers who only spam a discord link
|
||||
* users who may comment with a link but have context for it either in the current comment or in their history
|
||||
* users who may comment with a link but it's a one-off event (no other links historically)
|
||||
|
||||
Additionally, you could modify both/either of these checks to not remove one-off discord link comments but still remove if the user has a historical trend for spamming links
|
||||
|
||||
25
docs/examples/subredditReady/commentSpam.yaml
Normal file
25
docs/examples/subredditReady/commentSpam.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
# Stop users who spam the same comment many times
|
||||
- name: low xp comment spam
|
||||
description: X-posted comment >=4x
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- name: xPostLow
|
||||
kind: repeatActivity
|
||||
# number of "non-repeat" comments allowed between "repeat comments"
|
||||
gapAllowance: 2
|
||||
      # 4 or more repeat comments triggers this rule
|
||||
threshold: '>= 4'
|
||||
      # retrieve either last 50 comments or 6 months of history, whichever is less
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: 'Remove => Posted same comment {{rules.xpostlow.largestRepeat}}x times'
|
||||
- kind: remove
|
||||
enable: true
|
||||
48
docs/examples/subredditReady/crosspostSpam.yaml
Normal file
48
docs/examples/subredditReady/crosspostSpam.yaml
Normal file
@@ -0,0 +1,48 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
# stop users who post low-effort, crossposted spam submissions
|
||||
#
|
||||
# Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
|
||||
  # less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
- name: low xp spam and engagement
|
||||
description: X-posted 4x and low comment engagement
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: xPostLow
|
||||
kind: repeatActivity
|
||||
gapAllowance: 2
|
||||
threshold: '>= 4'
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
- name: lowOrOpComm
|
||||
kind: history
|
||||
criteriaJoin: OR
|
||||
criteria:
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: < 50%
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: '> 40% OP'
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: >-
|
||||
Remove=>{{rules.xpostlow.largestRepeat}} X-P =>
|
||||
{{rules.loworopcomm.thresholdSummary}}
|
||||
- kind: remove
|
||||
enable: true
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
Your submission has been removed because you cross-posted it
|
||||
{{rules.xpostlow.largestRepeat}} times and you have very low
|
||||
engagement outside of making submissions
|
||||
distinguish: true
|
||||
75
docs/examples/subredditReady/discordSpam.json5
Normal file
75
docs/examples/subredditReady/discordSpam.json5
Normal file
@@ -0,0 +1,75 @@
|
||||
{
|
||||
"polling": ["newComm"],
|
||||
"checks": [
|
||||
{
|
||||
"name": "ban discord only spammer",
|
||||
"description": "ban a user who spams only a discord link many times historically",
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
"linkOnlySpam",
|
||||
"linkAnywhereHistoricalSpam",
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "remove"
|
||||
},
|
||||
{
|
||||
"kind": "ban",
|
||||
"content": "spamming discord links"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "remove discord spam",
|
||||
"description": "remove comments from users who only link to discord or mention discord link many times historically",
|
||||
"kind": "comment",
|
||||
"condition": "OR",
|
||||
"rules": [
|
||||
{
|
||||
"name": "linkOnlySpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "only link",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+)$/i",
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "linkAnywhereSpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "contains link anywhere",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "linkAnywhereHistoricalSpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "contains links anywhere historically",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
|
||||
"totalMatchThreshold": ">= 3",
|
||||
"lookAt": "comments",
|
||||
"window": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "remove"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
46
docs/examples/subredditReady/discordSpam.yaml
Normal file
46
docs/examples/subredditReady/discordSpam.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: ban discord only spammer
|
||||
description: ban a user who spams only a discord link many times historically
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- linkOnlySpam
|
||||
- linkAnywhereHistoricalSpam
|
||||
actions:
|
||||
- kind: remove
|
||||
- kind: ban
|
||||
content: spamming discord links
|
||||
- name: remove discord spam
|
||||
description: >-
|
||||
remove comments from users who only link to discord or mention discord
|
||||
link many times historically
|
||||
kind: comment
|
||||
condition: OR
|
||||
rules:
|
||||
- name: linkOnlySpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: only link
|
||||
# single quotes are required to escape special characters
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+)$/i'
|
||||
- condition: AND
|
||||
rules:
|
||||
- name: linkAnywhereSpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: contains link anywhere
|
||||
# single quotes are required to escape special characters
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
|
||||
- name: linkAnywhereHistoricalSpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: contains links anywhere historically
|
||||
# single quotes are required to escape special characters
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
|
||||
totalMatchThreshold: '>= 3'
|
||||
lookAt: comments
|
||||
window: 10
|
||||
actions:
|
||||
- kind: remove
|
||||
84
docs/examples/subredditReady/freeKarmaOrCrosspostSpam.yaml
Normal file
84
docs/examples/subredditReady/freeKarmaOrCrosspostSpam.yaml
Normal file
@@ -0,0 +1,84 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
# stop users who post low-effort, crossposted spam submissions
|
||||
#
|
||||
# Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
|
||||
  # less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
- name: remove on low xp spam and engagement
|
||||
description: X-posted 4x and low comment engagement
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: xPostLow
|
||||
kind: repeatActivity
|
||||
gapAllowance: 2
|
||||
threshold: '>= 4'
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
- name: lowOrOpComm
|
||||
kind: history
|
||||
criteriaJoin: OR
|
||||
criteria:
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: < 50%
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: '> 40% OP'
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: >-
|
||||
Remove=>{{rules.xpostlow.largestRepeat}} X-P =>
|
||||
{{rules.loworopcomm.thresholdSummary}}
|
||||
- kind: remove
|
||||
enable: false
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
Your submission has been removed because you cross-posted it
|
||||
{{rules.xpostlow.largestRepeat}} times and you have very low
|
||||
engagement outside of making submissions
|
||||
distinguish: true
|
||||
dryRun: true
|
||||
# Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
|
||||
- name: freekarma removal
|
||||
description: Remove submission if user has used freekarma sub recently
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: freekarma
|
||||
kind: recentActivity
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
useSubmissionAsReference: false
|
||||
thresholds:
|
||||
- subreddits:
|
||||
- FreeKarma4U
|
||||
- FreeKarma4You
|
||||
- KarmaStore
|
||||
- promote
|
||||
- shamelessplug
|
||||
- upvote
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: 'Remove=> {{rules.newtube.totalCount}} activities in freekarma subs'
|
||||
- kind: remove
|
||||
enable: false
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
Your submission has been removed because you have recent activity in
|
||||
'freekarma' subs
|
||||
distinguish: true
|
||||
dryRun: true
|
||||
35
docs/examples/subredditReady/freekarma.yaml
Normal file
35
docs/examples/subredditReady/freekarma.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
# Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
|
||||
- name: freekarma removal
|
||||
description: Remove submission if user has used freekarma sub recently
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: freekarma
|
||||
kind: recentActivity
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
useSubmissionAsReference: false
|
||||
thresholds:
|
||||
- subreddits:
|
||||
- FreeKarma4U
|
||||
- FreeKarma4You
|
||||
- KarmaStore
|
||||
- upvote
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: 'Remove=> {{rules.newtube.totalCount}} activities in freekarma subs'
|
||||
- kind: remove
|
||||
enable: true
|
||||
- kind: comment
|
||||
enable: false
|
||||
content: >-
|
||||
Your submission has been removed because you have recent activity in
|
||||
'freekarma' subs
|
||||
distinguish: true
|
||||
71
docs/examples/subredditReady/selfPromo.yaml
Normal file
71
docs/examples/subredditReady/selfPromo.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
#
|
||||
# Stop users who make link submissions with a self-promotional agenda (with reddit's suggested 10% rule)
|
||||
# https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit
|
||||
#
|
||||
# Remove a SUBMISSION if the link comprises more than or equal to 10% of users history (100 activities or 6 months) OR
|
||||
#
|
||||
# if link comprises 10% of submission history (100 activities or 6 months)
|
||||
  # AND less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
#
|
||||
- name: Self-promo all AND low engagement
|
||||
description: Self-promo is >10% for all or just sub and low comment engagement
|
||||
kind: submission
|
||||
condition: OR
|
||||
rules:
|
||||
- name: attr
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '>= 10%'
|
||||
window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
domains:
|
||||
- 'AGG:SELF'
|
||||
- condition: AND
|
||||
rules:
|
||||
- name: attrsub
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '>= 10%'
|
||||
thresholdOn: submissions
|
||||
window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
domains:
|
||||
- 'AGG:SELF'
|
||||
- name: lowOrOpComm
|
||||
kind: history
|
||||
criteriaJoin: OR
|
||||
criteria:
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: < 50%
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: '> 40% OP'
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: >-
|
||||
{{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}} of
|
||||
{{rules.attr.activityTotal}}{{rules.attrsub.activityTotal}} items
|
||||
({{rules.attr.window}}{{rules.attrsub.window}}){{#rules.loworopcomm.thresholdSummary}}
|
||||
=>
|
||||
{{rules.loworopcomm.thresholdSummary}}{{/rules.loworopcomm.thresholdSummary}}
|
||||
- kind: remove
|
||||
enable: false
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
        Your submission has been removed because it comprises 10% or more of your
|
||||
recent history
|
||||
({{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}}). This
|
||||
is against [reddit's self promotional
|
||||
guidelines.](https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit)
|
||||
distinguish: true
|
||||
dryRun: true
|
||||
@@ -14,7 +14,7 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FUserNoteCr
|
||||
|
||||
### Examples
|
||||
|
||||
* [Do not tag user with Good User note](/docs/examples/userNotes/usernoteFilter.json5)
|
||||
* Do not tag user with Good User note [JSON](/docs/examples/userNotes/usernoteFilter.json5) | [YAML](/docs/examples/userNotes/usernoteFilter.yaml)
|
||||
|
||||
## Action
|
||||
|
||||
@@ -23,4 +23,4 @@ A User Note can also be added to the Author of a Submission or Comment with the
|
||||
|
||||
### Examples
|
||||
|
||||
* [Add note on user doing self promotion](/docs/examples/userNotes/usernoteSP.json5)
|
||||
* Add note on user doing self promotion [JSON](/docs/examples/userNotes/usernoteSP.json5) | [YAML](/docs/examples/userNotes/usernoteSP.yaml)
|
||||
|
||||
27
docs/examples/userNotes/usernoteFilter.yaml
Normal file
27
docs/examples/userNotes/usernoteFilter.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
checks:
|
||||
- name: Self Promo Activities
|
||||
description: Tag SP only if user does not have good contributor user note
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: attr10all
|
||||
kind: attribution
|
||||
author:
|
||||
exclude:
|
||||
# the key of the usernote type to look for https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
# rule will not run if current usernote on Author is of type 'gooduser'
|
||||
- type: gooduser
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
window: 100
|
||||
actions:
|
||||
- kind: usernote
|
||||
# the key of usernote type
|
||||
# https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
type: spamwarn
|
||||
# content is mustache templated
|
||||
content: >-
|
||||
Self Promotion: {{rules.attr10all.titlesDelim}}
|
||||
{{rules.attr10sub.largestPercent}}%
|
||||
23
docs/examples/userNotes/usernoteSP.yaml
Normal file
23
docs/examples/userNotes/usernoteSP.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
checks:
|
||||
- name: Self Promo Activities
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
description: >-
|
||||
Check if any of Author's aggregated submission origins are >10% of entire
|
||||
history
|
||||
kind: submission
|
||||
rules:
|
||||
- name: attr10all
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
window: 100
|
||||
actions:
|
||||
- kind: usernote
|
||||
# the key of usernote type
|
||||
# https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
type: spamwarn
|
||||
content: >-
|
||||
Self Promotion: {{rules.attr10all.titlesDelim}}
|
||||
{{rules.attr10sub.largestPercent}}%
|
||||
@@ -14,8 +14,8 @@ This getting started guide is for **reddit moderators** -- that is, someone who
|
||||
|
||||
Before continuing with this guide you should first make sure you understand how a ContextMod works. Please review this documentation:
|
||||
|
||||
* [How It Works](/docs#how-it-works)
|
||||
* [Core Concepts](/docs#concepts)
|
||||
* [How It Works](/docs/README.md#how-it-works)
|
||||
* [Core Concepts](/docs/README.md#concepts)
|
||||
|
||||
# Choose A Bot
|
||||
|
||||
@@ -36,15 +36,16 @@ If the Operator has communicated that **you should add a bot they control as a m
|
||||
|
||||
___
|
||||
|
||||
Ensure that you are in communication with the **operator** for this bot. The bot **will not automatically accept a moderator invitation,** it must be manually done by the bot operator. This is an intentional barrier to ensure moderators and the operator are familiar with their respective needs and have some form of trust.
|
||||
Ensure that you are in communication with the **operator** of this bot. The bot **will only accept a moderator invitation if your subreddit has been whitelisted by the operator.** This is an intentional barrier to ensure moderators and the operator are familiar with their respective needs and have some form of trust.
|
||||
|
||||
Now invite the bot to moderate your subreddit. The bot should have at least these permissions:
|
||||
|
||||
* Manage Users
|
||||
* Manage Posts and Comments
|
||||
* Manage Flair
|
||||
|
||||
Additionally, the bot must have the **Manage Wiki Pages** permission if you plan to use [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes). If you are not planning on using this feature and do not want the bot to have this permission then you **must** ensure the bot has visibility to the configuration wiki page (detailed below).
|
||||
* Manage Wiki Pages
|
||||
* Required to read the moderator-only visible wiki page used to configure the bot
|
||||
* Required to read/write to [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes)
|
||||
|
||||
## Bring Your Own Bot (BYOB)
|
||||
|
||||
@@ -60,7 +61,7 @@ If the operator has communicated that **they want to use a bot you control** thi
|
||||
|
||||
**Cons:**
|
||||
|
||||
* More setup required for both moderators and operators
|
||||
* You must have access to the credentials for the reddit account (bot)
|
||||
|
||||
___
|
||||
|
||||
@@ -72,15 +73,28 @@ Review the information shown on the invite link webpage and then follow the dire
|
||||
|
||||
# Configuring the Bot
|
||||
|
||||
The bot's behavior is defined using a configuration, like automoderator, that is stored in the **wiki** of each subreddit it moderates.
|
||||
|
||||
The default location for this page is at `https://old.reddit.com/r/YOURSUBERDDIT/wiki/botconfig/contextbot`
|
||||
|
||||
## Setup wiki page
|
||||
|
||||
The bot automatically tries to create its configuration wiki page. You can find the result of this in the log for your subreddit in the web interface.
|
||||
|
||||
If this fails for some reason you can create the wiki page through the web interface by navigating to your subreddit's tab, opening the [built-in editor (click **View**)](/docs/screenshots/configBox.png), and following the directions in **Create configuration for...** link found there.
|
||||
|
||||
If neither of the above approaches work, or you do not wish to use the web interface, expand the section below for directions on how to manually setup the wiki page:
|
||||
|
||||
<details>
|
||||
|
||||
* Visit the wiki page of the subreddit you want the bot to moderate
|
||||
* The default location the bot checks for a configuration is at `https://old.reddit.com/r/YOURSUBERDDIT/wiki/botconfig/contextbot`
|
||||
* If the page does not exist create it
|
||||
* Ensure the wiki page visibility is restricted
|
||||
* On the wiki page click **settings** (**Page settings** in new reddit)
|
||||
* Check the box for **Only mods may edit and view** and then **save**
|
||||
* Alternatively, if you did not give the bot the **Manage Wiki Pages** permission then add it to the **allow users to edit page** setting
|
||||
|
||||
</details>
|
||||
|
||||
## Procure a configuration
|
||||
|
||||
@@ -94,25 +108,46 @@ Visit the [Examples](https://github.com/FoxxMD/context-mod/tree/master/docs/exam
|
||||
|
||||
After you have found a configuration to use as a starting point:
|
||||
|
||||
* In a new tab open the github page for the configuration you want ([example](/docs/examples/repeatActivity/crosspostSpamming.json5))
|
||||
* Click the **Raw** button, then select all and copy all of the text to your clipboard.
|
||||
* Copy the URL for the configuration file EX `https://github.com/FoxxMD/context-mod/blob/master/docs/examples/subredditReady/freekarma.json5` and either:
|
||||
* (Easiest) **Load** it into your [subreddit's built-in editor](#using-the-built-in-editor) and **Save**
|
||||
* or on the file's page, click the **Raw** button, select all and copy to your clipboard, and [manually save to your wiki page](#manually-saving)
|
||||
|
||||
### Build Your Own Config
|
||||
|
||||
Additionally, you can use [this schema editor](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) to build your configuration. The editor features a ton of handy features:
|
||||
CM comes equipped with a [configuration explorer](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) to help you see all available options, with descriptions and examples, that can be used in your configuration.
|
||||
|
||||
* fully annotated configuration data/structure
|
||||
* generated examples in json/yaml
|
||||
* built-in editor that automatically validates your config
|
||||
To create or edit a configuration you should use **CM's buit-in editor** which features:
|
||||
* syntax validation and formatting
|
||||
* full configuration validation with error highlighting, hints, and fixes
|
||||
* hover over properties to see documentation and examples
|
||||
|
||||
PROTIP: Find an example config to use as a starting point and then build on it using the editor.
|
||||
To use the editor either:
|
||||
* [use your subreddit's built-in editor](#using-the-built-in-editor)
|
||||
* or use the public editor at https://cm.foxxmd.dev/config
|
||||
|
||||
PROTIP: Find an [example config](#using-an-example-config) to use as a starting point and then build on it using the editor.
|
||||
|
||||
## Saving Your Configuration
|
||||
|
||||
* Open the wiki page you created in the [previous step](#setup-wiki-page) and click **edit**
|
||||
### Using the built-in Editor
|
||||
|
||||
In the web interface each subreddit's tab has access to the built-in editor. Use this built-in editor to automatically create, load, or save the configuration for that subreddit's wiki.
|
||||
|
||||
* Visit the tab for the subreddit you want to edit the configuration of
|
||||
* Open the [built-in editor by click **View**](/docs/screenshots/configBox.png)
|
||||
* Edit your configuration
|
||||
* Follow the directions on the **Save to r/..** link found at the top of the editor to automatically save your configuration
|
||||
|
||||
### Manually Saving
|
||||
|
||||
<details>
|
||||
|
||||
* Open the wiki page you created in the [wiki setup step](#setup-wiki-page) and click **edit**
|
||||
* Copy-paste your configuration into the wiki text box
|
||||
* Save the edited wiki page
|
||||
|
||||
</details>
|
||||
|
||||
___
|
||||
|
||||
The bot automatically checks for new configurations on your wiki page every 5 minutes. If your operator has the web interface accessible you may login there and force the config to update on your subreddit.
|
||||
|
||||
@@ -50,6 +50,18 @@ tsc -p .
|
||||
### [Heroku Quick Deploy](https://heroku.com/about)
|
||||
[](https://dashboard.heroku.com/new?template=https://github.com/FoxxMD/context-mod)
|
||||
|
||||
This template provides a **web** and **worker** dyno for heroku.
|
||||
|
||||
* **Web** -- Will run the bot **and** the web interface for ContextMod.
|
||||
* **Worker** -- Will run **just** the bot.
|
||||
|
||||
Be aware that Heroku's [free dyno plan](https://devcenter.heroku.com/articles/free-dyno-hours#dyno-sleeping) enacts some limits:
|
||||
|
||||
* A **Web** dyno will go to sleep (pause) after 30 minutes without web activity -- so your bot will ALSO go to sleep at this time
|
||||
* The **Worker** dyno **will not** go to sleep but you will NOT be able to access the web interface. You can, however, still see how Cm is running by reading the logs for the dyno.
|
||||
|
||||
If you want to use a free dyno it is recommended you perform first-time setup (bot authentication and configuration, testing, etc...) with the **Web** dyno, then SWITCH to a **Worker** dyno so it can run 24/7.
|
||||
|
||||
# Bot Authentication
|
||||
|
||||
Next you need to create a bot and authenticate it with Reddit. Follow the [bot authentication guide](/docs/botAuthentication.md) to complete this step.
|
||||
|
||||
237
docs/imageComparison.md
Normal file
237
docs/imageComparison.md
Normal file
@@ -0,0 +1,237 @@
|
||||
# Overview
|
||||
|
||||
ContextMod supports comparing image content, for the purpose of detecting duplicates, with two different but complimentary systems. Image comparison behavior is available for the following rules:
|
||||
|
||||
* [Recent Activity](/docs/examples/recentActivity)
|
||||
* Repeat Activity (In-progress)
|
||||
|
||||
To enable comparisons reference the example below (at the top-level of your rule) and configure as needed:
|
||||
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetection",
|
||||
"kind": "recentActivity",
|
||||
// Add block below...
|
||||
//
|
||||
"imageDetection": {
|
||||
// enables image comparison
|
||||
"enable": true,
|
||||
// The difference, in percentage, between the reference submission and the submissions being checked
|
||||
// must be less than this number to consider the images "the same"
|
||||
"threshold": 5,
|
||||
// optional
|
||||
// set the behavior for determining if image comparison should occur on a URL:
|
||||
//
|
||||
// "extension" => try image detection if URL ends in a known image extension (jpeg, gif, png, bmp, etc.)
|
||||
// "unknown" => try image detection if URL ends in known image extension OR there is no extension OR the extension is unknown (not video, html, doc, etc...)
|
||||
// "all" => ALWAYS try image detection, regardless of URL extension
|
||||
//
|
||||
// if fetchBehavior is not defined then "extension" is the default
|
||||
"fetchBehavior": "extension",
|
||||
},
|
||||
//
|
||||
// And above ^^^
|
||||
//...
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
name: ruleWithImageDetection
|
||||
kind: recentActivity
|
||||
enable: true
|
||||
threshold: 5
|
||||
fetchBehavior: extension
|
||||
|
||||
```
|
||||
|
||||
**Perceptual Hashing** (`hash`) and **Pixel Comparisons** (`pixel`) may be used at the same time. Refer to the documentation below to see how they interact.
|
||||
|
||||
**Note:** Regardless of `fetchBehavior`, if the response from the URL does not indicate it is an image then image detection will not occur. IE Response `Content-Type` must contain `image`
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Both image comparison systems require [Sharp](https://sharp.pixelplumbing.com/) as a dependency. Most modern operating systems running Node.js >= 12.13.0 do not require installing additional dependencies in order to use Sharp.
|
||||
|
||||
If you are using the docker image for ContextMod (`foxxmd/context-mod`) Sharp is built-in.
|
||||
|
||||
If you are installing ContextMod using npm then **Sharp should be installed automatically as an optional dependency.**
|
||||
|
||||
**If you do not want to install it automatically** install ContextMod with the following command:
|
||||
|
||||
```
|
||||
npm install --no-optional
|
||||
```
|
||||
|
||||
If you are using ContextMod as part of a larger project you may want to require Sharp in your own package:
|
||||
|
||||
```
|
||||
npm install sharp@0.29.1 --save
|
||||
```
|
||||
|
||||
# Comparison Systems
|
||||
|
||||
## Perceptual Hashing
|
||||
|
||||
[Perceptual Hashing](https://en.wikipedia.org/wiki/Perceptual_hashing) creates a text fingerprint of an image by:
|
||||
|
||||
* Dividing up the image into a grid
|
||||
* Using an algorithm to derive a value from the pixels in each grid
|
||||
* Adding up all the values to create a unique string (the "fingerprint")
|
||||
|
||||
An example of how a perceptual hash can work [can be found here.](https://www.hackerfactor.com/blog/?/archives/432-Looks-Like-It.html)
|
||||
|
||||
ContextMod uses [blockhash-js](https://github.com/commonsmachinery/blockhash-js) which is a javascript implementation of the algorithm described in the paper [Block Mean Value Based Image Perceptual Hashing by Bian Yang, Fan Gu and Xiamu Niu.](https://ieeexplore.ieee.org/document/4041692)
|
||||
|
||||
|
||||
**Advantages**
|
||||
|
||||
* Low memory requirements and not CPU intensive
|
||||
* Does not require any image transformations
|
||||
* Hash results can be stored to make future comparisons even faster and skip downloading images (cached by url)
|
||||
* Resolution-independent
|
||||
|
||||
**Disadvantages**
|
||||
|
||||
* Hash is weak when image differences are based only on color
|
||||
* Hash is weak when image contains lots of text
|
||||
* Higher accuracy requires larger calculation (more bits required)
|
||||
|
||||
**When should I use it?**
|
||||
|
||||
* General duplicate detection
|
||||
* Comparing many images
|
||||
* Comparing the same images often
|
||||
|
||||
### How To Use
|
||||
|
||||
If `imageDetection.enable` is `true` then hashing is enabled by default and no further configuration is required.
|
||||
|
||||
To further configure hashing refer to this code block:
|
||||
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetectionAndConfiguredHashing",
|
||||
"kind": "recentActivity",
|
||||
"imageDetection": {
|
||||
"enable": true,
|
||||
// Add block below...
|
||||
//
|
||||
"hash": {
|
||||
// enable or disable hash comparisons (enabled by default)
|
||||
"enable": true,
|
||||
// determines accuracy of hash and granularity of hash comparison (comparison to other hashes)
|
||||
// the higher the bits the more accurate the comparison
|
||||
//
|
||||
// NOTE: Hashes of different sizes (bits) cannot be compared. If you are caching hashes make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.
|
||||
"bits": 32,
|
||||
// default is 32 if not defined
|
||||
//
|
||||
// number of seconds to cache an image hash
|
||||
"ttl": 60,
|
||||
// default is 60 if not defined
|
||||
//
|
||||
// "High Confidence" Threshold
|
||||
// If the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur
|
||||
//
|
||||
// Defaults to the parent-level `threshold` value if not present
|
||||
//
|
||||
// Use null if you want pixel comparison to ALWAYS occur (softThreshold must be present)
|
||||
"hardThreshold": 5,
|
||||
//
|
||||
// "Low Confidence" Threshold -- only used if `pixel` is enabled
|
||||
// If the difference in comparison is:
|
||||
//
|
||||
// 1) equal to or less than this value and
|
||||
// 2) the value is greater than `hardThreshold`
|
||||
//
|
||||
// the images will be compared using the `pixel` method
|
||||
"softThreshold": 0,
|
||||
},
|
||||
//
|
||||
// And above ^^^
|
||||
//"pixel": {...}
|
||||
}
|
||||
//...
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
name: ruleWithImageDetectionAndConfiguredHashing
|
||||
kind: recentActivity
|
||||
imageDetection:
|
||||
enable: true
|
||||
hash:
|
||||
enable: true
|
||||
bits: 32
|
||||
ttl: 60
|
||||
hardThreshold: 5
|
||||
softThreshold: 0
|
||||
```
|
||||
|
||||
## Pixel Comparison
|
||||
|
||||
This approach is as straight forward as it sounds. Both images are compared, pixel by pixel, to determine the difference between the two. ContextMod uses [pixelmatch](https://github.com/mapbox/pixelmatch) to do the comparison.
|
||||
|
||||
**Advantages**
|
||||
|
||||
* Extremely accurate, high-confidence on difference percentage
|
||||
* Strong when comparing text-based images or color-only differences
|
||||
|
||||
**Disadvantages**
|
||||
|
||||
* High memory requirements (10-30MB per comparison) and CPU intensive
|
||||
* Weak against similar images with different aspect ratios
|
||||
* Requires image transformations (resize, crop) before comparison
|
||||
* Can only store image-to-image results (no single image fingerprints)
|
||||
|
||||
**When should I use it?**
|
||||
|
||||
* Require very high accuracy in comparison results
|
||||
* Comparing mostly text-based images or subtle color/detail differences
|
||||
* As a secondary, high-confidence confirmation of comparison result after hashing
|
||||
|
||||
### How To Use
|
||||
|
||||
By default pixel comparisons **are not enabled.** They must be explicitly enabled in configuration.
|
||||
|
||||
Pixel comparisons will be performed in either of these scenarios:
|
||||
|
||||
* pixel is enabled, hashing is enabled and `hash.softThreshold` is defined
|
||||
* When a comparison occurs that is less different than `softThreshold` but more different then `hardThreshold` (or `"hardThreshold": null`), then pixel comparison will occur as a high-confidence check
|
||||
* Example
|
||||
* hash comparison => 7% difference
|
||||
* `"softThreshold": 10`
|
||||
* `"hardThreshold": 4`
|
||||
* `hash.enable` is `false` and `pixel.enable` is true
|
||||
* hashing is skipped entirely and only pixel comparisons are performed
|
||||
|
||||
To configure pixel comparisons refer to this code block:
|
||||
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetectionAndPixelEnabled",
|
||||
"kind": "recentActivity",
|
||||
"imageDetection": {
|
||||
//"hash": {...}
|
||||
"pixel": {
|
||||
// enable or disable pixel comparisons (disabled by default)
|
||||
"enable": true,
|
||||
// if the comparison difference percentage is equal to or less than this value the images are considered the same
|
||||
//
|
||||
// if not defined the value from imageDetection.threshold will be used
|
||||
"threshold": 5
|
||||
}
|
||||
},
|
||||
//...
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
name: ruleWithImageDetectionAndPixelEnabled
|
||||
kind: recentActivity
|
||||
imageDetection:
|
||||
pixel:
|
||||
enable: true
|
||||
threshold: 5
|
||||
```
|
||||
@@ -121,6 +121,16 @@ Below are examples of the minimum required config to run the application using a
|
||||
Using **FILE**
|
||||
<details>
|
||||
|
||||
YAML
|
||||
```yaml
|
||||
bots:
|
||||
- credentials:
|
||||
clientId: f4b4df1c7b2
|
||||
clientSecret: 34v5q1c56ub
|
||||
refreshToken: 34_f1w1v4
|
||||
accessToken: p75_1c467b2
|
||||
```
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"bots": [
|
||||
@@ -175,6 +185,11 @@ An example of using multiple configuration levels together IE all are provided t
|
||||
}
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
logging:
|
||||
level: debug
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
@@ -220,6 +235,30 @@ See the [Architecture Docs](/docs/serverClientArchitecture.md) for more informat
|
||||
|
||||
<details>
|
||||
|
||||
YAML
|
||||
```yaml
|
||||
bots:
|
||||
- credentials:
|
||||
clientId: f4b4df1c7b2
|
||||
clientSecret: 34v5q1c56ub
|
||||
refreshToken: 34_f1w1v4
|
||||
accessToken: p75_1c467b2
|
||||
web:
|
||||
credentials:
|
||||
clientId: f4b4df1c7b2
|
||||
clientSecret: 34v5q1c56ub
|
||||
redirectUri: 'http://localhost:8085/callback'
|
||||
clients:
|
||||
# server application running on this same CM instance
|
||||
- host: 'localhost:8095'
|
||||
secret: localSecret
|
||||
# a server application running somewhere else
|
||||
- host: 'mySecondContextMod.com:8095'
|
||||
secret: anotherSecret
|
||||
api:
|
||||
secret: localSecret
|
||||
```
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"bots": [
|
||||
@@ -289,3 +328,14 @@ A caching object in the json configuration:
|
||||
}
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
provider:
|
||||
store: memory
|
||||
ttl: 60
|
||||
max: 500
|
||||
host: localhost
|
||||
port: 6379
|
||||
auth_pass: null
|
||||
db: 0
|
||||
```
|
||||
|
||||
BIN
docs/screenshots/configBox.png
Normal file
BIN
docs/screenshots/configBox.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 45 KiB |
29
heroku.Dockerfile
Normal file
29
heroku.Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
||||
FROM node:16-alpine3.14
|
||||
|
||||
ENV TZ=Etc/GMT
|
||||
|
||||
# vips required to run sharp library for image comparison
|
||||
RUN echo "http://dl-4.alpinelinux.org/alpine/v3.14/community" >> /etc/apk/repositories \
|
||||
&& apk --update add vips
|
||||
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
WORKDIR /usr/app
|
||||
|
||||
COPY package*.json ./
|
||||
COPY tsconfig.json .
|
||||
|
||||
RUN npm install
|
||||
|
||||
ADD . /usr/app
|
||||
|
||||
RUN npm run build
|
||||
|
||||
ENV NPM_CONFIG_LOGLEVEL debug
|
||||
|
||||
ARG log_dir=/home/node/logs
|
||||
RUN mkdir -p $log_dir
|
||||
VOLUME $log_dir
|
||||
ENV LOG_DIR=$log_dir
|
||||
|
||||
CMD [ "node", "src/index.js", "run", "all", "--port $PORT"]
|
||||
@@ -1,3 +1,4 @@
|
||||
build:
|
||||
docker:
|
||||
worker: Dockerfile
|
||||
web: heroku.Dockerfile
|
||||
worker: heroku.Dockerfile
|
||||
|
||||
4312
package-lock.json
generated
4312
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
18
package.json
18
package.json
@@ -7,7 +7,6 @@
|
||||
"test": "echo \"Error: no tests installed\" && exit 1",
|
||||
"build": "tsc",
|
||||
"start": "node src/index.js run",
|
||||
"guard": "ts-auto-guard src/JsonConfig.ts",
|
||||
"schema": "npm run -s schema-app & npm run -s schema-ruleset & npm run -s schema-rule & npm run -s schema-action & npm run -s schema-config",
|
||||
"schema-app": "typescript-json-schema tsconfig.json JSONConfig --out src/Schema/App.json --required --tsNodeRegister --refs",
|
||||
"schema-ruleset": "typescript-json-schema tsconfig.json RuleSetJson --out src/Schema/RuleSet.json --required --tsNodeRegister --refs",
|
||||
@@ -26,8 +25,10 @@
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@awaitjs/express": "^0.8.0",
|
||||
"@googleapis/youtube": "^2.0.0",
|
||||
"@stdlib/regexp-regexp": "^0.0.6",
|
||||
"ajv": "^7.2.4",
|
||||
"ansi-regex": ">=5.0.1",
|
||||
"async": "^3.2.0",
|
||||
"autolinker": "^3.14.3",
|
||||
"body-parser": "^1.19.0",
|
||||
@@ -50,30 +51,34 @@
|
||||
"got": "^11.8.2",
|
||||
"he": "^1.2.0",
|
||||
"http-proxy": "^1.18.1",
|
||||
"image-size": "^1.0.0",
|
||||
"js-yaml": "^4.1.0",
|
||||
"json5": "^2.2.0",
|
||||
"jsonwebtoken": "^8.5.1",
|
||||
"leven": "^3.1.0",
|
||||
"lodash": "^4.17.21",
|
||||
"lru-cache": "^6.0.0",
|
||||
"monaco-editor": "^0.27.0",
|
||||
"mustache": "^4.2.0",
|
||||
"node-fetch": "^2.6.1",
|
||||
"normalize-url": "^6.1.0",
|
||||
"object-hash": "^2.2.0",
|
||||
"p-event": "^4.2.0",
|
||||
"p-map": "^4.0.0",
|
||||
"passport": "^0.4.1",
|
||||
"passport-custom": "^1.1.1",
|
||||
"passport-jwt": "^4.0.0",
|
||||
"pixelmatch": "^5.2.1",
|
||||
"pretty-print-json": "^1.0.3",
|
||||
"safe-stable-stringify": "^1.1.1",
|
||||
"snoostorm": "^1.5.2",
|
||||
"snoowrap": "^1.23.0",
|
||||
"socket.io": "^4.1.3",
|
||||
"string-similarity": "^4.0.4",
|
||||
"tcp-port-used": "^1.0.2",
|
||||
"triple-beam": "^1.3.0",
|
||||
"typescript": "^4.3.4",
|
||||
"webhook-discord": "^3.7.7",
|
||||
"winston": "FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
|
||||
"winston": "github:FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
|
||||
"winston-daily-rotate-file": "^4.5.5",
|
||||
"winston-duplex": "^0.1.1",
|
||||
"winston-transport": "^4.4.0",
|
||||
@@ -101,10 +106,15 @@
|
||||
"@types/object-hash": "^2.1.0",
|
||||
"@types/passport": "^1.0.7",
|
||||
"@types/passport-jwt": "^3.0.6",
|
||||
"@types/pixelmatch": "^5.2.4",
|
||||
"@types/sharp": "^0.29.2",
|
||||
"@types/string-similarity": "^4.0.0",
|
||||
"@types/tcp-port-used": "^1.0.0",
|
||||
"@types/triple-beam": "^1.3.2",
|
||||
"ts-auto-guard": "*",
|
||||
"ts-json-schema-generator": "^0.93.0",
|
||||
"typescript-json-schema": "^0.50.1"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"sharp": "^0.29.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,10 +10,11 @@ import ApproveAction, {ApproveActionConfig} from "./ApproveAction";
|
||||
import BanAction, {BanActionJson} from "./BanAction";
|
||||
import {MessageAction, MessageActionJson} from "./MessageAction";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {UserFlairAction, UserFlairActionJson} from './UserFlairAction';
|
||||
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
|
||||
|
||||
export function actionFactory
|
||||
(config: ActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Action {
|
||||
(config: ActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: ExtendedSnoowrap): Action {
|
||||
switch (config.kind) {
|
||||
case 'comment':
|
||||
return new CommentAction({...config as CommentActionJson, logger, subredditName, resources, client});
|
||||
@@ -25,6 +26,8 @@ export function actionFactory
|
||||
return new ReportAction({...config as ReportActionJson, logger, subredditName, resources, client});
|
||||
case 'flair':
|
||||
return new FlairAction({...config as FlairActionJson, logger, subredditName, resources, client});
|
||||
case 'userflair':
|
||||
return new UserFlairAction({...config as UserFlairActionJson, logger, subredditName, resources, client});
|
||||
case 'approve':
|
||||
return new ApproveAction({...config as ApproveActionConfig, logger, subredditName, resources, client});
|
||||
case 'usernote':
|
||||
|
||||
@@ -11,6 +11,7 @@ export class ApproveAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const touchedEntities = [];
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
if (item.approved) {
|
||||
@@ -23,11 +24,12 @@ export class ApproveAction extends Action {
|
||||
}
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.approve();
|
||||
touchedEntities.push(await item.approve());
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
touchedEntities
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,6 +39,7 @@ export class BanAction extends Action {
|
||||
const renderedBody = content === undefined ? undefined : await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
const renderedContent = renderedBody === undefined ? undefined : `${renderedBody}${await this.resources.generateFooter(item, this.footer)}`;
|
||||
|
||||
const touchedEntities = [];
|
||||
let banPieces = [];
|
||||
banPieces.push(`Message: ${renderedContent === undefined ? 'None' : `${renderedContent.length > 100 ? `\r\n${renderedContent}` : renderedContent}`}`);
|
||||
banPieces.push(`Reason: ${this.reason || 'None'}`);
|
||||
@@ -50,18 +51,20 @@ export class BanAction extends Action {
|
||||
// @ts-ignore
|
||||
const fetchedSub = await item.subreddit.fetch();
|
||||
const fetchedName = await item.author.name;
|
||||
await fetchedSub.banUser({
|
||||
const bannedUser = await fetchedSub.banUser({
|
||||
name: fetchedName,
|
||||
banMessage: renderedContent === undefined ? undefined : renderedContent,
|
||||
banReason: this.reason,
|
||||
banNote: this.note,
|
||||
duration: this.duration
|
||||
});
|
||||
touchedEntities.push(bannedUser);
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`
|
||||
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`,
|
||||
touchedEntities
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,16 +51,19 @@ export class CommentAction extends Action {
|
||||
result: 'Cannot comment because Item is archived'
|
||||
};
|
||||
}
|
||||
const touchedEntities = [];
|
||||
let reply: Comment;
|
||||
if(!dryRun) {
|
||||
// @ts-ignore
|
||||
reply = await item.reply(renderedContent);
|
||||
touchedEntities.push(reply);
|
||||
}
|
||||
if (this.lock) {
|
||||
if (!dryRun) {
|
||||
// snoopwrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
await item.lock();
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
}
|
||||
if (this.distinguish && !dryRun) {
|
||||
@@ -78,7 +81,8 @@ export class CommentAction extends Action {
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: `${modifierStr}${this.lock ? ' - Locked Author\'s Activity - ' : ''}${truncateStringToLength(100)(body)}`
|
||||
result: `${modifierStr}${this.lock ? ' - Locked Author\'s Activity - ' : ''}${truncateStringToLength(100)(body)}`,
|
||||
touchedEntities,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ export class LockAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const touchedEntities = [];
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
if (item.locked) {
|
||||
@@ -25,10 +26,12 @@ export class LockAction extends Action {
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
await item.lock();
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true
|
||||
success: true,
|
||||
touchedEntities
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,13 +58,13 @@ export class MessageAction extends Action {
|
||||
if(this.to !== undefined) {
|
||||
// parse to value
|
||||
try {
|
||||
const entityData = parseRedditEntity(this.to);
|
||||
const entityData = parseRedditEntity(this.to, 'user');
|
||||
if(entityData.type === 'user') {
|
||||
recipient = entityData.name;
|
||||
} else {
|
||||
recipient = `/r/${entityData.name}`;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error(`'to' field for message was not in a valid format. See ${REDDIT_ENTITY_REGEX_URL} for valid examples`);
|
||||
this.logger.error(err);
|
||||
err.logged = true;
|
||||
|
||||
@@ -12,6 +12,7 @@ export class RemoveAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const touchedEntities = [];
|
||||
// issue with snoowrap typings, doesn't think prop exists on Submission
|
||||
// @ts-ignore
|
||||
if (activityIsRemoved(item)) {
|
||||
@@ -24,11 +25,13 @@ export class RemoveAction extends Action {
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.remove();
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
touchedEntities
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,15 +29,20 @@ export class ReportAction extends Action {
|
||||
const renderedContent = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
this.logger.verbose(`Contents:\r\n${renderedContent}`);
|
||||
const truncatedContent = reportTrunc(renderedContent);
|
||||
const touchedEntities = [];
|
||||
if(!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.report({reason: truncatedContent});
|
||||
// due to reddit not updating this in response (maybe)?? just increment stale activity
|
||||
item.num_reports++;
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: truncatedContent
|
||||
result: truncatedContent,
|
||||
touchedEntities
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,23 @@
|
||||
import {SubmissionActionConfig} from "./index";
|
||||
import Action, {ActionJson, ActionOptions} from "../index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RuleResult} from "../../Rule";
|
||||
import {ActionProcessResult} from "../../Common/interfaces";
|
||||
import Submission from 'snoowrap/dist/objects/Submission';
|
||||
import Comment from 'snoowrap/dist/objects/Comment';
|
||||
|
||||
export class FlairAction extends Action {
|
||||
text: string;
|
||||
css: string;
|
||||
flair_template_id: string;
|
||||
|
||||
constructor(options: FlairActionOptions) {
|
||||
super(options);
|
||||
if (options.text === undefined && options.css === undefined) {
|
||||
throw new Error('Must define either text or css on FlairAction');
|
||||
if (options.text === undefined && options.css === undefined && options.flair_template_id === undefined) {
|
||||
throw new Error('Must define either text+css or flair_template_id on FlairAction');
|
||||
}
|
||||
this.text = options.text || '';
|
||||
this.css = options.css || '';
|
||||
this.flair_template_id = options.flair_template_id || '';
|
||||
}
|
||||
|
||||
getKind() {
|
||||
@@ -34,8 +37,12 @@ export class FlairAction extends Action {
|
||||
this.logger.verbose(flairSummary);
|
||||
if (item instanceof Submission) {
|
||||
if(!this.dryRun) {
|
||||
// @ts-ignore
|
||||
await item.assignFlair({text: this.text, cssClass: this.css})
|
||||
if (this.flair_template_id) {
|
||||
await item.selectFlair({flair_template_id: this.flair_template_id}).then(() => {});
|
||||
} else {
|
||||
await item.assignFlair({text: this.text, cssClass: this.css}).then(() => {});
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
this.logger.warn('Cannot flair Comment');
|
||||
@@ -60,12 +67,16 @@ export class FlairAction extends Action {
|
||||
export interface FlairActionConfig extends SubmissionActionConfig {
|
||||
/**
|
||||
* The text of the flair to apply
|
||||
* */
|
||||
* */
|
||||
text?: string,
|
||||
/**
|
||||
* The text of the css class of the flair to apply
|
||||
* */
|
||||
css?: string,
|
||||
/**
|
||||
* Flair template ID to assign
|
||||
* */
|
||||
flair_template_id?: string,
|
||||
}
|
||||
|
||||
export interface FlairActionOptions extends FlairActionConfig,ActionOptions {
|
||||
@@ -76,5 +87,5 @@ export interface FlairActionOptions extends FlairActionConfig,ActionOptions {
|
||||
* Flair the Submission
|
||||
* */
|
||||
export interface FlairActionJson extends FlairActionConfig, ActionJson {
|
||||
kind: 'flair'
|
||||
kind: 'flair'
|
||||
}
|
||||
|
||||
109
src/Action/UserFlairAction.ts
Normal file
109
src/Action/UserFlairAction.ts
Normal file
@@ -0,0 +1,109 @@
|
||||
import Action, {ActionConfig, ActionJson, ActionOptions} from './index';
|
||||
import {Comment, RedditUser, Submission} from 'snoowrap';
|
||||
import {RuleResult} from '../Rule';
|
||||
import {ActionProcessResult} from '../Common/interfaces';
|
||||
|
||||
export class UserFlairAction extends Action {
|
||||
text?: string;
|
||||
css?: string;
|
||||
flair_template_id?: string;
|
||||
|
||||
constructor(options: UserFlairActionOptions) {
|
||||
super(options);
|
||||
|
||||
this.text = options.text === null || options.text === '' ? undefined : options.text;
|
||||
this.css = options.css === null || options.text === '' ? undefined : options.text;
|
||||
this.flair_template_id = options.flair_template_id === null || options.flair_template_id === '' ? undefined : options.flair_template_id;
|
||||
}
|
||||
|
||||
getKind() {
|
||||
return 'User Flair';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
let flairParts = [];
|
||||
|
||||
if (this.flair_template_id !== undefined) {
|
||||
flairParts.push(`Flair template ID: ${this.flair_template_id}`)
|
||||
if(this.text !== undefined || this.css !== undefined) {
|
||||
this.logger.warn('Text/CSS properties will be ignored since a flair template is specified');
|
||||
}
|
||||
} else {
|
||||
if (this.text !== undefined) {
|
||||
flairParts.push(`Text: ${this.text}`);
|
||||
}
|
||||
if (this.css !== undefined) {
|
||||
flairParts.push(`CSS: ${this.css}`);
|
||||
}
|
||||
}
|
||||
|
||||
const flairSummary = flairParts.length === 0 ? 'Unflair user' : flairParts.join(' | ');
|
||||
this.logger.verbose(flairSummary);
|
||||
|
||||
if (!this.dryRun) {
|
||||
if (this.flair_template_id !== undefined) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.assignUserFlairByTemplateId({
|
||||
subredditName: item.subreddit.display_name,
|
||||
flairTemplateId: this.flair_template_id,
|
||||
username: item.author.name,
|
||||
});
|
||||
} catch (err: any) {
|
||||
this.logger.error('Either the flair template ID is incorrect or you do not have permission to access it.');
|
||||
throw err;
|
||||
}
|
||||
} else if (this.text === undefined && this.css === undefined) {
|
||||
// @ts-ignore
|
||||
await item.subreddit.deleteUserFlair(item.author.name);
|
||||
} else {
|
||||
// @ts-ignore
|
||||
await item.author.assignFlair({
|
||||
subredditName: item.subreddit.display_name,
|
||||
cssClass: this.css,
|
||||
text: this.text,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: flairSummary,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Flair the Author of an Activity
|
||||
*
|
||||
* Leave all properties blank or null to remove a User's existing flair
|
||||
* */
|
||||
export interface UserFlairActionConfig extends ActionConfig {
|
||||
/**
|
||||
* The text of the flair to apply
|
||||
* */
|
||||
text?: string,
|
||||
/**
|
||||
* The text of the css class of the flair to apply
|
||||
* */
|
||||
css?: string,
|
||||
/**
|
||||
* Flair template to pick.
|
||||
*
|
||||
* **Note:** If this template is used text/css are ignored
|
||||
* */
|
||||
flair_template_id?: string;
|
||||
}
|
||||
|
||||
export interface UserFlairActionOptions extends UserFlairActionConfig, ActionOptions {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Flair the Submission
|
||||
* */
|
||||
export interface UserFlairActionJson extends UserFlairActionConfig, ActionJson {
|
||||
kind: 'userflair'
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {Comment, Submission} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
@@ -6,12 +6,13 @@ import {ActionProcessResult, ActionResult, ChecksActivityState, TypedActivitySta
|
||||
import Author, {AuthorOptions} from "../Author/Author";
|
||||
import {mergeArr} from "../util";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
|
||||
|
||||
export abstract class Action {
|
||||
name?: string;
|
||||
logger: Logger;
|
||||
resources: SubredditResources;
|
||||
client: Snoowrap
|
||||
client: ExtendedSnoowrap;
|
||||
authorIs: AuthorOptions;
|
||||
itemIs: TypedActivityStates;
|
||||
dryRun: boolean;
|
||||
@@ -98,7 +99,7 @@ export abstract class Action {
|
||||
actRes.run = true;
|
||||
const results = await this.process(item, ruleResults, runtimeDryrun);
|
||||
return {...actRes, ...results};
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Encountered error while running`, err);
|
||||
}
|
||||
@@ -114,8 +115,8 @@ export abstract class Action {
|
||||
export interface ActionOptions extends ActionConfig {
|
||||
logger: Logger;
|
||||
subredditName: string;
|
||||
resources: SubredditResources
|
||||
client: Snoowrap
|
||||
resources: SubredditResources;
|
||||
client: ExtendedSnoowrap;
|
||||
}
|
||||
|
||||
export interface ActionConfig extends ChecksActivityState {
|
||||
@@ -162,7 +163,7 @@ export interface ActionJson extends ActionConfig {
|
||||
/**
|
||||
* The type of action that will be performed
|
||||
*/
|
||||
kind: 'comment' | 'lock' | 'remove' | 'report' | 'approve' | 'ban' | 'flair' | 'usernote' | 'message'
|
||||
kind: 'comment' | 'lock' | 'remove' | 'report' | 'approve' | 'ban' | 'flair' | 'usernote' | 'message' | 'userflair'
|
||||
}
|
||||
|
||||
export const isActionJson = (obj: object): obj is ActionJson => {
|
||||
|
||||
@@ -4,6 +4,7 @@ import {getLogger} from "./Utils/loggerFactory";
|
||||
import {Invokee, OperatorConfig} from "./Common/interfaces";
|
||||
import Bot from "./Bot";
|
||||
import LoggedError from "./Utils/LoggedError";
|
||||
import {sleep} from "./util";
|
||||
|
||||
export class App {
|
||||
|
||||
@@ -66,13 +67,14 @@ export class App {
|
||||
try {
|
||||
await b.testClient();
|
||||
await b.buildManagers();
|
||||
await sleep(2000);
|
||||
b.runManagers(causedBy).catch((err) => {
|
||||
this.logger.error(`Unexpected error occurred while running Bot ${b.botName}. Bot must be re-built to restart`);
|
||||
if (!err.logged || !(err instanceof LoggedError)) {
|
||||
this.logger.error(err);
|
||||
}
|
||||
});
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (b.error === undefined) {
|
||||
b.error = err.message;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import {DurationComparor, UserNoteCriteria} from "../Rule";
|
||||
import {CompareValue, CompareValueOrPercent} from "../Common/interfaces";
|
||||
import {UserNoteCriteria} from "../Rule";
|
||||
import {CompareValue, CompareValueOrPercent, DurationComparor} from "../Common/interfaces";
|
||||
import {parseStringToRegex} from "../util";
|
||||
|
||||
/**
|
||||
* If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.
|
||||
@@ -106,6 +107,17 @@ export interface AuthorCriteria {
|
||||
* This is determined by trying to retrieve the author's profile. If a 404 is returned it is likely they are shadowbanned
|
||||
* */
|
||||
shadowBanned?: boolean
|
||||
|
||||
/**
|
||||
* An (array of) string/regular expression to test contents of an Author's profile description against
|
||||
*
|
||||
* If no flags are specified then the **insensitive** flag is used by default
|
||||
*
|
||||
* If using an array then if **any** value in the array passes the description test passes
|
||||
*
|
||||
* @examples [["/test$/i", "look for this string literal"]]
|
||||
* */
|
||||
description?: string | string[]
|
||||
}
|
||||
|
||||
export class Author implements AuthorCriteria {
|
||||
@@ -120,6 +132,7 @@ export class Author implements AuthorCriteria {
|
||||
totalKarma?: string;
|
||||
verified?: boolean;
|
||||
shadowBanned?: boolean;
|
||||
description?: string[];
|
||||
|
||||
constructor(options: AuthorCriteria) {
|
||||
this.name = options.name;
|
||||
@@ -132,6 +145,7 @@ export class Author implements AuthorCriteria {
|
||||
this.linkKarma = options.linkKarma;
|
||||
this.totalKarma = options.totalKarma;
|
||||
this.shadowBanned = options.shadowBanned;
|
||||
this.description = options.description === undefined ? undefined : Array.isArray(options.description) ? options.description : [options.description];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
220
src/Bot/index.ts
220
src/Bot/index.ts
@@ -1,4 +1,4 @@
|
||||
import Snoowrap, {Subreddit} from "snoowrap";
|
||||
import Snoowrap, {Comment, Submission, Subreddit} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {Duration} from "dayjs/plugin/duration";
|
||||
@@ -15,16 +15,18 @@ import {
|
||||
snooLogWrapper
|
||||
} from "../util";
|
||||
import {Manager} from "../Subreddit/Manager";
|
||||
import {ProxiedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
import {ExtendedSnoowrap, ProxiedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
import {ModQueueStream, UnmoderatedStream} from "../Subreddit/Streams";
|
||||
import {BotResourcesManager} from "../Subreddit/SubredditResources";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
import pEvent from "p-event";
|
||||
import SimpleError from "../Utils/SimpleError";
|
||||
import {isRateLimitError, isStatusError} from "../Utils/Errors";
|
||||
|
||||
|
||||
class Bot {
|
||||
|
||||
client!: Snoowrap;
|
||||
client!: ExtendedSnoowrap;
|
||||
logger!: Logger;
|
||||
wikiLocation: string;
|
||||
dryRun?: true | undefined;
|
||||
@@ -42,6 +44,7 @@ class Bot {
|
||||
nannyRunning: boolean = false;
|
||||
nextNannyCheck: Dayjs = dayjs().add(10, 'second');
|
||||
nannyRetryHandler: Function;
|
||||
managerRetryHandler: Function;
|
||||
nextExpiration: Dayjs = dayjs();
|
||||
botName?: string;
|
||||
botLink?: string;
|
||||
@@ -49,6 +52,9 @@ class Bot {
|
||||
maxWorkers: number;
|
||||
startedAt: Dayjs = dayjs();
|
||||
sharedModqueue: boolean = false;
|
||||
streamListedOnce: string[] = [];
|
||||
|
||||
stagger: number;
|
||||
|
||||
apiSample: number[] = [];
|
||||
apiRollingAvg: number = 0;
|
||||
@@ -80,10 +86,12 @@ class Bot {
|
||||
heartbeatInterval,
|
||||
},
|
||||
credentials: {
|
||||
clientId,
|
||||
clientSecret,
|
||||
refreshToken,
|
||||
accessToken,
|
||||
reddit: {
|
||||
clientId,
|
||||
clientSecret,
|
||||
refreshToken,
|
||||
accessToken,
|
||||
},
|
||||
},
|
||||
snoowrap: {
|
||||
proxy,
|
||||
@@ -91,6 +99,7 @@ class Bot {
|
||||
},
|
||||
polling: {
|
||||
sharedMod,
|
||||
stagger = 2000,
|
||||
},
|
||||
queue: {
|
||||
maxWorkers,
|
||||
@@ -166,26 +175,29 @@ class Bot {
|
||||
}
|
||||
|
||||
try {
|
||||
this.client = proxy === undefined ? new Snoowrap(creds) : new ProxiedSnoowrap({...creds, proxy});
|
||||
this.client = proxy === undefined ? new ExtendedSnoowrap(creds) : new ProxiedSnoowrap({...creds, proxy});
|
||||
this.client.config({
|
||||
warnings: true,
|
||||
maxRetryAttempts: 5,
|
||||
maxRetryAttempts: 2,
|
||||
debug,
|
||||
logger: snooLogWrapper(this.logger.child({labels: ['Snoowrap']}, mergeArr)),
|
||||
continueAfterRatelimitError: true,
|
||||
continueAfterRatelimitError: false,
|
||||
});
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(this.error === undefined) {
|
||||
this.error = err.message;
|
||||
this.logger.error(err);
|
||||
}
|
||||
}
|
||||
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 1}, this.logger);
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 2}, this.logger);
|
||||
this.nannyRetryHandler = createRetryHandler({maxRequestRetry: 5, maxOtherRetry: 1}, this.logger);
|
||||
this.managerRetryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 8, waitOnRetry: false, clearRetryCountAfter: 2}, this.logger);
|
||||
|
||||
this.stagger = stagger ?? 2000;
|
||||
|
||||
const modStreamErrorListener = (name: string) => async (err: any) => {
|
||||
this.logger.error('Polling error occurred', err);
|
||||
this.logger.error(`Polling error occurred on stream ${name.toUpperCase()}`, err);
|
||||
const shouldRetry = await retryHandler(err);
|
||||
if(shouldRetry) {
|
||||
defaultUnmoderatedStream.startInterval();
|
||||
@@ -199,12 +211,32 @@ class Bot {
|
||||
}
|
||||
}
|
||||
|
||||
const defaultUnmoderatedStream = new UnmoderatedStream(this.client, {subreddit: 'mod'});
|
||||
const modStreamListingListener = (name: string) => async (listing: (Comment|Submission)[]) => {
|
||||
// dole out in order they were received
|
||||
if(!this.streamListedOnce.includes(name)) {
|
||||
this.streamListedOnce.push(name);
|
||||
return;
|
||||
}
|
||||
for(const i of listing) {
|
||||
const foundManager = this.subManagers.find(x => x.subreddit.display_name === i.subreddit.display_name && x.modStreamCallbacks.get(name) !== undefined);
|
||||
if(foundManager !== undefined) {
|
||||
foundManager.modStreamCallbacks.get(name)(i);
|
||||
if(stagger !== undefined) {
|
||||
await sleep(stagger);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
const defaultUnmoderatedStream = new UnmoderatedStream(this.client, {subreddit: 'mod', limit: 100, clearProcessed: { size: 100, retain: 100 }});
|
||||
// @ts-ignore
|
||||
defaultUnmoderatedStream.on('error', modStreamErrorListener('unmoderated'));
|
||||
const defaultModqueueStream = new ModQueueStream(this.client, {subreddit: 'mod'});
|
||||
defaultUnmoderatedStream.on('listing', modStreamListingListener('unmoderated'));
|
||||
const defaultModqueueStream = new ModQueueStream(this.client, {subreddit: 'mod', limit: 100, clearProcessed: { size: 100, retain: 100 }});
|
||||
// @ts-ignore
|
||||
defaultModqueueStream.on('error', modStreamErrorListener('modqueue'));
|
||||
defaultModqueueStream.on('listing', modStreamListingListener('modqueue'));
|
||||
this.cacheManager.modStreams.set('unmoderated', defaultUnmoderatedStream);
|
||||
this.cacheManager.modStreams.set('modqueue', defaultModqueueStream);
|
||||
|
||||
@@ -237,19 +269,23 @@ class Bot {
|
||||
}
|
||||
}
|
||||
|
||||
async testClient() {
|
||||
async testClient(initial = true) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.getMe();
|
||||
this.logger.info('Test API call successful');
|
||||
} catch (err) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
|
||||
if(err.name === 'StatusCodeError') {
|
||||
} catch (err: any) {
|
||||
if (initial) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
|
||||
}
|
||||
if (err.name === 'StatusCodeError') {
|
||||
const authHeader = err.response.headers['www-authenticate'];
|
||||
if (authHeader !== undefined && authHeader.includes('insufficient_scope')) {
|
||||
this.logger.error('Reddit responded with a 403 insufficient_scope. Please ensure you have chosen the correct scopes when authorizing your account.');
|
||||
} else if(err.statusCode === 401) {
|
||||
} else if (err.statusCode === 401) {
|
||||
this.logger.error('It is likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken');
|
||||
} else if(err.statusCode === 400) {
|
||||
this.logger.error('Credentials may have been invalidated due to prior behavior. The error message may contain more information.');
|
||||
}
|
||||
this.logger.error(`Error Message: ${err.message}`);
|
||||
} else {
|
||||
@@ -276,10 +312,12 @@ class Bot {
|
||||
}
|
||||
this.logger.info(`Bot Name${botNameFromConfig ? ' (from config)' : ''}: ${this.botName}`);
|
||||
|
||||
for (const sub of await this.client.getModeratedSubreddits()) {
|
||||
// TODO don't know a way to check permissions yet
|
||||
availSubs.push(sub);
|
||||
let subListing = await this.client.getModeratedSubreddits({count: 100});
|
||||
while(!subListing.isFinished) {
|
||||
subListing = await subListing.fetchMore({amount: 100});
|
||||
}
|
||||
availSubs = subListing;
|
||||
|
||||
this.logger.info(`u/${user.name} is a moderator of these subreddits: ${availSubs.map(x => x.display_name_prefixed).join(', ')}`);
|
||||
|
||||
let subsToRun: Subreddit[] = [];
|
||||
@@ -302,26 +340,45 @@ class Bot {
|
||||
const normalExcludes = this.excludeSubreddits.map(x => x.toLowerCase());
|
||||
subsToRun = availSubs.filter(x => !normalExcludes.includes(x.display_name.toLowerCase()));
|
||||
} else {
|
||||
this.logger.info('No user-defined subreddit constraints detected, will run on all moderated subreddits');
|
||||
subsToRun = availSubs;
|
||||
this.logger.info(`No user-defined subreddit constraints detected, will run on all moderated subreddits EXCEPT own profile (${this.botAccount})`);
|
||||
subsToRun = availSubs.filter(x => x.display_name_prefixed !== this.botAccount);
|
||||
}
|
||||
}
|
||||
|
||||
let subSchedule: Manager[] = [];
|
||||
// get configs for subs we want to run on and build/validate them
|
||||
for (const sub of subsToRun) {
|
||||
const manager = new Manager(sub, this.client, this.logger, this.cacheManager, {dryRun: this.dryRun, sharedModqueue: this.sharedModqueue, wikiLocation: this.wikiLocation, botName: this.botName, maxWorkers: this.maxWorkers});
|
||||
try {
|
||||
await manager.parseConfiguration('system', true, {suppressNotification: true});
|
||||
} catch (err) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Config was not valid:`, {subreddit: sub.display_name_prefixed});
|
||||
this.logger.error(err, {subreddit: sub.display_name_prefixed});
|
||||
}
|
||||
this.subManagers.push(await this.createManager(sub));
|
||||
} catch (err: any) {
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async createManager(sub: Subreddit): Promise<Manager> {
|
||||
const manager = new Manager(sub, this.client, this.logger, this.cacheManager, {dryRun: this.dryRun, sharedModqueue: this.sharedModqueue, wikiLocation: this.wikiLocation, botName: this.botName as string, maxWorkers: this.maxWorkers});
|
||||
try {
|
||||
await manager.parseConfiguration('system', true, {suppressNotification: true});
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Config was not valid:`, {subreddit: sub.display_name_prefixed});
|
||||
this.logger.error(err, {subreddit: sub.display_name_prefixed});
|
||||
err.logged = true;
|
||||
}
|
||||
}
|
||||
// all errors from managers will count towards bot-level retry count
|
||||
manager.on('error', async (err) => await this.panicOnRetries(err));
|
||||
return manager;
|
||||
}
|
||||
|
||||
// if the cumulative errors exceeds configured threshold then stop ALL managers as there is most likely something very bad happening
|
||||
async panicOnRetries(err: any) {
|
||||
if(!await this.managerRetryHandler(err)) {
|
||||
this.logger.warn('Bot detected too many errors from managers within a short time. Stopping all managers and will try to restart on next heartbeat.');
|
||||
for(const m of this.subManagers) {
|
||||
await m.stop('system',{reason: 'Bot detected too many errors from all managers. Stopping all manager as a failsafe.'});
|
||||
}
|
||||
subSchedule.push(manager);
|
||||
}
|
||||
this.subManagers = subSchedule;
|
||||
}
|
||||
|
||||
async destroy(causedBy: Invokee) {
|
||||
@@ -335,9 +392,43 @@ class Bot {
|
||||
this.logger.info('Bot is stopped.');
|
||||
}
|
||||
|
||||
async checkModInvites() {
|
||||
const subs: string[] = await this.cacheManager.getPendingSubredditInvites();
|
||||
for (const name of subs) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.getSubreddit(name).acceptModeratorInvite();
|
||||
this.logger.info(`Accepted moderator invite for r/${name}!`);
|
||||
await this.cacheManager.deletePendingSubredditInvite(name);
|
||||
// @ts-ignore
|
||||
const sub = await this.client.getSubreddit(name);
|
||||
this.logger.info(`Attempting to add manager for r/${name}`);
|
||||
try {
|
||||
const manager = await this.createManager(sub);
|
||||
this.logger.info(`Starting manager for r/${name}`);
|
||||
this.subManagers.push(manager);
|
||||
await manager.start('system', {reason: 'Caused by creation due to moderator invite'});
|
||||
await this.runModStreams();
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(err);
|
||||
}
|
||||
}
|
||||
} catch (err: any) {
|
||||
if (err.message.includes('NO_INVITE_FOUND')) {
|
||||
this.logger.warn(`No pending moderation invite for r/${name} was found`);
|
||||
} else if (isStatusError(err) && err.statusCode === 403) {
|
||||
this.logger.error(`Error occurred while checking r/${name} for a pending moderation invite. It is likely that this bot does not have the 'modself' oauth permission. Error: ${err.message}`);
|
||||
} else {
|
||||
this.logger.error(`Error occurred while checking r/${name} for a pending moderation invite. Error: ${err.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async runModStreams(notify = false) {
|
||||
for(const [k,v] of this.cacheManager.modStreams) {
|
||||
if(!v.running && v.listeners('item').length > 0) {
|
||||
if(!v.running && this.subManagers.some(x => x.modStreamCallbacks.get(k) !== undefined)) {
|
||||
v.startInterval();
|
||||
this.logger.info(`Starting default ${k.toUpperCase()} mod stream`);
|
||||
if(notify) {
|
||||
@@ -347,11 +438,14 @@ class Bot {
|
||||
}
|
||||
}
|
||||
}
|
||||
await sleep(2000);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async runManagers(causedBy: Invokee = 'system') {
|
||||
this.running = true;
|
||||
|
||||
if(this.subManagers.every(x => !x.validConfigLoaded)) {
|
||||
this.logger.warn('All managers have invalid configs!');
|
||||
this.error = 'All managers have invalid configs';
|
||||
@@ -359,14 +453,15 @@ class Bot {
|
||||
for (const manager of this.subManagers) {
|
||||
if (manager.validConfigLoaded && manager.botState.state !== RUNNING) {
|
||||
await manager.start(causedBy, {reason: 'Caused by application startup'});
|
||||
await sleep(this.stagger);
|
||||
}
|
||||
}
|
||||
|
||||
await this.runModStreams();
|
||||
|
||||
this.running = true;
|
||||
this.nextNannyCheck = dayjs().add(10, 'second');
|
||||
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
|
||||
await this.checkModInvites();
|
||||
await this.healthLoop();
|
||||
}
|
||||
|
||||
@@ -380,15 +475,16 @@ class Bot {
|
||||
try {
|
||||
await this.runApiNanny();
|
||||
this.nextNannyCheck = dayjs().add(10, 'second');
|
||||
} catch (err) {
|
||||
this.logger.info('Delaying next nanny check for 1 minute due to emitted error');
|
||||
this.nextNannyCheck = dayjs().add(120, 'second');
|
||||
} catch (err: any) {
|
||||
this.logger.info('Delaying next nanny check for 4 minutes due to emitted error');
|
||||
this.nextNannyCheck = dayjs().add(240, 'second');
|
||||
}
|
||||
}
|
||||
if(dayjs().isSameOrAfter(this.nextHeartbeat)) {
|
||||
try {
|
||||
await this.heartbeat();
|
||||
} catch (err) {
|
||||
await this.checkModInvites();
|
||||
} catch (err: any) {
|
||||
this.logger.error(`Error occurred during heartbeat check: ${err.message}`);
|
||||
}
|
||||
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
|
||||
@@ -400,20 +496,39 @@ class Bot {
|
||||
async heartbeat() {
|
||||
const heartbeat = `HEARTBEAT -- API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ~${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion === undefined ? 'N/A' : this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`
|
||||
this.logger.info(heartbeat);
|
||||
|
||||
// run sanity check to see if there is a service issue
|
||||
try {
|
||||
await this.testClient(false);
|
||||
} catch (err: any) {
|
||||
throw new SimpleError(`Something isn't right! This could be a Reddit API issue (service is down? buggy??) or an issue with the Bot account. Will not run heartbeat operations and will wait until next heartbeat (${dayjs.duration(this.nextHeartbeat.diff(dayjs())).humanize()}) to try again`);
|
||||
}
|
||||
let startedAny = false;
|
||||
|
||||
for (const s of this.subManagers) {
|
||||
if(s.botState.state === STOPPED && s.botState.causedBy === USER) {
|
||||
this.logger.debug('Skipping config check/restart on heartbeat due to previously being stopped by user', {subreddit: s.displayLabel});
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
// ensure calls to wiki page are also staggered so we aren't hitting api hard when bot has a ton of subreddits to check
|
||||
await sleep(this.stagger);
|
||||
const newConfig = await s.parseConfiguration();
|
||||
if(newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startQueue('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running queue'});
|
||||
}
|
||||
if(newConfig || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startEvents('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running events'});
|
||||
const willStart = newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM) || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM);
|
||||
if(willStart) {
|
||||
// stagger restart
|
||||
if (startedAny) {
|
||||
await sleep(this.stagger);
|
||||
}
|
||||
startedAny = true;
|
||||
if(newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startQueue('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running queue'});
|
||||
}
|
||||
if(newConfig || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startEvents('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running events'});
|
||||
}
|
||||
}
|
||||
if(s.botState.state !== RUNNING && s.eventsState.state === RUNNING && s.queueState.state === RUNNING) {
|
||||
s.botState = {
|
||||
@@ -421,7 +536,7 @@ class Bot {
|
||||
causedBy: 'system',
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.info('Stopping event polling to prevent activity processing queue from backing up. Will be restarted when config update succeeds.')
|
||||
await s.stopEvents('system', {reason: 'Invalid config will cause events to pile up in queue. Will be restarted when config update succeeds (next heartbeat).'});
|
||||
if(!(err instanceof LoggedError)) {
|
||||
@@ -448,7 +563,10 @@ class Bot {
|
||||
// @ts-ignore
|
||||
await this.client.getMe();
|
||||
shouldRetry = false;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(isRateLimitError(err)) {
|
||||
throw err;
|
||||
}
|
||||
shouldRetry = await this.nannyRetryHandler(err);
|
||||
if (!shouldRetry) {
|
||||
throw err;
|
||||
@@ -566,7 +684,7 @@ class Bot {
|
||||
this.nannyMode = undefined;
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error(`Error occurred during nanny loop: ${err.message}`);
|
||||
throw err;
|
||||
}
|
||||
|
||||
@@ -29,7 +29,8 @@ import * as RuleSetSchema from '../Schema/RuleSet.json';
|
||||
import * as ActionSchema from '../Schema/Action.json';
|
||||
import {ActionObjectJson, RuleJson, RuleObjectJson, ActionJson as ActionTypeJson} from "../Common/types";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {Author, AuthorCriteria, AuthorOptions} from "../Author/Author";
|
||||
import {Author, AuthorCriteria, AuthorOptions} from '..';
|
||||
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
|
||||
|
||||
const checkLogName = truncateStringToLength(25);
|
||||
|
||||
@@ -50,7 +51,7 @@ export abstract class Check implements ICheck {
|
||||
dryRun?: boolean;
|
||||
notifyOnTrigger: boolean;
|
||||
resources: SubredditResources;
|
||||
client: Snoowrap;
|
||||
client: ExtendedSnoowrap;
|
||||
|
||||
constructor(options: CheckOptions) {
|
||||
const {
|
||||
@@ -268,7 +269,7 @@ export abstract class Check implements ICheck {
|
||||
// otherwise AND and did not return already so all passed
|
||||
this.logger.info(`${PASS} => Rules: ${resultsSummary(allResults, this.condition)}`);
|
||||
return [true, allRuleResults];
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
e.logged = true;
|
||||
this.logger.warn(`Running rules failed due to uncaught exception`, e);
|
||||
throw e;
|
||||
@@ -345,13 +346,13 @@ export interface ICheck extends JoinCondition, ChecksActivityState {
|
||||
}
|
||||
|
||||
export interface CheckOptions extends ICheck {
|
||||
rules: Array<IRuleSet | IRule>
|
||||
actions: ActionConfig[]
|
||||
logger: Logger
|
||||
subredditName: string
|
||||
notifyOnTrigger?: boolean
|
||||
resources: SubredditResources
|
||||
client: Snoowrap
|
||||
rules: Array<IRuleSet | IRule>;
|
||||
actions: ActionConfig[];
|
||||
logger: Logger;
|
||||
subredditName: string;
|
||||
notifyOnTrigger?: boolean;
|
||||
resources: SubredditResources;
|
||||
client: ExtendedSnoowrap;
|
||||
cacheUserResult?: UserResultCacheOptions;
|
||||
}
|
||||
|
||||
|
||||
248
src/Common/ImageData.ts
Normal file
248
src/Common/ImageData.ts
Normal file
@@ -0,0 +1,248 @@
|
||||
import fetch from "node-fetch";
|
||||
import {Submission} from "snoowrap/dist/objects";
|
||||
import {URL} from "url";
|
||||
import {absPercentDifference, getSharpAsync, isValidImageURL} from "../util";
|
||||
import sizeOf from "image-size";
|
||||
import SimpleError from "../Utils/SimpleError";
|
||||
import {Sharp} from "sharp";
|
||||
import {blockhash} from "./blockhash/blockhash";
|
||||
|
||||
export interface ImageDataOptions {
|
||||
width?: number,
|
||||
height?: number,
|
||||
url: string,
|
||||
variants?: ImageData[]
|
||||
}
|
||||
|
||||
class ImageData {
|
||||
|
||||
width?: number
|
||||
height?: number
|
||||
url: URL
|
||||
variants: ImageData[] = []
|
||||
preferredResolution?: [number, number]
|
||||
sharpImg!: Sharp
|
||||
hashResult!: string
|
||||
actualResolution?: [number, number]
|
||||
|
||||
constructor(data: ImageDataOptions, aggressive = false) {
|
||||
this.width = data.width;
|
||||
this.height = data.height;
|
||||
this.url = new URL(data.url);
|
||||
if (!aggressive && !isValidImageURL(`${this.url.origin}${this.url.pathname}`)) {
|
||||
throw new Error('URL did not end with a valid image extension');
|
||||
}
|
||||
this.variants = data.variants || [];
|
||||
}
|
||||
|
||||
async data(format = 'raw'): Promise<Buffer> {
|
||||
// @ts-ignore
|
||||
return await (await this.sharp()).clone().toFormat(format).toBuffer();
|
||||
}
|
||||
|
||||
async hash(bits: number, useVariantIfPossible = true): Promise<string> {
|
||||
if(this.hashResult === undefined) {
|
||||
let ref: ImageData | undefined;
|
||||
if(useVariantIfPossible && this.preferredResolution !== undefined) {
|
||||
ref = this.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]);
|
||||
}
|
||||
if(ref === undefined) {
|
||||
ref = this;
|
||||
}
|
||||
this.hashResult = await blockhash((await ref.sharp()).clone(), bits);
|
||||
}
|
||||
return this.hashResult;
|
||||
}
|
||||
|
||||
async sharp(): Promise<Sharp> {
|
||||
if (this.sharpImg === undefined) {
|
||||
try {
|
||||
const response = await fetch(this.url.toString())
|
||||
if (response.ok) {
|
||||
const ct = response.headers.get('Content-Type');
|
||||
if (ct !== null && ct.includes('image')) {
|
||||
const sFunc = await getSharpAsync();
|
||||
// if image is animated then we want to extract the first frame and convert it to a regular image
|
||||
// so we can compare two static images later (also because sharp can't use resize() on animated images)
|
||||
if(['gif','webp'].some(x => ct.includes(x))) {
|
||||
this.sharpImg = await sFunc(await (await sFunc(await response.buffer(), {pages: 1, animated: false})).png().toBuffer());
|
||||
} else {
|
||||
this.sharpImg = await sFunc(await response.buffer());
|
||||
}
|
||||
const meta = await this.sharpImg.metadata();
|
||||
if (this.width === undefined || this.height === undefined) {
|
||||
this.width = meta.width;
|
||||
this.height = meta.height;
|
||||
}
|
||||
this.actualResolution = [meta.width as number, meta.height as number];
|
||||
} else {
|
||||
throw new SimpleError(`Content-Type for fetched URL ${this.url} did not contain "image"`);
|
||||
}
|
||||
} else {
|
||||
throw new SimpleError(`URL response was not OK: (${response.status})${response.statusText}`);
|
||||
}
|
||||
|
||||
|
||||
} catch (err: any) {
|
||||
if(!(err instanceof SimpleError)) {
|
||||
throw new Error(`Error occurred while fetching response from URL: ${err.message}`);
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
return this.sharpImg;
|
||||
}
|
||||
|
||||
get pixels() {
|
||||
if (this.actualResolution !== undefined) {
|
||||
return this.actualResolution[0] * this.actualResolution[1];
|
||||
}
|
||||
if (this.width === undefined || this.height === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return this.width * this.height;
|
||||
}
|
||||
|
||||
get hasDimensions() {
|
||||
return this.width !== undefined && this.height !== undefined;
|
||||
}
|
||||
|
||||
get baseUrl() {
|
||||
return `${this.url.origin}${this.url.pathname}`;
|
||||
}
|
||||
|
||||
setPreferredResolutionByWidth(prefWidth: number) {
|
||||
let height: number | undefined = undefined,
|
||||
width: number | undefined = undefined;
|
||||
if (this.variants.length === 0) {
|
||||
return;
|
||||
}
|
||||
for (const v of this.variants) {
|
||||
if (v.hasDimensions && (v.width as number) <= prefWidth) {
|
||||
width = v.width as number;
|
||||
height = v.height as number;
|
||||
}
|
||||
}
|
||||
if (width !== undefined) {
|
||||
this.preferredResolution = [width, (height as number)];
|
||||
}
|
||||
}
|
||||
|
||||
getSimilarResolutionVariant(width: number, height: number, allowablePercentDiff = 0): ImageData | undefined {
|
||||
if (this.variants.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
return this.variants.find(x => {
|
||||
return x.hasDimensions && (absPercentDifference(width, x.width as number) <= allowablePercentDiff) && (absPercentDifference(height, x.height as number) <= allowablePercentDiff);
|
||||
});
|
||||
}
|
||||
|
||||
isSameDimensions(otherImage: ImageData) {
|
||||
if (!this.hasDimensions || !otherImage.hasDimensions) {
|
||||
return false;
|
||||
}
|
||||
return this.width === otherImage.width && this.height === otherImage.height;
|
||||
}
|
||||
|
||||
async sameAspectRatio(otherImage: ImageData) {
|
||||
let thisRes = this.actualResolution;
|
||||
let otherRes = otherImage.actualResolution;
|
||||
if(thisRes === undefined) {
|
||||
const tMeta = await (await this.sharp()).metadata();
|
||||
const thisMeta = {width: tMeta.width as number, height: tMeta.height as number };
|
||||
this.actualResolution = [thisMeta.width, thisMeta.height];
|
||||
thisRes = this.actualResolution;
|
||||
}
|
||||
if(otherRes === undefined) {
|
||||
const otherMeta = await (await otherImage.sharp()).metadata();
|
||||
otherRes = [otherMeta.width as number, otherMeta.height as number];
|
||||
}
|
||||
const thisRatio = thisRes[0] / thisRes[1];
|
||||
const otherRatio = otherRes[0] / otherRes[1];
|
||||
|
||||
// a little leeway
|
||||
return Math.abs(thisRatio - otherRatio) < 0.1;
|
||||
}
|
||||
|
||||
static async dimensionsFromMetadata(img: Sharp) {
|
||||
const {width, height, ...rest} = await img.metadata();
|
||||
return {width: width as number, height: height as number};
|
||||
}
|
||||
|
||||
async normalizeImagesForComparison(compareLibrary: ('pixel' | 'resemble'), imgToCompare: ImageData): Promise<[Sharp, Sharp, number, number]> {
|
||||
const sFunc = await getSharpAsync();
|
||||
|
||||
let refImage = this as ImageData;
|
||||
let compareImage = imgToCompare;
|
||||
if (this.preferredResolution !== undefined) {
|
||||
const matchingVariant = compareImage.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]);
|
||||
if (matchingVariant !== undefined) {
|
||||
compareImage = matchingVariant;
|
||||
refImage = this.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]) as ImageData;
|
||||
}
|
||||
}
|
||||
|
||||
let refSharp = (await refImage.sharp()).clone();
|
||||
let refMeta = await ImageData.dimensionsFromMetadata(refSharp);
|
||||
let compareSharp = (await compareImage.sharp()).clone();
|
||||
let compareMeta = await ImageData.dimensionsFromMetadata(compareSharp);
|
||||
|
||||
// if dimensions on not the same we need to crop or resize before final resize
|
||||
if (refMeta.width !== compareMeta.width || refMeta.height !== compareMeta.height) {
|
||||
const thisRatio = refMeta.width / (refMeta.height);
|
||||
const otherRatio = compareMeta.width / compareMeta.height;
|
||||
|
||||
const sameRatio = Math.abs(thisRatio - otherRatio) < 0.04;
|
||||
if (sameRatio) {
|
||||
// then resize first since its most likely the same image
|
||||
// can be fairly sure a downscale will get pixels close to the same
|
||||
if (refMeta.width > compareMeta.width) {
|
||||
refSharp = sFunc(await refSharp.resize(compareMeta.width, null, {fit: 'outside'}).toBuffer());
|
||||
} else {
|
||||
compareSharp = sFunc(await compareSharp.resize(refMeta.width, null, {fit: 'outside'}).toBuffer());
|
||||
}
|
||||
refMeta = await ImageData.dimensionsFromMetadata(refSharp);
|
||||
compareMeta = await ImageData.dimensionsFromMetadata(compareSharp);
|
||||
}
|
||||
// find smallest common dimensions
|
||||
const sWidth = refMeta.width <= compareMeta.width ? refMeta.width : compareMeta.width;
|
||||
const sHeight = refMeta.height <= compareMeta.height ? refMeta.height : compareMeta.height;
|
||||
|
||||
// crop if necessary
|
||||
if(sWidth !== refMeta.width || sHeight !== refMeta.height) {
|
||||
refSharp = sFunc(await refSharp.extract({left: 0, top: 0, width: sWidth, height: sHeight}).toBuffer());
|
||||
}
|
||||
if(sWidth !== compareMeta.width || sHeight !== compareMeta.height) {
|
||||
compareSharp = sFunc(await compareSharp.extract({left: 0, top: 0, width: sWidth, height: sHeight}).toBuffer());
|
||||
}
|
||||
}
|
||||
|
||||
// final resize to reduce memory/cpu usage during comparison
|
||||
refSharp = sFunc(await refSharp.resize(400, null, {fit: 'outside'}).toBuffer());
|
||||
compareSharp = sFunc(await compareSharp.resize(400, null, {fit: 'outside'}).toBuffer());
|
||||
|
||||
const {width, height} = await ImageData.dimensionsFromMetadata(refSharp);
|
||||
return [refSharp, compareSharp, width, height];
|
||||
}
|
||||
|
||||
static fromSubmission(sub: Submission, aggressive = false): ImageData {
|
||||
const url = new URL(sub.url);
|
||||
const data: any = {
|
||||
url,
|
||||
};
|
||||
let variants = [];
|
||||
if (sub.preview !== undefined && sub.preview.enabled && sub.preview.images.length > 0) {
|
||||
const firstImg = sub.preview.images[0];
|
||||
const ref = sub.preview.images[0].source;
|
||||
data.width = ref.width;
|
||||
data.height = ref.height;
|
||||
|
||||
variants = firstImg.resolutions.map(x => new ImageData(x));
|
||||
data.variants = variants;
|
||||
}
|
||||
return new ImageData(data, aggressive);
|
||||
}
|
||||
}
|
||||
|
||||
export default ImageData;
|
||||
234
src/Common/blockhash/blockhash.ts
Normal file
234
src/Common/blockhash/blockhash.ts
Normal file
@@ -0,0 +1,234 @@
|
||||
// Perceptual image hash calculation tool based on algorithm descibed in
|
||||
// Block Mean Value Based Image Perceptual Hashing by Bian Yang, Fan Gu and Xiamu Niu
|
||||
//
|
||||
// Copyright 2014 Commons Machinery http://commonsmachinery.se/
|
||||
// Distributed under an MIT license, please see LICENSE in the top dir.
|
||||
|
||||
|
||||
// https://github.com/commonsmachinery/blockhash-js/blob/master/index.js
|
||||
|
||||
import {Sharp} from "sharp";
|
||||
|
||||
interface BlockImageData {
|
||||
data: Buffer,
|
||||
width: number,
|
||||
height: number
|
||||
}
|
||||
|
||||
var one_bits = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4];
|
||||
|
||||
/* Calculate the hamming distance for two hashes in hex format */
|
||||
export const hammingDistance = (hash1: string, hash2: string) => {
|
||||
var d = 0;
|
||||
var i;
|
||||
|
||||
if (hash1.length !== hash2.length) {
|
||||
throw new Error("Can't compare hashes with different length");
|
||||
}
|
||||
|
||||
for (i = 0; i < hash1.length; i++) {
|
||||
var n1 = parseInt(hash1[i], 16);
|
||||
var n2 = parseInt(hash2[i], 16);
|
||||
d += one_bits[n1 ^ n2];
|
||||
}
|
||||
return d;
|
||||
};
|
||||
|
||||
var median = function(data: number[]) {
|
||||
var mdarr = data.slice(0);
|
||||
mdarr.sort(function(a, b) { return a-b; });
|
||||
if (mdarr.length % 2 === 0) {
|
||||
return (mdarr[mdarr.length/2 - 1] + mdarr[mdarr.length/2]) / 2.0;
|
||||
}
|
||||
return mdarr[Math.floor(mdarr.length/2)];
|
||||
};
|
||||
|
||||
var translate_blocks_to_bits = function(blocks: number[], pixels_per_block: number) {
|
||||
var half_block_value = pixels_per_block * 256 * 3 / 2;
|
||||
var bandsize = blocks.length / 4;
|
||||
|
||||
// Compare medians across four horizontal bands
|
||||
for (var i = 0; i < 4; i++) {
|
||||
var m = median(blocks.slice(i * bandsize, (i + 1) * bandsize));
|
||||
for (var j = i * bandsize; j < (i + 1) * bandsize; j++) {
|
||||
var v = blocks[j];
|
||||
|
||||
// Output a 1 if the block is brighter than the median.
|
||||
// With images dominated by black or white, the median may
|
||||
// end up being 0 or the max value, and thus having a lot
|
||||
// of blocks of value equal to the median. To avoid
|
||||
// generating hashes of all zeros or ones, in that case output
|
||||
// 0 if the median is in the lower value space, 1 otherwise
|
||||
blocks[j] = Number(v > m || (Math.abs(v - m) < 1 && m > half_block_value));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
var bits_to_hexhash = function(bitsArray: number[]) {
|
||||
var hex = [];
|
||||
for (var i = 0; i < bitsArray.length; i += 4) {
|
||||
var nibble = bitsArray.slice(i, i + 4);
|
||||
hex.push(parseInt(nibble.join(''), 2).toString(16));
|
||||
}
|
||||
|
||||
return hex.join('');
|
||||
};
|
||||
|
||||
var bmvbhash_even = function(data: BlockImageData, bits: number) {
|
||||
var blocksize_x = Math.floor(data.width / bits);
|
||||
var blocksize_y = Math.floor(data.height / bits);
|
||||
|
||||
var result = [];
|
||||
|
||||
for (var y = 0; y < bits; y++) {
|
||||
for (var x = 0; x < bits; x++) {
|
||||
var total = 0;
|
||||
|
||||
for (var iy = 0; iy < blocksize_y; iy++) {
|
||||
for (var ix = 0; ix < blocksize_x; ix++) {
|
||||
var cx = x * blocksize_x + ix;
|
||||
var cy = y * blocksize_y + iy;
|
||||
var ii = (cy * data.width + cx) * 4;
|
||||
|
||||
var alpha = data.data[ii+3];
|
||||
if (alpha === 0) {
|
||||
total += 765;
|
||||
} else {
|
||||
total += data.data[ii] + data.data[ii+1] + data.data[ii+2];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result.push(total);
|
||||
}
|
||||
}
|
||||
|
||||
translate_blocks_to_bits(result, blocksize_x * blocksize_y);
|
||||
return bits_to_hexhash(result);
|
||||
};
|
||||
|
||||
var bmvbhash = function(data: BlockImageData, bits: number) {
|
||||
var result = [];
|
||||
|
||||
var i, j, x, y;
|
||||
var block_width, block_height;
|
||||
var weight_top, weight_bottom, weight_left, weight_right;
|
||||
var block_top, block_bottom, block_left, block_right;
|
||||
var y_mod, y_frac, y_int;
|
||||
var x_mod, x_frac, x_int;
|
||||
var blocks: number[][] = [];
|
||||
|
||||
var even_x = data.width % bits === 0;
|
||||
var even_y = data.height % bits === 0;
|
||||
|
||||
if (even_x && even_y) {
|
||||
return bmvbhash_even(data, bits);
|
||||
}
|
||||
|
||||
// initialize blocks array with 0s
|
||||
for (i = 0; i < bits; i++) {
|
||||
blocks.push([]);
|
||||
for (j = 0; j < bits; j++) {
|
||||
blocks[i].push(0);
|
||||
}
|
||||
}
|
||||
|
||||
block_width = data.width / bits;
|
||||
block_height = data.height / bits;
|
||||
|
||||
for (y = 0; y < data.height; y++) {
|
||||
if (even_y) {
|
||||
// don't bother dividing y, if the size evenly divides by bits
|
||||
block_top = block_bottom = Math.floor(y / block_height);
|
||||
weight_top = 1;
|
||||
weight_bottom = 0;
|
||||
} else {
|
||||
y_mod = (y + 1) % block_height;
|
||||
y_frac = y_mod - Math.floor(y_mod);
|
||||
y_int = y_mod - y_frac;
|
||||
|
||||
weight_top = (1 - y_frac);
|
||||
weight_bottom = (y_frac);
|
||||
|
||||
// y_int will be 0 on bottom/right borders and on block boundaries
|
||||
if (y_int > 0 || (y + 1) === data.height) {
|
||||
block_top = block_bottom = Math.floor(y / block_height);
|
||||
} else {
|
||||
block_top = Math.floor(y / block_height);
|
||||
block_bottom = Math.ceil(y / block_height);
|
||||
}
|
||||
}
|
||||
|
||||
for (x = 0; x < data.width; x++) {
|
||||
var ii = (y * data.width + x) * 4;
|
||||
|
||||
var avgvalue, alpha = data.data[ii+3];
|
||||
if (alpha === 0) {
|
||||
avgvalue = 765;
|
||||
} else {
|
||||
avgvalue = data.data[ii] + data.data[ii+1] + data.data[ii+2];
|
||||
}
|
||||
|
||||
if (even_x) {
|
||||
block_left = block_right = Math.floor(x / block_width);
|
||||
weight_left = 1;
|
||||
weight_right = 0;
|
||||
} else {
|
||||
x_mod = (x + 1) % block_width;
|
||||
x_frac = x_mod - Math.floor(x_mod);
|
||||
x_int = x_mod - x_frac;
|
||||
|
||||
weight_left = (1 - x_frac);
|
||||
weight_right = x_frac;
|
||||
|
||||
// x_int will be 0 on bottom/right borders and on block boundaries
|
||||
if (x_int > 0 || (x + 1) === data.width) {
|
||||
block_left = block_right = Math.floor(x / block_width);
|
||||
} else {
|
||||
block_left = Math.floor(x / block_width);
|
||||
block_right = Math.ceil(x / block_width);
|
||||
}
|
||||
}
|
||||
|
||||
// add weighted pixel value to relevant blocks
|
||||
blocks[block_top][block_left] += avgvalue * weight_top * weight_left;
|
||||
blocks[block_top][block_right] += avgvalue * weight_top * weight_right;
|
||||
blocks[block_bottom][block_left] += avgvalue * weight_bottom * weight_left;
|
||||
blocks[block_bottom][block_right] += avgvalue * weight_bottom * weight_right;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < bits; i++) {
|
||||
for (j = 0; j < bits; j++) {
|
||||
result.push(blocks[i][j]);
|
||||
}
|
||||
}
|
||||
|
||||
translate_blocks_to_bits(result, block_width * block_height);
|
||||
return bits_to_hexhash(result);
|
||||
};
|
||||
|
||||
var blockhashData = function(imgData: BlockImageData, bits: number, method: number) {
|
||||
var hash;
|
||||
|
||||
if (method === 1) {
|
||||
hash = bmvbhash_even(imgData, bits);
|
||||
}
|
||||
else if (method === 2) {
|
||||
hash = bmvbhash(imgData, bits);
|
||||
}
|
||||
else {
|
||||
throw new Error("Bad hashing method");
|
||||
}
|
||||
|
||||
return hash;
|
||||
};
|
||||
|
||||
export const blockhash = async function(src: Sharp, bits: number, method: number = 2): Promise<string> {
|
||||
const {data: buff, info} = await src.ensureAlpha().raw().toBuffer({resolveWithObject: true});
|
||||
return blockhashData({
|
||||
width: info.width,
|
||||
height: info.height,
|
||||
data: buff,
|
||||
}, bits, method);
|
||||
};
|
||||
@@ -1,2 +1,31 @@
|
||||
import {HistoricalStats} from "./interfaces";
|
||||
|
||||
export const cacheOptDefaults = {ttl: 60, max: 500, checkPeriod: 600};
|
||||
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600};
|
||||
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600, selfTTL: 60};
|
||||
export const historicalDefaults: HistoricalStats = {
|
||||
eventsCheckedTotal: 0,
|
||||
eventsActionedTotal: 0,
|
||||
checksRun: new Map(),
|
||||
checksFromCache: new Map(),
|
||||
checksTriggered: new Map(),
|
||||
rulesRun: new Map(),
|
||||
//rulesCached: new Map(),
|
||||
rulesCachedTotal: 0,
|
||||
rulesTriggered: new Map(),
|
||||
actionsRun: new Map(),
|
||||
}
|
||||
|
||||
export const createHistoricalDefaults = (): HistoricalStats => {
|
||||
return {
|
||||
eventsCheckedTotal: 0,
|
||||
eventsActionedTotal: 0,
|
||||
checksRun: new Map(),
|
||||
checksFromCache: new Map(),
|
||||
checksTriggered: new Map(),
|
||||
rulesRun: new Map(),
|
||||
//rulesCached: new Map(),
|
||||
rulesCachedTotal: 0,
|
||||
rulesTriggered: new Map(),
|
||||
actionsRun: new Map(),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -5,6 +5,9 @@ import Poll from "snoostorm/out/util/Poll";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {IncomingMessage} from "http";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import Comment from "snoowrap/dist/objects/Comment";
|
||||
import RedditUser from "snoowrap/dist/objects/RedditUser";
|
||||
|
||||
/**
|
||||
* An ISO 8601 Duration
|
||||
@@ -224,6 +227,186 @@ export interface ReferenceSubmission {
|
||||
useSubmissionAsReference?: boolean,
|
||||
}
|
||||
|
||||
/**
|
||||
* When comparing submissions detect if the reference submission is an image and do a pixel-comparison to other detected image submissions.
|
||||
*
|
||||
* **Note:** This is an **experimental feature**
|
||||
* */
|
||||
export interface ImageDetection {
|
||||
/**
|
||||
* Is image detection enabled?
|
||||
* */
|
||||
enable?: boolean
|
||||
/**
|
||||
* Determines how and when to check if a URL is an image
|
||||
*
|
||||
* **Note:** After fetching a URL the **Content-Type** is validated to contain `image` before detection occurs
|
||||
*
|
||||
* **When `extension`:** (default)
|
||||
*
|
||||
* * Only URLs that end in known image extensions (.png, .jpg, etc...) are fetched
|
||||
*
|
||||
* **When `unknown`:**
|
||||
*
|
||||
* * URLs that end in known image extensions (.png, .jpg, etc...) are fetched
|
||||
* * URLs with no extension or unknown (IE non-video, non-doc, etc...) are fetched
|
||||
*
|
||||
* **When `all`:**
|
||||
*
|
||||
* * All submissions that have URLs (non-self) will be fetched, regardless of extension
|
||||
* * **Note:** This can be bandwidth/CPU intensive if history window is large so use with care
|
||||
*
|
||||
* @default "extension"
|
||||
* */
|
||||
fetchBehavior?: 'extension' | 'unknown' | 'all',
|
||||
/**
|
||||
* The percentage, as a whole number, of difference between two images at which point they will not be considered the same.
|
||||
*
|
||||
* Will be used as `hash.hardThreshold` and `pixel.threshold` if those values are not specified
|
||||
*
|
||||
* Default is `5`
|
||||
*
|
||||
* @default 5
|
||||
* */
|
||||
threshold?: number
|
||||
|
||||
/**
|
||||
* Use perceptual hashing (blockhash-js) to compare images
|
||||
*
|
||||
* Pros:
|
||||
*
|
||||
* * very fast
|
||||
* * low cpu/memory usage
|
||||
* * results can be cached
|
||||
*
|
||||
* Cons:
|
||||
*
|
||||
* * not as accurate as pixel comparison
|
||||
* * weaker for text-heavy images
|
||||
* * mostly color-blind
|
||||
*
|
||||
* Best uses:
|
||||
*
|
||||
* * Detecting (general) duplicate images
|
||||
* * Comparing large number of images
|
||||
* */
|
||||
hash?: {
|
||||
/**
|
||||
* Enabled by default.
|
||||
*
|
||||
* If both `hash` and `pixel` are enabled then `pixel` will be used to verify image comparison when hashes matches
|
||||
*
|
||||
* @default true
|
||||
* */
|
||||
enable?: boolean
|
||||
|
||||
/**
|
||||
* Bit count determines accuracy of hash and granularity of hash comparison (comparison to other hashes)
|
||||
*
|
||||
* Default is `32`
|
||||
*
|
||||
* **NOTE:** Hashes of different sizes (bits) cannot be compared. If you are caching results make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.
|
||||
*
|
||||
* @default 32
|
||||
* */
|
||||
bits?: number
|
||||
|
||||
/**
|
||||
* Number of seconds to cache image hash
|
||||
* */
|
||||
ttl?: number
|
||||
/**
|
||||
* High Confidence Threshold
|
||||
*
|
||||
* If the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur
|
||||
*
|
||||
* Defaults to the parent-level `threshold` value if not present
|
||||
*
|
||||
* Use `null` if you want pixel comparison to ALWAYS occur (softThreshold must be present)
|
||||
* */
|
||||
hardThreshold?: number | null
|
||||
/**
|
||||
* Low Confidence Threshold -- only used if `pixel` is enabled
|
||||
*
|
||||
* If the difference in comparison is
|
||||
*
|
||||
* 1) equal to or less than this value and
|
||||
* 2) the value is greater than `hardThreshold`
|
||||
*
|
||||
* the images will be compared using the `pixel` method
|
||||
* */
|
||||
softThreshold?: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Use pixel counting to compare images
|
||||
*
|
||||
* Pros:
|
||||
*
|
||||
* * most accurate
|
||||
* * strong with text or color-only changes
|
||||
*
|
||||
* Cons:
|
||||
*
|
||||
* * much slower than hashing
|
||||
* * memory/cpu intensive
|
||||
*
|
||||
* Best uses:
|
||||
*
|
||||
* * Comparison text-only images
|
||||
* * Comparison requires high degree of accuracy or changes are subtle
|
||||
* */
|
||||
pixel?: {
|
||||
/**
|
||||
* Disabled by default.
|
||||
*
|
||||
* @default false
|
||||
* */
|
||||
enable?: boolean
|
||||
/**
|
||||
* The percentage, as a whole number, of pixels that are **different** between the two images at which point the images are not considered the same.
|
||||
* */
|
||||
threshold?: number
|
||||
}
|
||||
}
|
||||
|
||||
export interface StrongImageDetection {
|
||||
enable: boolean,
|
||||
fetchBehavior: 'extension' | 'unknown' | 'all'
|
||||
threshold: number,
|
||||
hash: {
|
||||
enable: boolean
|
||||
bits: number
|
||||
ttl?: number
|
||||
hardThreshold: number | null
|
||||
softThreshold?: number
|
||||
}
|
||||
pixel: {
|
||||
enable: boolean
|
||||
threshold: number
|
||||
}
|
||||
}
|
||||
|
||||
// export interface ImageData {
|
||||
// data: Promise<Buffer>,
|
||||
// buf?: Buffer,
|
||||
// width: number,
|
||||
// height: number
|
||||
// pixels?: number
|
||||
// url: string
|
||||
// variants?: ImageData[]
|
||||
// }
|
||||
|
||||
export interface ImageComparisonResult {
|
||||
isSameDimensions: boolean
|
||||
dimensionDifference: {
|
||||
width: number;
|
||||
height: number;
|
||||
};
|
||||
misMatchPercentage: number;
|
||||
analysisTime: number;
|
||||
}
|
||||
|
||||
export interface RichContent {
|
||||
/**
|
||||
* The Content to submit for this Action. Content is interpreted as reddit-flavored Markdown.
|
||||
@@ -306,6 +489,38 @@ export type PollOn = 'unmoderated' | 'modqueue' | 'newSub' | 'newComm';
|
||||
export interface PollingOptionsStrong extends PollingOptions {
|
||||
limit: number,
|
||||
interval: number,
|
||||
clearProcessed: ClearProcessedOptions
|
||||
}
|
||||
|
||||
/**
|
||||
* For very long-running, high-volume subreddits clearing the list of processed activities helps manage memory bloat
|
||||
*
|
||||
* All of these options have default values based on the limit and/or interval set for polling options on each subreddit stream. They only need to modified if the defaults are not sufficient.
|
||||
*
|
||||
* If both `after` and `size` are defined whichever is hit first will trigger the list to clear. `after` will be reset after ever clear.
|
||||
* */
|
||||
export interface ClearProcessedOptions {
|
||||
/**
|
||||
* An interval the processed list should be cleared after.
|
||||
*
|
||||
* * EX `9 days`
|
||||
* * EX `3 months`
|
||||
* * EX `5 minutes`
|
||||
* @pattern ^\s*(?<time>\d+)\s*(?<unit>days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)\s*$
|
||||
* */
|
||||
after?: string,
|
||||
/**
|
||||
* Number of activities found in processed list after which the list should be cleared.
|
||||
*
|
||||
* Defaults to the `limit` value from `PollingOptions`
|
||||
* */
|
||||
size?: number,
|
||||
/**
|
||||
* The number of activities to retain in processed list after clearing.
|
||||
*
|
||||
* Defaults to `limit` value from `PollingOptions`
|
||||
* */
|
||||
retain?: number,
|
||||
}
|
||||
|
||||
export interface PollingDefaults {
|
||||
@@ -379,6 +594,8 @@ export interface PollingOptions extends PollingDefaults {
|
||||
*
|
||||
* */
|
||||
pollOn: 'unmoderated' | 'modqueue' | 'newSub' | 'newComm'
|
||||
|
||||
clearProcessed?: ClearProcessedOptions
|
||||
}
|
||||
|
||||
export interface TTLConfig {
|
||||
@@ -456,6 +673,24 @@ export interface TTLConfig {
|
||||
* @default 60
|
||||
* */
|
||||
filterCriteriaTTL?: number | boolean;
|
||||
|
||||
/**
|
||||
* Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling
|
||||
*
|
||||
* This is useful to prevent the bot from checking Activities it *just* worked on or a product of the checks. Examples:
|
||||
*
|
||||
* * Ignore comments created through an Action
|
||||
* * Ignore Activity polled from modqueue that the bot just reported
|
||||
*
|
||||
* This value should be at least as long as the longest polling interval for modqueue/newComm
|
||||
*
|
||||
* * If `0` or `true` will cache indefinitely (not recommended)
|
||||
* * If `false` will not cache
|
||||
*
|
||||
* @examples [50]
|
||||
* @default 50
|
||||
* */
|
||||
selfTTL?: number | boolean
|
||||
}
|
||||
|
||||
export interface CacheConfig extends TTLConfig {
|
||||
@@ -618,6 +853,8 @@ export interface ManagerOptions {
|
||||
nickname?: string
|
||||
|
||||
notifications?: NotificationConfig
|
||||
|
||||
credentials?: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -631,6 +868,22 @@ export interface ManagerOptions {
|
||||
* */
|
||||
export type CompareValue = string;
|
||||
|
||||
/**
|
||||
* A duration and how to compare it against a value
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`
|
||||
*
|
||||
* * EX `> 100 days` => Passes if the date being compared is before 100 days ago
|
||||
* * EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months
|
||||
*
|
||||
* Unit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)
|
||||
*
|
||||
* [See] https://regexr.com/609n8 for example
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\s*$
|
||||
* */
|
||||
export type DurationComparor = string;
|
||||
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare against
|
||||
*
|
||||
@@ -677,7 +930,22 @@ export interface ActivityState {
|
||||
distinguished?: boolean
|
||||
approved?: boolean
|
||||
score?: CompareValue
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>`
|
||||
*
|
||||
* * EX `> 2` => greater than 2 total reports
|
||||
*
|
||||
* Defaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:
|
||||
*
|
||||
* * EX `> 3 mod` => greater than 3 mod reports
|
||||
* * EX `>= 1 user` => greater than 1 user report
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
reports?: CompareValue
|
||||
age?: DurationComparor
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -701,6 +969,9 @@ export interface SubmissionState extends ActivityState {
|
||||
link_flair_css_class?: string
|
||||
}
|
||||
|
||||
// properties calculated/derived by CM -- not provided as plain values by reddit
|
||||
export const cmActivityProperties = ['submissionState','score','reports','removed','deleted','filtered','age','title'];
|
||||
|
||||
/**
|
||||
* Different attributes a `Comment` can be in. Only include a property if you want to check it.
|
||||
* @examples [{"op": true, "removed": false}]
|
||||
@@ -714,6 +985,14 @@ export interface CommentState extends ActivityState {
|
||||
* A list of SubmissionState attributes to test the Submission this comment is in
|
||||
* */
|
||||
submissionState?: SubmissionState[]
|
||||
|
||||
/**
|
||||
* The (nested) level of a comment.
|
||||
*
|
||||
* * 0 mean the comment is at top-level (replying to submission)
|
||||
* * non-zero, Nth value means the comment has N parent comments
|
||||
* */
|
||||
depth?: DurationComparor
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -745,6 +1024,8 @@ export interface SubredditState {
|
||||
* A friendly description of what this State is trying to parse
|
||||
* */
|
||||
stateDescription?: string
|
||||
|
||||
isUserProfile?: boolean
|
||||
}
|
||||
|
||||
export interface StrongSubredditState extends SubredditState {
|
||||
@@ -772,6 +1053,28 @@ export const STOPPED = 'stopped';
|
||||
export const RUNNING = 'running';
|
||||
export const PAUSED = 'paused';
|
||||
|
||||
export interface SearchAndReplaceRegExp {
|
||||
/**
|
||||
* The search value to test for
|
||||
*
|
||||
* Can be a normal string (converted to a case-sensitive literal) or a valid regular expression
|
||||
*
|
||||
* EX `["find this string", "/some string*\/ig"]`
|
||||
*
|
||||
* @examples ["find this string", "/some string*\/ig"]
|
||||
* */
|
||||
search: string
|
||||
|
||||
/**
|
||||
* The replacement string/value to use when search is found
|
||||
*
|
||||
* This can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value
|
||||
*
|
||||
* See replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace
|
||||
* */
|
||||
replace: string
|
||||
}
|
||||
|
||||
export interface NamedGroup {
|
||||
[name: string]: string
|
||||
}
|
||||
@@ -804,6 +1107,7 @@ export type StrongCache = {
|
||||
submissionTTL: number | boolean,
|
||||
commentTTL: number | boolean,
|
||||
subredditTTL: number | boolean,
|
||||
selfTTL: number | boolean,
|
||||
filterCriteriaTTL: number | boolean,
|
||||
provider: CacheOptions
|
||||
actionedEventsMax?: number,
|
||||
@@ -1021,6 +1325,32 @@ export interface WebCredentials {
|
||||
redirectUri?: string,
|
||||
}
|
||||
|
||||
export interface SnoowrapOptions {
|
||||
/**
|
||||
* Proxy all requests to Reddit's API through this endpoint
|
||||
*
|
||||
* * ENV => `PROXY`
|
||||
* * ARG => `--proxy <proxyEndpoint>`
|
||||
*
|
||||
* @examples ["http://localhost:4443"]
|
||||
* */
|
||||
proxy?: string,
|
||||
/**
|
||||
* Manually set the debug status for snoowrap
|
||||
*
|
||||
* When snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level
|
||||
*
|
||||
* * Set to `true` to always output
|
||||
* * Set to `false` to never output
|
||||
*
|
||||
* If not present or `null` will be set based on `logLevel`
|
||||
*
|
||||
* * ENV => `SNOO_DEBUG`
|
||||
* * ARG => `--snooDebug`
|
||||
* */
|
||||
debug?: boolean,
|
||||
}
|
||||
|
||||
/**
|
||||
* The configuration for an **individual reddit account** ContextMod will run as a bot.
|
||||
*
|
||||
@@ -1030,7 +1360,7 @@ export interface WebCredentials {
|
||||
*
|
||||
* */
|
||||
export interface BotInstanceJsonConfig {
|
||||
credentials?: RedditCredentials
|
||||
credentials?: BotCredentialsJsonConfig | RedditCredentials
|
||||
/*
|
||||
* The name to display for the bot. If not specified will use the name of the reddit account IE `u/TheBotName`
|
||||
* */
|
||||
@@ -1041,33 +1371,13 @@ export interface BotInstanceJsonConfig {
|
||||
notifications?: NotificationConfig
|
||||
|
||||
/**
|
||||
* Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior
|
||||
* Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior.
|
||||
*
|
||||
* Overrides any defaults provided at top-level operator config.
|
||||
*
|
||||
* Set to an empty object to "ignore" any top-level config
|
||||
* */
|
||||
snoowrap?: {
|
||||
/**
|
||||
* Proxy all requests to Reddit's API through this endpoint
|
||||
*
|
||||
* * ENV => `PROXY`
|
||||
* * ARG => `--proxy <proxyEndpoint>`
|
||||
*
|
||||
* @examples ["http://localhost:4443"]
|
||||
* */
|
||||
proxy?: string,
|
||||
/**
|
||||
* Manually set the debug status for snoowrap
|
||||
*
|
||||
* When snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level
|
||||
*
|
||||
* * Set to `true` to always output
|
||||
* * Set to `false` to never output
|
||||
*
|
||||
* If not present or `null` will be set based on `logLevel`
|
||||
*
|
||||
* * ENV => `SNOO_DEBUG`
|
||||
* * ARG => `--snooDebug`
|
||||
* */
|
||||
debug?: boolean,
|
||||
}
|
||||
snoowrap?: SnoowrapOptions
|
||||
|
||||
/**
|
||||
* Settings related to bot behavior for subreddits it is managing
|
||||
@@ -1145,6 +1455,13 @@ export interface BotInstanceJsonConfig {
|
||||
* @default false
|
||||
* */
|
||||
sharedMod?: boolean,
|
||||
|
||||
/**
|
||||
* If sharing a mod stream stagger pushing relevant Activities to individual subreddits.
|
||||
*
|
||||
* Useful when running many subreddits and rules are potentially cpu/memory/traffic heavy -- allows spreading out load
|
||||
* */
|
||||
stagger?: number,
|
||||
},
|
||||
/**
|
||||
* Settings related to default configurations for queue behavior for subreddits
|
||||
@@ -1283,6 +1600,11 @@ export interface OperatorJsonConfig {
|
||||
* */
|
||||
caching?: OperatorCacheConfig
|
||||
|
||||
/**
|
||||
* Set global snoowrap options as well as default snoowrap config for all bots that don't specify their own
|
||||
* */
|
||||
snoowrap?: SnoowrapOptions
|
||||
|
||||
bots?: BotInstanceJsonConfig[]
|
||||
|
||||
/**
|
||||
@@ -1403,6 +1725,8 @@ export interface OperatorJsonConfig {
|
||||
* */
|
||||
friendly?: string,
|
||||
}
|
||||
|
||||
credentials?: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
export interface RequiredOperatorRedditCredentials extends RedditCredentials {
|
||||
@@ -1416,8 +1740,23 @@ export interface RequiredWebRedditCredentials extends RedditCredentials {
|
||||
redirectUri: string
|
||||
}
|
||||
|
||||
export interface ThirdPartyCredentialsJsonConfig {
|
||||
youtube?: {
|
||||
apiKey: string
|
||||
}
|
||||
[key: string]: any
|
||||
}
|
||||
|
||||
export interface BotCredentialsJsonConfig extends ThirdPartyCredentialsJsonConfig {
|
||||
reddit: RedditCredentials
|
||||
}
|
||||
|
||||
export interface BotCredentialsConfig extends ThirdPartyCredentialsJsonConfig {
|
||||
reddit: RequiredOperatorRedditCredentials
|
||||
}
|
||||
|
||||
export interface BotInstanceConfig extends BotInstanceJsonConfig {
|
||||
credentials: RequiredOperatorRedditCredentials
|
||||
credentials: BotCredentialsJsonConfig
|
||||
snoowrap: {
|
||||
proxy?: string,
|
||||
debug?: boolean,
|
||||
@@ -1431,6 +1770,7 @@ export interface BotInstanceConfig extends BotInstanceJsonConfig {
|
||||
},
|
||||
polling: {
|
||||
sharedMod: boolean,
|
||||
stagger?: number,
|
||||
limit: number,
|
||||
interval: number,
|
||||
},
|
||||
@@ -1478,6 +1818,7 @@ export interface OperatorConfig extends OperatorJsonConfig {
|
||||
friendly?: string,
|
||||
}
|
||||
bots: BotInstanceConfig[]
|
||||
credentials: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
//export type OperatorConfig = Required<OperatorJsonConfig>;
|
||||
@@ -1507,20 +1848,18 @@ export interface LogInfo {
|
||||
bot?: string
|
||||
}
|
||||
|
||||
export interface ActionResult {
|
||||
export interface ActionResult extends ActionProcessResult {
|
||||
kind: string,
|
||||
name: string,
|
||||
run: boolean,
|
||||
runReason?: string,
|
||||
dryRun: boolean,
|
||||
success: boolean,
|
||||
result?: string,
|
||||
}
|
||||
|
||||
export interface ActionProcessResult {
|
||||
success: boolean,
|
||||
dryRun: boolean,
|
||||
result?: string
|
||||
touchedEntities?: (Submission | Comment | RedditUser | string)[]
|
||||
}
|
||||
|
||||
export interface ActionedEvent {
|
||||
@@ -1556,3 +1895,115 @@ export interface StatusCodeError extends Error {
|
||||
response: IncomingMessage,
|
||||
error: Error
|
||||
}
|
||||
|
||||
export interface RequestError extends Error {
|
||||
name: 'RequestError',
|
||||
statusCode: number,
|
||||
message: string,
|
||||
response: IncomingMessage,
|
||||
error: Error
|
||||
}
|
||||
|
||||
export interface HistoricalStatsDisplay extends HistoricalStats {
|
||||
checksRunTotal: number
|
||||
checksFromCacheTotal: number
|
||||
checksTriggeredTotal: number
|
||||
rulesRunTotal: number
|
||||
rulesCachedTotal: number
|
||||
rulesTriggeredTotal: number
|
||||
actionsRunTotal: number
|
||||
}
|
||||
|
||||
export interface HistoricalStats {
|
||||
eventsCheckedTotal: number
|
||||
eventsActionedTotal: number
|
||||
checksRun: Map<string, number>
|
||||
checksFromCache: Map<string, number>
|
||||
checksTriggered: Map<string, number>
|
||||
rulesRun: Map<string, number>
|
||||
//rulesCached: Map<string, number>
|
||||
rulesCachedTotal: number
|
||||
rulesTriggered: Map<string, number>
|
||||
actionsRun: Map<string, number>
|
||||
[index: string]: any
|
||||
}
|
||||
|
||||
export interface SubredditHistoricalStats {
|
||||
allTime: HistoricalStats
|
||||
lastReload: HistoricalStats
|
||||
}
|
||||
|
||||
export interface SubredditHistoricalStatsDisplay {
|
||||
allTime: HistoricalStatsDisplay
|
||||
lastReload: HistoricalStatsDisplay
|
||||
}
|
||||
|
||||
export interface ManagerStats {
|
||||
// eventsCheckedTotal: number
|
||||
// eventsCheckedSinceStartTotal: number
|
||||
eventsAvg: number
|
||||
// checksRunTotal: number
|
||||
// checksRunSinceStartTotal: number
|
||||
// checksTriggered: number
|
||||
// checksTriggeredTotal: number
|
||||
// checksTriggeredSinceStart: number
|
||||
// checksTriggeredSinceStartTotal: number
|
||||
// rulesRunTotal: number
|
||||
// rulesRunSinceStartTotal: number
|
||||
// rulesCachedTotal: number
|
||||
// rulesCachedSinceStartTotal: number
|
||||
// rulesTriggeredTotal: number
|
||||
// rulesTriggeredSinceStartTotal: number
|
||||
rulesAvg: number
|
||||
// actionsRun: number
|
||||
// actionsRunTotal: number
|
||||
// actionsRunSinceStart: number,
|
||||
// actionsRunSinceStartTotal: number
|
||||
historical: SubredditHistoricalStatsDisplay
|
||||
cache: {
|
||||
provider: string,
|
||||
currentKeyCount: number,
|
||||
isShared: boolean,
|
||||
totalRequests: number,
|
||||
totalMiss: number,
|
||||
missPercent: string,
|
||||
requestRate: number,
|
||||
types: ResourceStats
|
||||
},
|
||||
}
|
||||
|
||||
export interface HistoricalStatUpdateData {
|
||||
eventsCheckedTotal?: number
|
||||
eventsActionedTotal?: number
|
||||
checksRun: string[] | string
|
||||
checksTriggered: string[] | string
|
||||
checksFromCache: string[] | string
|
||||
actionsRun: string[] | string
|
||||
rulesRun: string[] | string
|
||||
rulesCachedTotal: number
|
||||
rulesTriggered: string[] | string
|
||||
}
|
||||
|
||||
export type SearchFacetType = 'title' | 'url' | 'duplicates' | 'crossposts' | 'external';
|
||||
|
||||
export interface RepostItem {
|
||||
value: string
|
||||
createdOn?: number
|
||||
source: string
|
||||
sourceUrl?: string
|
||||
score?: number
|
||||
id: string
|
||||
itemType: string
|
||||
acquisitionType: SearchFacetType | 'comment'
|
||||
sourceObj?: any
|
||||
reqSameness?: number
|
||||
}
|
||||
|
||||
export interface RepostItemResult extends RepostItem {
|
||||
sameness: number
|
||||
}
|
||||
|
||||
export interface StringComparisonOptions {
|
||||
lengthWeight?: number,
|
||||
transforms?: ((str: string) => string)[]
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ import {RepeatActivityJSONConfig} from "../Rule/RepeatActivityRule";
|
||||
import {AuthorRuleJSONConfig} from "../Rule/AuthorRule";
|
||||
import {AttributionJSONConfig} from "../Rule/AttributionRule";
|
||||
import {FlairActionJson} from "../Action/SubmissionAction/FlairAction";
|
||||
import {UserFlairActionJson} from "../Action/UserFlairAction";
|
||||
import {CommentActionJson} from "../Action/CommentAction";
|
||||
import {ReportActionJson} from "../Action/ReportAction";
|
||||
import {LockActionJson} from "../Action/LockAction";
|
||||
@@ -13,9 +14,17 @@ import {ApproveActionJson} from "../Action/ApproveAction";
|
||||
import {BanActionJson} from "../Action/BanAction";
|
||||
import {RegexRuleJSONConfig} from "../Rule/RegexRule";
|
||||
import {MessageActionJson} from "../Action/MessageAction";
|
||||
import {RepostRuleJSONConfig} from "../Rule/RepostRule";
|
||||
|
||||
export type RuleJson = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | string;
|
||||
export type RuleJson = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | RepostRuleJSONConfig | string;
|
||||
export type RuleObjectJson = Exclude<RuleJson, string>
|
||||
|
||||
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | string;
|
||||
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | UserFlairActionJson | string;
|
||||
export type ActionObjectJson = Exclude<ActionJson, string>;
|
||||
|
||||
// borrowed from https://github.com/jabacchetta/set-random-interval/blob/master/src/index.ts
|
||||
export type SetRandomInterval = (
|
||||
intervalFunction: () => void,
|
||||
minDelay: number,
|
||||
maxDelay: number,
|
||||
) => { clear: () => void };
|
||||
|
||||
@@ -31,7 +31,7 @@ import {
|
||||
CacheOptions,
|
||||
BotInstanceJsonConfig,
|
||||
BotInstanceConfig,
|
||||
RequiredWebRedditCredentials
|
||||
RequiredWebRedditCredentials, RedditCredentials, BotCredentialsJsonConfig, BotCredentialsConfig
|
||||
} from "./Common/interfaces";
|
||||
import {isRuleSetJSON, RuleSetJson, RuleSetObjectJson} from "./Rule/RuleSet";
|
||||
import deepEqual from "fast-deep-equal";
|
||||
@@ -142,15 +142,30 @@ export const buildPollingOptions = (values: (string | PollingOptions)[]): Pollin
|
||||
let opts: PollingOptionsStrong[] = [];
|
||||
for (const v of values) {
|
||||
if (typeof v === 'string') {
|
||||
opts.push({pollOn: v as PollOn, interval: DEFAULT_POLLING_INTERVAL, limit: DEFAULT_POLLING_LIMIT});
|
||||
opts.push({
|
||||
pollOn: v as PollOn,
|
||||
interval: DEFAULT_POLLING_INTERVAL,
|
||||
limit: DEFAULT_POLLING_LIMIT,
|
||||
clearProcessed: {
|
||||
size: DEFAULT_POLLING_LIMIT,
|
||||
retain: DEFAULT_POLLING_LIMIT,
|
||||
}
|
||||
});
|
||||
} else {
|
||||
const {
|
||||
pollOn: p,
|
||||
interval = DEFAULT_POLLING_INTERVAL,
|
||||
limit = DEFAULT_POLLING_LIMIT,
|
||||
delayUntil,
|
||||
clearProcessed = {size: limit, retain: limit},
|
||||
} = v;
|
||||
opts.push({pollOn: p as PollOn, interval, limit, delayUntil});
|
||||
opts.push({
|
||||
pollOn: p as PollOn,
|
||||
interval,
|
||||
limit,
|
||||
delayUntil,
|
||||
clearProcessed
|
||||
});
|
||||
}
|
||||
}
|
||||
return opts;
|
||||
@@ -266,8 +281,6 @@ export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfi
|
||||
heartbeat,
|
||||
hardLimit,
|
||||
authorTTL,
|
||||
snooProxy,
|
||||
snooDebug,
|
||||
sharedMod,
|
||||
caching,
|
||||
} = args || {};
|
||||
@@ -279,10 +292,6 @@ export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfi
|
||||
accessToken,
|
||||
refreshToken,
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: snooProxy,
|
||||
debug: snooDebug,
|
||||
},
|
||||
subreddits: {
|
||||
names: subreddits,
|
||||
wikiConfig,
|
||||
@@ -315,6 +324,8 @@ export const parseOpConfigFromArgs = (args: any): OperatorJsonConfig => {
|
||||
mode,
|
||||
caching,
|
||||
authorTTL,
|
||||
snooProxy,
|
||||
snooDebug,
|
||||
} = args || {};
|
||||
|
||||
const data = {
|
||||
@@ -331,6 +342,10 @@ export const parseOpConfigFromArgs = (args: any): OperatorJsonConfig => {
|
||||
provider: caching,
|
||||
authorTTL
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: snooProxy,
|
||||
debug: snooDebug,
|
||||
},
|
||||
web: {
|
||||
enabled: web,
|
||||
port,
|
||||
@@ -372,10 +387,13 @@ const parseListFromEnv = (val: string | undefined) => {
|
||||
export const parseDefaultBotInstanceFromEnv = (): BotInstanceJsonConfig => {
|
||||
const data = {
|
||||
credentials: {
|
||||
clientId: process.env.CLIENT_ID,
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
accessToken: process.env.ACCESS_TOKEN,
|
||||
refreshToken: process.env.REFRESH_TOKEN,
|
||||
reddit: {
|
||||
clientId: process.env.CLIENT_ID,
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
accessToken: process.env.ACCESS_TOKEN,
|
||||
refreshToken: process.env.REFRESH_TOKEN,
|
||||
},
|
||||
youtube: process.env.YOUTUBE_API_KEY
|
||||
},
|
||||
subreddits: {
|
||||
names: parseListFromEnv(process.env.SUBREDDITS),
|
||||
@@ -383,10 +401,6 @@ export const parseDefaultBotInstanceFromEnv = (): BotInstanceJsonConfig => {
|
||||
dryRun: parseBool(process.env.DRYRUN, undefined),
|
||||
heartbeatInterval: process.env.HEARTBEAT !== undefined ? parseInt(process.env.HEARTBEAT) : undefined,
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: process.env.PROXY,
|
||||
debug: parseBool(process.env.SNOO_DEBUG, undefined),
|
||||
},
|
||||
polling: {
|
||||
sharedMod: parseBool(process.env.SHARE_MOD),
|
||||
},
|
||||
@@ -417,6 +431,10 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
|
||||
},
|
||||
authorTTL: process.env.AUTHOR_TTL !== undefined ? parseInt(process.env.AUTHOR_TTL) : undefined
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: process.env.PROXY,
|
||||
debug: parseBool(process.env.SNOO_DEBUG, undefined),
|
||||
},
|
||||
web: {
|
||||
port: process.env.PORT !== undefined ? parseInt(process.env.PORT) : undefined,
|
||||
session: {
|
||||
@@ -428,6 +446,11 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
redirectUri: process.env.REDIRECT_URI,
|
||||
},
|
||||
},
|
||||
credentials: {
|
||||
youtube: {
|
||||
apiKey: process.env.YOUTUBE_API_KEY
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -461,7 +484,7 @@ export const parseOperatorConfigFromSources = async (args: any): Promise<Operato
|
||||
process.env[k] = v;
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
let msg = 'No .env file found at default location (./env)';
|
||||
if (envPath !== undefined) {
|
||||
msg = `${msg} or OPERATOR_ENV path (${envPath})`;
|
||||
@@ -477,14 +500,14 @@ export const parseOperatorConfigFromSources = async (args: any): Promise<Operato
|
||||
let rawConfig;
|
||||
try {
|
||||
rawConfig = await readConfigFile(operatorConfig, {log: initLogger}) as object;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
initLogger.error('Cannot continue app startup because operator config file was not parseable.');
|
||||
err.logged = true;
|
||||
throw err;
|
||||
}
|
||||
try {
|
||||
configFromFile = validateJson(rawConfig, operatorSchema, initLogger) as OperatorJsonConfig;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
initLogger.error('Cannot continue app startup because operator config file was not valid.');
|
||||
throw err;
|
||||
}
|
||||
@@ -545,11 +568,13 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
credentials: webCredentials,
|
||||
operators,
|
||||
} = {},
|
||||
snoowrap: snoowrapOp = {},
|
||||
api: {
|
||||
port: apiPort = 8095,
|
||||
secret: apiSecret = randomId(),
|
||||
friendly,
|
||||
} = {},
|
||||
credentials = {},
|
||||
bots = [],
|
||||
} = data;
|
||||
|
||||
@@ -604,6 +629,7 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
name: botName,
|
||||
polling: {
|
||||
sharedMod = false,
|
||||
stagger,
|
||||
limit = 100,
|
||||
interval = 30,
|
||||
} = {},
|
||||
@@ -615,12 +641,8 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
softLimit = 250,
|
||||
hardLimit = 50
|
||||
} = {},
|
||||
snoowrap = {},
|
||||
credentials: {
|
||||
clientId: ci,
|
||||
clientSecret: cs,
|
||||
...restCred
|
||||
} = {},
|
||||
snoowrap = snoowrapOp,
|
||||
credentials = {},
|
||||
subreddits: {
|
||||
names = [],
|
||||
exclude = [],
|
||||
@@ -630,7 +652,6 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
} = {},
|
||||
} = x;
|
||||
|
||||
|
||||
let botCache: StrongCache;
|
||||
let botActionedEventsDefault: number;
|
||||
|
||||
@@ -640,10 +661,7 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
...cacheTTLDefaults,
|
||||
actionedEventsDefault: opActionedEventsDefault,
|
||||
actionedEventsMax: opActionedEventsMax,
|
||||
provider: {
|
||||
store: 'memory',
|
||||
...cacheOptDefaults
|
||||
}
|
||||
provider: {...defaultProvider}
|
||||
};
|
||||
} else {
|
||||
const {
|
||||
@@ -654,7 +672,7 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
} = caching;
|
||||
|
||||
botActionedEventsDefault = actionedEventsDefault;
|
||||
if(actionedEventsMax !== undefined) {
|
||||
if (actionedEventsMax !== undefined) {
|
||||
botActionedEventsDefault = Math.min(actionedEventsDefault, actionedEventsMax);
|
||||
}
|
||||
|
||||
@@ -684,11 +702,42 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
}
|
||||
}
|
||||
|
||||
const botCreds = {
|
||||
clientId: (ci as string),
|
||||
clientSecret: (cs as string),
|
||||
...restCred,
|
||||
};
|
||||
let botCreds: BotCredentialsConfig;
|
||||
|
||||
if((credentials as any).clientId !== undefined) {
|
||||
const creds = credentials as RedditCredentials;
|
||||
const {
|
||||
clientId: ci,
|
||||
clientSecret: cs,
|
||||
...restCred
|
||||
} = creds;
|
||||
botCreds = {
|
||||
reddit: {
|
||||
clientId: (ci as string),
|
||||
clientSecret: (cs as string),
|
||||
...restCred,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
const creds = credentials as BotCredentialsJsonConfig;
|
||||
const {
|
||||
reddit: {
|
||||
clientId: ci,
|
||||
clientSecret: cs,
|
||||
...restRedditCreds
|
||||
},
|
||||
...rest
|
||||
} = creds;
|
||||
botCreds = {
|
||||
reddit: {
|
||||
clientId: (ci as string),
|
||||
clientSecret: (cs as string),
|
||||
...restRedditCreds,
|
||||
},
|
||||
...rest
|
||||
}
|
||||
}
|
||||
|
||||
if (botCache.provider.prefix === undefined || botCache.provider.prefix === defaultProvider.prefix) {
|
||||
// need to provide unique prefix to bot
|
||||
botCache.provider.prefix = buildCachePrefix([botCache.provider.prefix, 'bot', (botName || objectHash.sha1(botCreds))]);
|
||||
@@ -708,6 +757,7 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
caching: botCache,
|
||||
polling: {
|
||||
sharedMod,
|
||||
stagger,
|
||||
limit,
|
||||
interval,
|
||||
},
|
||||
@@ -759,6 +809,7 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
friendly
|
||||
},
|
||||
bots: hydratedBots,
|
||||
credentials,
|
||||
};
|
||||
|
||||
return config;
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import {SubmissionRule, SubmissionRuleJSONConfig} from "./SubmissionRule";
|
||||
import {ActivityWindowType, DomainInfo, ReferenceSubmission} from "../Common/interfaces";
|
||||
import {ActivityWindowType, CommentState, DomainInfo, ReferenceSubmission, SubmissionState} from "../Common/interfaces";
|
||||
import {Rule, RuleOptions, RuleResult} from "./index";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {getAttributionIdentifier} from "../Utils/SnoowrapUtils";
|
||||
import dayjs from "dayjs";
|
||||
import {
|
||||
asSubmission,
|
||||
comparisonTextOp,
|
||||
comparisonTextOp, convertSubredditsRawToStrong,
|
||||
FAIL,
|
||||
formatNumber, getActivitySubredditName, isSubmission,
|
||||
parseGenericValueOrPercentComparison,
|
||||
@@ -15,6 +15,7 @@ import {
|
||||
} from "../util";
|
||||
import { Comment } from "snoowrap/dist/objects";
|
||||
import SimpleError from "../Utils/SimpleError";
|
||||
import as from "async";
|
||||
|
||||
|
||||
export interface AttributionCriteria {
|
||||
@@ -76,25 +77,41 @@ export interface AttributionCriteria {
|
||||
domainsCombined?: boolean,
|
||||
|
||||
/**
|
||||
* Only include Activities from this list of Subreddits (by name, case-insensitive)
|
||||
* When present, Activities WILL ONLY be counted if they are found in this list of Subreddits
|
||||
*
|
||||
* Each value in the list can be either:
|
||||
*
|
||||
* EX `["mealtimevideos","askscience"]`
|
||||
* @examples ["mealtimevideos","askscience"]
|
||||
* @minItems 1
|
||||
* * string (name of subreddit)
|
||||
* * regular expression to run on the subreddit name
|
||||
* * `SubredditState`
|
||||
*
|
||||
* EX `["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]`
|
||||
* @examples [["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]]
|
||||
* */
|
||||
include?: string[],
|
||||
/**
|
||||
* Do not include Activities from this list of Subreddits (by name, case-insensitive)
|
||||
* When present, Activities WILL NOT be counted if they are found in this list of Subreddits
|
||||
*
|
||||
* Will be ignored if `include` is present.
|
||||
* Each value in the list can be either:
|
||||
*
|
||||
* EX `["mealtimevideos","askscience"]`
|
||||
* @examples ["mealtimevideos","askscience"]
|
||||
* @minItems 1
|
||||
* * string (name of subreddit)
|
||||
* * regular expression to run on the subreddit name
|
||||
* * `SubredditState`
|
||||
*
|
||||
* EX `["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]`
|
||||
* @examples [["mealtimevideos","askscience", "/onlyfans*\/i", {"over18": true}]]
|
||||
* */
|
||||
exclude?: string[],
|
||||
|
||||
/**
|
||||
* When present, Submissions from `window` will only be counted if they meet this criteria
|
||||
* */
|
||||
submissionState?: SubmissionState
|
||||
/**
|
||||
* When present, Comments from `window` will only be counted if they meet this criteria
|
||||
* */
|
||||
commentState?: CommentState
|
||||
|
||||
/**
|
||||
* This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`
|
||||
*
|
||||
@@ -103,7 +120,7 @@ export interface AttributionCriteria {
|
||||
* * If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`
|
||||
* * If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit
|
||||
*
|
||||
* If nothing is specified or list is empty (default) all domains are aggregated
|
||||
* If nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)
|
||||
*
|
||||
* @default undefined
|
||||
* @examples [[]]
|
||||
@@ -174,25 +191,40 @@ export class AttributionRule extends Rule {
|
||||
window,
|
||||
thresholdOn = 'all',
|
||||
minActivityCount = 10,
|
||||
aggregateOn = [],
|
||||
aggregateOn = ['link','media'],
|
||||
consolidateMediaDomains = false,
|
||||
domains = [],
|
||||
domainsCombined = false,
|
||||
include: includeRaw = [],
|
||||
exclude: excludeRaw = [],
|
||||
include = [],
|
||||
exclude = [],
|
||||
commentState,
|
||||
submissionState,
|
||||
} = criteria;
|
||||
|
||||
const include = includeRaw.map(x => parseSubredditName(x).toLowerCase());
|
||||
const exclude = excludeRaw.map(x => parseSubredditName(x).toLowerCase());
|
||||
|
||||
const {operator, value, isPercent, extra = ''} = parseGenericValueOrPercentComparison(threshold);
|
||||
|
||||
let activities = thresholdOn === 'submissions' ? await this.resources.getAuthorSubmissions(item.author, {window: window}) : await this.resources.getAuthorActivities(item.author, {window: window});
|
||||
activities = activities.filter(act => {
|
||||
if (include.length > 0) {
|
||||
return include.some(x => x === getActivitySubredditName(act).toLowerCase());
|
||||
} else if (exclude.length > 0) {
|
||||
return !exclude.some(x => x === getActivitySubredditName(act).toLowerCase())
|
||||
|
||||
if(include.length > 0 || exclude.length > 0) {
|
||||
const defaultOpts = {
|
||||
defaultFlags: 'i',
|
||||
generateDescription: true
|
||||
};
|
||||
if(include.length > 0) {
|
||||
const subStates = include.map(x => convertSubredditsRawToStrong(x, defaultOpts));
|
||||
activities = await this.resources.batchTestSubredditCriteria(activities, subStates);
|
||||
} else {
|
||||
const subStates = exclude.map(x => convertSubredditsRawToStrong(x, defaultOpts));
|
||||
const toExclude = (await this.resources.batchTestSubredditCriteria(activities, subStates)).map(x => x.id);
|
||||
activities = activities.filter(x => !toExclude.includes(x.id));
|
||||
}
|
||||
}
|
||||
|
||||
activities = await as.filter(activities, async (activity) => {
|
||||
if (asSubmission(activity) && submissionState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [submissionState]);
|
||||
} else if (commentState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [commentState]);
|
||||
}
|
||||
return true;
|
||||
});
|
||||
@@ -392,7 +424,7 @@ export class AttributionRule extends Rule {
|
||||
|
||||
}
|
||||
|
||||
interface AttributionConfig extends ReferenceSubmission {
|
||||
interface AttributionConfig {
|
||||
|
||||
/**
|
||||
* A list threshold-window values to test attribution against
|
||||
|
||||
@@ -12,11 +12,11 @@ export interface AuthorRuleConfig {
|
||||
/**
|
||||
* Will "pass" if any set of AuthorCriteria passes
|
||||
* */
|
||||
include: AuthorCriteria[];
|
||||
include?: AuthorCriteria[];
|
||||
/**
|
||||
* Only runs if include is not present. Will "pass" if any of set of the AuthorCriteria does not pass
|
||||
* */
|
||||
exclude: AuthorCriteria[];
|
||||
exclude?: AuthorCriteria[];
|
||||
}
|
||||
|
||||
export interface AuthorRuleOptions extends AuthorRuleConfig, RuleOptions {
|
||||
@@ -34,8 +34,13 @@ export class AuthorRule extends Rule {
|
||||
constructor(options: AuthorRuleOptions) {
|
||||
super(options);
|
||||
|
||||
this.include = options.include.map(x => new Author(x));
|
||||
this.exclude = options.exclude.map(x => new Author(x));
|
||||
const {
|
||||
include,
|
||||
exclude,
|
||||
} = options;
|
||||
|
||||
this.include = include !== undefined ? include.map(x => new Author(x)) : [];
|
||||
this.exclude = exclude !== undefined ? exclude.map(x => new Author(x)) : [];
|
||||
|
||||
if(this.include.length === 0 && this.exclude.length === 0) {
|
||||
throw new Error('At least one of the properties [include,exclude] on Author Rule must not be empty');
|
||||
|
||||
@@ -1,37 +1,98 @@
|
||||
import {Rule, RuleJSONConfig, RuleOptions, RulePremise, RuleResult} from "./index";
|
||||
import {Comment, VoteableContent} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import as from 'async';
|
||||
import pMap from 'p-map';
|
||||
// @ts-ignore
|
||||
import subImageMatch from 'matches-subimage';
|
||||
import {
|
||||
activityWindowText, asSubmission,
|
||||
comparisonTextOp, FAIL, formatNumber, getActivitySubredditName, isSubmission, objectToStringSummary,
|
||||
parseGenericValueOrPercentComparison, parseStringToRegex, parseSubredditName,
|
||||
activityWindowText,
|
||||
asSubmission, bitsToHexLength,
|
||||
// blockHashImage,
|
||||
compareImages,
|
||||
comparisonTextOp, convertSubredditsRawToStrong,
|
||||
FAIL,
|
||||
formatNumber,
|
||||
getActivitySubredditName, imageCompareMaxConcurrencyGuess,
|
||||
//getImageDataFromUrl,
|
||||
isSubmission,
|
||||
isValidImageURL,
|
||||
objectToStringSummary,
|
||||
parseGenericValueOrPercentComparison,
|
||||
parseStringToRegex,
|
||||
parseSubredditName,
|
||||
parseUsableLinkIdentifier,
|
||||
PASS, toStrongSubredditState
|
||||
PASS, sleep,
|
||||
toStrongSubredditState
|
||||
} from "../util";
|
||||
import {
|
||||
ActivityWindow,
|
||||
ActivityWindowCriteria,
|
||||
ActivityWindowType, CommentState,
|
||||
ReferenceSubmission, StrongSubredditState, SubmissionState,
|
||||
//ImageData,
|
||||
ImageDetection,
|
||||
ReferenceSubmission, StrongImageDetection, StrongSubredditState, SubmissionState,
|
||||
SubredditCriteria, SubredditState
|
||||
} from "../Common/interfaces";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import ImageData from "../Common/ImageData";
|
||||
import {blockhash, hammingDistance} from "../Common/blockhash/blockhash";
|
||||
import leven from "leven";
|
||||
|
||||
const parseLink = parseUsableLinkIdentifier();
|
||||
|
||||
export class RecentActivityRule extends Rule {
|
||||
window: ActivityWindowType;
|
||||
thresholds: ActivityThreshold[];
|
||||
useSubmissionAsReference: boolean;
|
||||
useSubmissionAsReference: boolean | undefined;
|
||||
imageDetection: StrongImageDetection
|
||||
lookAt?: 'comments' | 'submissions';
|
||||
|
||||
constructor(options: RecentActivityRuleOptions) {
|
||||
super(options);
|
||||
const {
|
||||
window = 15,
|
||||
useSubmissionAsReference = true,
|
||||
useSubmissionAsReference,
|
||||
imageDetection,
|
||||
lookAt,
|
||||
} = options || {};
|
||||
|
||||
const {
|
||||
enable = false,
|
||||
fetchBehavior = 'extension',
|
||||
threshold = 5,
|
||||
hash = {},
|
||||
pixel = {},
|
||||
} = imageDetection || {};
|
||||
|
||||
const {
|
||||
enable: hEnable = true,
|
||||
bits = 16,
|
||||
ttl = 60,
|
||||
hardThreshold = threshold,
|
||||
softThreshold
|
||||
} = hash || {};
|
||||
|
||||
const {
|
||||
enable: pEnable = true,
|
||||
threshold: pThreshold = threshold,
|
||||
} = pixel || {};
|
||||
|
||||
this.imageDetection = {
|
||||
enable,
|
||||
fetchBehavior,
|
||||
threshold,
|
||||
hash: {
|
||||
enable: hEnable,
|
||||
hardThreshold,
|
||||
softThreshold,
|
||||
bits,
|
||||
ttl,
|
||||
},
|
||||
pixel: {
|
||||
enable: pEnable,
|
||||
threshold: pThreshold
|
||||
}
|
||||
};
|
||||
this.lookAt = lookAt;
|
||||
this.useSubmissionAsReference = useSubmissionAsReference;
|
||||
this.window = window;
|
||||
@@ -67,22 +128,150 @@ export class RecentActivityRule extends Rule {
|
||||
}
|
||||
|
||||
let viableActivity = activities;
|
||||
if (this.useSubmissionAsReference) {
|
||||
// if config does not specify reference then we set the default based on whether the item is a submission or not
|
||||
// -- this is essentially the same as defaulting reference to true BUT eliminates noisy "can't use comment as reference" log statement when item is a comment
|
||||
let inferredSubmissionAsRef = this.useSubmissionAsReference;
|
||||
if(inferredSubmissionAsRef === undefined) {
|
||||
inferredSubmissionAsRef = isSubmission(item);
|
||||
}
|
||||
if (inferredSubmissionAsRef) {
|
||||
if (!asSubmission(item)) {
|
||||
this.logger.warn('Cannot use post as reference because triggered item is not a Submission');
|
||||
} else if (item.is_self) {
|
||||
this.logger.warn('Cannot use post as reference because triggered Submission is not a link type');
|
||||
} else {
|
||||
const usableUrl = parseLink(await item.url);
|
||||
viableActivity = viableActivity.filter((x) => {
|
||||
if (!asSubmission(x)) {
|
||||
return false;
|
||||
const itemId = item.id;
|
||||
const referenceUrl = await item.url;
|
||||
const usableUrl = parseLink(referenceUrl);
|
||||
let filteredActivity: (Submission|Comment)[] = [];
|
||||
let analysisTimes: number[] = [];
|
||||
let referenceImage: ImageData | undefined;
|
||||
if (this.imageDetection.enable) {
|
||||
try {
|
||||
referenceImage = ImageData.fromSubmission(item);
|
||||
referenceImage.setPreferredResolutionByWidth(800);
|
||||
if(this.imageDetection.hash.enable) {
|
||||
let refHash: string | undefined;
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
refHash = await this.resources.getImageHash(referenceImage);
|
||||
if(refHash === undefined) {
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
await this.resources.setImageHash(referenceImage, refHash, this.imageDetection.hash.ttl);
|
||||
} else if(refHash.length !== bitsToHexLength(this.imageDetection.hash.bits)) {
|
||||
this.logger.warn('Reference image hash length did not correspond to bits specified in config. Recomputing...');
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
await this.resources.setImageHash(referenceImage, refHash, this.imageDetection.hash.ttl);
|
||||
}
|
||||
} else {
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
}
|
||||
}
|
||||
//await referenceImage.sharp();
|
||||
// await referenceImage.hash();
|
||||
// if (referenceImage.preferredResolution !== undefined) {
|
||||
// await (referenceImage.getSimilarResolutionVariant(...referenceImage.preferredResolution) as ImageData).sharp();
|
||||
// }
|
||||
} catch (err: any) {
|
||||
this.logger.verbose(err.message);
|
||||
}
|
||||
}
|
||||
let longRun;
|
||||
if (referenceImage !== undefined) {
|
||||
const l = this.logger;
|
||||
longRun = setTimeout(() => {
|
||||
l.verbose('FYI: Image processing is causing rule to take longer than normal');
|
||||
}, 2500);
|
||||
}
|
||||
// @ts-ignore
|
||||
const ci = async (x: (Submission|Comment)) => {
|
||||
if (!asSubmission(x) || x.id === itemId) {
|
||||
return null;
|
||||
}
|
||||
if (x.url === undefined) {
|
||||
return false;
|
||||
return null;
|
||||
}
|
||||
return parseLink(x.url) === usableUrl;
|
||||
});
|
||||
if (parseLink(x.url) === usableUrl) {
|
||||
return x;
|
||||
}
|
||||
// only do image detection if regular URL comparison and other conditions fail first
|
||||
// to reduce CPU/bandwidth usage
|
||||
if (referenceImage !== undefined) {
|
||||
try {
|
||||
let imgData = ImageData.fromSubmission(x);
|
||||
imgData.setPreferredResolutionByWidth(800);
|
||||
if(this.imageDetection.hash.enable) {
|
||||
let compareHash: string | undefined;
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
compareHash = await this.resources.getImageHash(imgData);
|
||||
}
|
||||
if(compareHash === undefined)
|
||||
{
|
||||
compareHash = await imgData.hash(this.imageDetection.hash.bits);
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
await this.resources.setImageHash(imgData, compareHash, this.imageDetection.hash.ttl);
|
||||
}
|
||||
}
|
||||
const refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
if(refHash.length !== compareHash.length) {
|
||||
this.logger.debug(`Hash lengths were not the same! Will need to recompute compare hash to match reference.\n\nReference: ${referenceImage.baseUrl} has is ${refHash.length} char long | Comparing: ${imgData.baseUrl} has is ${compareHash} ${compareHash.length} long`);
|
||||
compareHash = await imgData.hash(this.imageDetection.hash.bits)
|
||||
}
|
||||
const distance = leven(refHash, compareHash);
|
||||
const diff = (distance/refHash.length)*100;
|
||||
|
||||
|
||||
// return image if hard is defined and diff is less
|
||||
if(null !== this.imageDetection.hash.hardThreshold && diff <= this.imageDetection.hash.hardThreshold) {
|
||||
return x;
|
||||
}
|
||||
// hard is either not defined or diff was gerater than hard
|
||||
|
||||
// if soft is defined
|
||||
if (this.imageDetection.hash.softThreshold !== undefined) {
|
||||
// and diff is greater than soft allowance
|
||||
if(diff > this.imageDetection.hash.softThreshold) {
|
||||
// not similar enough
|
||||
return null;
|
||||
}
|
||||
// similar enough, will continue on to pixel (if enabled!)
|
||||
} else {
|
||||
// only hard was defined and did not pass
|
||||
return null;
|
||||
}
|
||||
}
|
||||
// at this point either hash was not enabled or it was and we hit soft threshold but not hard
|
||||
if(this.imageDetection.pixel.enable) {
|
||||
try {
|
||||
const [compareResult, sameImage] = await compareImages(referenceImage, imgData, this.imageDetection.pixel.threshold / 100);
|
||||
analysisTimes.push(compareResult.analysisTime);
|
||||
if (sameImage) {
|
||||
return x;
|
||||
}
|
||||
} catch (err: any) {
|
||||
this.logger.warn(`Unexpected error encountered while pixel-comparing images, will skip comparison => ${err.message}`);
|
||||
}
|
||||
}
|
||||
} catch (err: any) {
|
||||
if(!err.message.includes('did not end with a valid image extension')) {
|
||||
this.logger.warn(`Will not compare image from Submission ${x.id} due to error while parsing image URL => ${err.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
// parallel all the things
|
||||
this.logger.profile('asyncCompare');
|
||||
const results = await pMap(viableActivity, ci, {concurrency: imageCompareMaxConcurrencyGuess});
|
||||
this.logger.profile('asyncCompare', {level: 'debug', message: 'Total time for image comparison (incl download/cache calls)'});
|
||||
const totalAnalysisTime = analysisTimes.reduce((acc, x) => acc + x,0);
|
||||
if(analysisTimes.length > 0) {
|
||||
this.logger.debug(`Reference image pixel-compared ${analysisTimes.length} times. Timings: Avg ${formatNumber(totalAnalysisTime / analysisTimes.length, {toFixed: 0})}ms | Max: ${Math.max(...analysisTimes)}ms | Min: ${Math.min(...analysisTimes)}ms | Total: ${totalAnalysisTime}ms (${formatNumber(totalAnalysisTime/1000)}s)`);
|
||||
}
|
||||
filteredActivity = filteredActivity.concat(results.filter(x => x !== null));
|
||||
if (longRun !== undefined) {
|
||||
clearTimeout(longRun);
|
||||
}
|
||||
viableActivity = filteredActivity;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,43 +290,71 @@ export class RecentActivityRule extends Rule {
|
||||
} = triggerSet;
|
||||
|
||||
// convert subreddits array into entirely StrongSubredditState
|
||||
const subStates: StrongSubredditState[] = subreddits.map((x) => {
|
||||
if(typeof x === 'string') {
|
||||
return toStrongSubredditState({name: x, stateDescription: x}, {defaultFlags: 'i', generateDescription: true});
|
||||
const defaultOpts = {
|
||||
defaultFlags: 'i',
|
||||
generateDescription: true
|
||||
};
|
||||
const subStates: StrongSubredditState[] = subreddits.map((x) => convertSubredditsRawToStrong(x, defaultOpts));
|
||||
|
||||
let validActivity: (Comment | Submission)[] = await as.filter(viableActivity, async (activity) => {
|
||||
if (asSubmission(activity) && submissionState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [submissionState]);
|
||||
} else if (commentState !== undefined) {
|
||||
return await this.resources.testItemCriteria(activity, [commentState]);
|
||||
}
|
||||
return toStrongSubredditState(x, {defaultFlags: 'i', generateDescription: true});
|
||||
return true;
|
||||
});
|
||||
|
||||
for(const activity of viableActivity) {
|
||||
if(asSubmission(activity) && submissionState !== undefined) {
|
||||
if(!(await this.resources.testItemCriteria(activity, [submissionState]))) {
|
||||
validActivity = await this.resources.batchTestSubredditCriteria(validActivity, subStates);
|
||||
for (const activity of validActivity) {
|
||||
currCount++;
|
||||
// @ts-ignore
|
||||
combinedKarma += activity.score;
|
||||
const pSub = getActivitySubredditName(activity);
|
||||
if (!presentSubs.includes(pSub)) {
|
||||
presentSubs.push(pSub);
|
||||
}
|
||||
}
|
||||
|
||||
for (const activity of viableActivity) {
|
||||
if (asSubmission(activity) && submissionState !== undefined) {
|
||||
if (!(await this.resources.testItemCriteria(activity, [submissionState]))) {
|
||||
continue;
|
||||
}
|
||||
} else if(commentState !== undefined) {
|
||||
if(!(await this.resources.testItemCriteria(activity, [commentState]))) {
|
||||
} else if (commentState !== undefined) {
|
||||
if (!(await this.resources.testItemCriteria(activity, [commentState]))) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let inSubreddits = false;
|
||||
for(const ss of subStates) {
|
||||
for (const ss of subStates) {
|
||||
const res = await this.resources.testSubredditCriteria(activity, ss);
|
||||
if(res) {
|
||||
if (res) {
|
||||
inSubreddits = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(inSubreddits) {
|
||||
if (inSubreddits) {
|
||||
currCount++;
|
||||
combinedKarma += activity.score;
|
||||
const pSub = getActivitySubredditName(activity);
|
||||
if(!presentSubs.includes(pSub)) {
|
||||
if (!presentSubs.includes(pSub)) {
|
||||
presentSubs.push(pSub);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const {operator, value, isPercent} = parseGenericValueOrPercentComparison(threshold);
|
||||
let sum = {subsWithActivity: presentSubs, combinedKarma, karmaThreshold, subreddits: subStates.map(x => x.stateDescription), count: currCount, threshold, triggered: false, testValue: currCount.toString()};
|
||||
let sum = {
|
||||
subsWithActivity: presentSubs,
|
||||
combinedKarma,
|
||||
karmaThreshold,
|
||||
subreddits: subStates.map(x => x.stateDescription),
|
||||
count: currCount,
|
||||
threshold,
|
||||
triggered: false,
|
||||
testValue: currCount.toString()
|
||||
};
|
||||
if (isPercent) {
|
||||
sum.testValue = `${formatNumber((currCount / viableActivity.length) * 100)}%`;
|
||||
if (comparisonTextOp(currCount / viableActivity.length, operator, value / 100)) {
|
||||
@@ -149,9 +366,9 @@ export class RecentActivityRule extends Rule {
|
||||
totalTriggeredOn = sum;
|
||||
}
|
||||
// if we would trigger on threshold need to also test for karma
|
||||
if(totalTriggeredOn !== undefined && karmaThreshold !== undefined) {
|
||||
if (totalTriggeredOn !== undefined && karmaThreshold !== undefined) {
|
||||
const {operator: opKarma, value: valueKarma} = parseGenericValueOrPercentComparison(karmaThreshold);
|
||||
if(!comparisonTextOp(combinedKarma, opKarma, valueKarma)) {
|
||||
if (!comparisonTextOp(combinedKarma, opKarma, valueKarma)) {
|
||||
sum.triggered = false;
|
||||
totalTriggeredOn = undefined;
|
||||
}
|
||||
@@ -169,7 +386,7 @@ export class RecentActivityRule extends Rule {
|
||||
result = `${PASS} ${resultData.result}`;
|
||||
this.logger.verbose(result);
|
||||
return Promise.resolve([true, this.getResult(true, resultData)]);
|
||||
} else if(summaries.length === 1) {
|
||||
} else if (summaries.length === 1) {
|
||||
// can display result if its only one summary otherwise need to log to debug
|
||||
const res = this.generateResultData(summaries[0], viableActivity);
|
||||
result = `${FAIL} ${res.result}`;
|
||||
@@ -182,7 +399,7 @@ export class RecentActivityRule extends Rule {
|
||||
|
||||
return Promise.resolve([false, this.getResult(false, {result})]);
|
||||
}
|
||||
|
||||
|
||||
generateResultData(summary: any, activities: (Submission | Comment)[] = []) {
|
||||
const {
|
||||
count,
|
||||
@@ -196,7 +413,7 @@ export class RecentActivityRule extends Rule {
|
||||
} = summary;
|
||||
const relevantSubs = subsWithActivity.length === 0 ? subreddits : subsWithActivity;
|
||||
let totalSummary = `${testValue} activities over ${relevantSubs.length} subreddits${karmaThreshold !== undefined ? ` with ${combinedKarma} combined karma` : ''} ${triggered ? 'met' : 'did not meet'} threshold of ${threshold}${karmaThreshold !== undefined ? ` and ${karmaThreshold} combined karma` : ''}`;
|
||||
if(triggered && subsWithActivity.length > 0) {
|
||||
if (triggered && subsWithActivity.length > 0) {
|
||||
totalSummary = `${totalSummary} -- subreddits: ${subsWithActivity.join(', ')}`;
|
||||
}
|
||||
return {
|
||||
@@ -210,6 +427,7 @@ export class RecentActivityRule extends Rule {
|
||||
threshold,
|
||||
testValue,
|
||||
karmaThreshold,
|
||||
combinedKarma,
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -223,12 +441,12 @@ export class RecentActivityRule extends Rule {
|
||||
* */
|
||||
export interface ActivityThreshold {
|
||||
/**
|
||||
* When present, a Submission will only be counted if it meets this criteria
|
||||
* */
|
||||
* When present, a Submission will only be counted if it meets this criteria
|
||||
* */
|
||||
submissionState?: SubmissionState
|
||||
/**
|
||||
* When present, a Comment will only be counted if it meets this criteria
|
||||
* */
|
||||
* When present, a Comment will only be counted if it meets this criteria
|
||||
* */
|
||||
commentState?: CommentState
|
||||
|
||||
/**
|
||||
@@ -288,6 +506,18 @@ interface RecentActivityConfig extends ActivityWindow, ReferenceSubmission {
|
||||
* @minItems 1
|
||||
* */
|
||||
thresholds: ActivityThreshold[],
|
||||
|
||||
imageDetection?: ImageDetection
|
||||
|
||||
/**
|
||||
* When Activity is a submission should we only include activities that are other submissions with the same content?
|
||||
*
|
||||
* * When the Activity is a submission this defaults to **true**
|
||||
* * When the Activity is a comment it is ignored (not relevant)
|
||||
*
|
||||
* @default true
|
||||
* */
|
||||
useSubmissionAsReference?: boolean
|
||||
}
|
||||
|
||||
export interface RecentActivityRuleOptions extends RecentActivityConfig, RuleOptions {
|
||||
|
||||
@@ -296,6 +296,7 @@ export class RegexRule extends Rule {
|
||||
|
||||
const logSummary: string[] = [];
|
||||
let index = 0;
|
||||
let matchSample = undefined;
|
||||
for (const c of criteriaResults) {
|
||||
index++;
|
||||
let msg = `Criteria ${c.criteria.name || `#${index}`} ${triggeredIndicator(c.triggered)}`;
|
||||
@@ -309,8 +310,8 @@ export class RegexRule extends Rule {
|
||||
}
|
||||
msg = `${msg} (Window: ${c.criteria.window})`;
|
||||
if(c.matches.length > 0) {
|
||||
let matchSample = `-- Matched Values: ${c.matches.slice(0, 3).map(x => `"${x}"`).join(', ')}${c.matches.length > 3 ? `, and ${c.matches.length - 3} more...` : ''}`;
|
||||
logSummary.push(`${msg} ${matchSample}`);
|
||||
matchSample = `${c.matches.slice(0, 3).map(x => `"${x}"`).join(', ')}${c.matches.length > 3 ? `, and ${c.matches.length - 3} more...` : ''}`;
|
||||
logSummary.push(`${msg} -- Matched Values: ${matchSample}`);
|
||||
} else {
|
||||
logSummary.push(msg);
|
||||
}
|
||||
@@ -319,7 +320,7 @@ export class RegexRule extends Rule {
|
||||
const result = `${triggeredIndicator(criteriaMet)} ${logSummary.join(' || ')}`;
|
||||
this.logger.verbose(result);
|
||||
|
||||
return Promise.resolve([criteriaMet, this.getResult(criteriaMet, {result, data: criteriaResults})]);
|
||||
return Promise.resolve([criteriaMet, this.getResult(criteriaMet, {result, data: {results: criteriaResults, matchSample }})]);
|
||||
}
|
||||
|
||||
protected getMatchesFromActivity(a: (Submission | Comment), testOn: string[], reg: RegExp): string[] {
|
||||
|
||||
@@ -4,7 +4,7 @@ import {
|
||||
activityWindowText, asSubmission,
|
||||
comparisonTextOp, FAIL, getActivitySubredditName, isExternalUrlSubmission, isRedditMedia,
|
||||
parseGenericValueComparison, parseSubredditName,
|
||||
parseUsableLinkIdentifier as linkParser, PASS, toStrongSubredditState
|
||||
parseUsableLinkIdentifier as linkParser, PASS, subredditStateIsNameOnly, toStrongSubredditState
|
||||
} from "../util";
|
||||
import {
|
||||
ActivityWindow,
|
||||
@@ -58,6 +58,7 @@ export class RepeatActivityRule extends Rule {
|
||||
lookAt: 'submissions' | 'all';
|
||||
include: (string | SubredditState)[];
|
||||
exclude: (string | SubredditState)[];
|
||||
hasFullSubredditCrits: boolean = false;
|
||||
activityFilterFunc: (x: Submission|Comment) => Promise<boolean> = async (x) => true;
|
||||
keepRemoved: boolean;
|
||||
minWordCount: number;
|
||||
@@ -91,6 +92,7 @@ export class RepeatActivityRule extends Rule {
|
||||
}
|
||||
return toStrongSubredditState(x, {defaultFlags: 'i', generateDescription: true});
|
||||
});
|
||||
this.hasFullSubredditCrits = !subStates.every(x => subredditStateIsNameOnly(x));
|
||||
this.activityFilterFunc = async (x: Submission|Comment) => {
|
||||
for(const ss of subStates) {
|
||||
if(await this.resources.testSubredditCriteria(x, ss)) {
|
||||
@@ -106,6 +108,7 @@ export class RepeatActivityRule extends Rule {
|
||||
}
|
||||
return toStrongSubredditState(x, {defaultFlags: 'i', generateDescription: true});
|
||||
});
|
||||
this.hasFullSubredditCrits = !subStates.every(x => subredditStateIsNameOnly(x));
|
||||
this.activityFilterFunc = async (x: Submission|Comment) => {
|
||||
for(const ss of subStates) {
|
||||
if(await this.resources.testSubredditCriteria(x, ss)) {
|
||||
@@ -149,6 +152,12 @@ export class RepeatActivityRule extends Rule {
|
||||
break;
|
||||
}
|
||||
|
||||
if(this.hasFullSubredditCrits) {
|
||||
// go ahead and cache subreddits now
|
||||
// because we can't use batch test since testing activities in order is important for this rule
|
||||
await this.resources.cacheSubreddits(activities.map(x => x.subreddit));
|
||||
}
|
||||
|
||||
const condensedActivities = await activities.reduce(async (accProm: Promise<RepeatActivityReducer>, activity: (Submission | Comment), index: number) => {
|
||||
const acc = await accProm;
|
||||
const {openSets = [], allSets = []} = acc;
|
||||
|
||||
897
src/Rule/RepostRule.ts
Normal file
897
src/Rule/RepostRule.ts
Normal file
@@ -0,0 +1,897 @@
|
||||
import {Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
|
||||
import {Listing, SearchOptions} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import Comment from "snoowrap/dist/objects/Comment";
|
||||
import {
|
||||
compareDurationValue,
|
||||
comparisonTextOp,
|
||||
FAIL, formatNumber,
|
||||
isRepostItemResult, parseDurationComparison, parseGenericValueComparison,
|
||||
parseUsableLinkIdentifier,
|
||||
PASS, searchAndReplace, stringSameness, triggeredIndicator, windowToActivityWindowCriteria, wordCount
|
||||
} from "../util";
|
||||
import {
|
||||
ActivityWindow,
|
||||
ActivityWindowType,
|
||||
CompareValue, DurationComparor,
|
||||
JoinOperands,
|
||||
RepostItem,
|
||||
RepostItemResult,
|
||||
SearchAndReplaceRegExp,
|
||||
SearchFacetType,
|
||||
} from "../Common/interfaces";
|
||||
import objectHash from "object-hash";
|
||||
import {getActivities, getAttributionIdentifier} from "../Utils/SnoowrapUtils";
|
||||
import Fuse from "fuse.js";
|
||||
import leven from "leven";
|
||||
import {YoutubeClient, commentsAsRepostItems} from "../Utils/ThirdParty/YoutubeClient";
|
||||
import dayjs from "dayjs";
|
||||
import {rest} from "lodash";
|
||||
|
||||
const parseYtIdentifier = parseUsableLinkIdentifier();
|
||||
|
||||
export interface TextMatchOptions {
|
||||
/**
|
||||
* The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match
|
||||
*
|
||||
* Note: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere
|
||||
*
|
||||
* Defaults to `85` (85%)
|
||||
*
|
||||
* @default 85
|
||||
* @example [85]
|
||||
* */
|
||||
matchScore?: number
|
||||
|
||||
/**
|
||||
* The minimum number of words in the activity being checked for which this rule will run on
|
||||
*
|
||||
* If the word count is below the minimum the rule fails
|
||||
*
|
||||
* Defaults to 2
|
||||
*
|
||||
* @default 2
|
||||
* @example [2]
|
||||
* */
|
||||
minWordCount?: number
|
||||
|
||||
/**
|
||||
* Should text matching be case sensitive?
|
||||
*
|
||||
* Defaults to false
|
||||
*
|
||||
* @default false
|
||||
* @example [false]
|
||||
**/
|
||||
caseSensitive?: boolean
|
||||
}
|
||||
|
||||
export interface TextTransformOptions {
|
||||
/**
|
||||
* A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.
|
||||
*
|
||||
* * If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text
|
||||
* * If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text
|
||||
* */
|
||||
transformations?: SearchAndReplaceRegExp[]
|
||||
|
||||
/**
|
||||
* Specify a separate set of transformations for the activity text (submission title or comment)
|
||||
*
|
||||
* To perform no transformations when `transformations` is defined set this to an empty array (`[]`)
|
||||
* */
|
||||
transformationsActivity?: SearchAndReplaceRegExp[]
|
||||
}
|
||||
|
||||
export interface SearchFacetJSONConfig extends TextMatchOptions, TextTransformOptions, ActivityWindow {
|
||||
kind: SearchFacetType | SearchFacetType[]
|
||||
}
|
||||
|
||||
export interface SearchFacet extends SearchFacetJSONConfig {
|
||||
kind: SearchFacetType
|
||||
}
|
||||
|
||||
export type TimeBasedSelector = "newest" | "oldest" | "any" | "all";
|
||||
|
||||
export interface OccurredAt {
|
||||
/**
|
||||
* Which repost to test on
|
||||
*
|
||||
* * `any` -- ANY repost passing `condition` will cause this criteria to be true
|
||||
* * `all` -- ALL reposts must pass `condition` for this criteria to be true
|
||||
* */
|
||||
"testOn": TimeBasedSelector,
|
||||
"condition": DurationComparor
|
||||
}
|
||||
|
||||
export interface OccurrenceTests {
|
||||
count?: {
|
||||
condition?: JoinOperands
|
||||
/**
|
||||
* An array of strings containing a comparison operator and the number of repost occurrences to compare against
|
||||
*
|
||||
* Examples:
|
||||
*
|
||||
* * `">= 7"` -- TRUE if 7 or more reposts were found
|
||||
* * `"< 1"` -- TRUE if less than 0 reposts were found
|
||||
* */
|
||||
test: CompareValue[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the time the reposts occurred at
|
||||
* */
|
||||
time?: {
|
||||
/**
|
||||
* How to test all the specified comparisons
|
||||
*
|
||||
* * AND -- All criteria must be true
|
||||
* * OR -- Any criteria must be true
|
||||
*
|
||||
* Defaults to AND
|
||||
*
|
||||
* @default AND
|
||||
* @example ["AND", "OR"]
|
||||
* */
|
||||
condition?: JoinOperands
|
||||
/**
|
||||
* An array of time-based conditions to test against found reposts (test when a repost was made)
|
||||
* */
|
||||
test: OccurredAt[]
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A set of criteria used to find reposts
|
||||
*
|
||||
* Contains options and conditions used to define how candidate reposts are retrieved and if they are a match.
|
||||
*
|
||||
* */
|
||||
export interface RepostCriteria extends ActivityWindow, TextMatchOptions, TextTransformOptions {
|
||||
/**
|
||||
* Define how to find candidate reposts
|
||||
*
|
||||
* * **title** -- search reddit for submissions with the same title
|
||||
* * **url** -- search reddit for submissions with the same url
|
||||
* * **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)
|
||||
* */
|
||||
searchOn?: (SearchFacetType | SearchFacetJSONConfig)[]
|
||||
|
||||
/**
|
||||
* A set of comparisons to test against the number of reposts found
|
||||
*
|
||||
* If not specified the default is "AND [occurrences] > 0" IE any reposts makes this test pass
|
||||
* */
|
||||
occurrences?: {
|
||||
/**
|
||||
* How to test all the specified comparisons
|
||||
*
|
||||
* * AND -- All criteria must be true
|
||||
* * OR -- Any criteria must be true
|
||||
*
|
||||
* Defaults to AND
|
||||
*
|
||||
* @default AND
|
||||
* @example ["AND", "OR"]
|
||||
* */
|
||||
condition?: JoinOperands
|
||||
|
||||
criteria?: OccurrenceTests[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the time the reposts occurred at
|
||||
* */
|
||||
occurredAt?: {
|
||||
/**
|
||||
* How to test all the specified comparisons
|
||||
*
|
||||
* * AND -- All criteria must be true
|
||||
* * OR -- Any criteria must be true
|
||||
*
|
||||
* Defaults to AND
|
||||
*
|
||||
* @default AND
|
||||
* @example ["AND", "OR"]
|
||||
* */
|
||||
condition?: JoinOperands
|
||||
/**
|
||||
* An array of time-based conditions to test against found reposts (test when a repost was made)
|
||||
* */
|
||||
criteria: OccurredAt[]
|
||||
}
|
||||
|
||||
/**
|
||||
* The maximum number of comments/submissions to check
|
||||
*
|
||||
* In both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the "top" maximum specified
|
||||
*
|
||||
* For comment checks this is the number of comments cached
|
||||
*
|
||||
* @default 50
|
||||
* @example [50]
|
||||
* */
|
||||
maxRedditItems?: number
|
||||
|
||||
/**
|
||||
* The maximum number of external items (youtube comments) to check (and cache for comment checks)
|
||||
*
|
||||
* @default 50
|
||||
* @example [50]
|
||||
* */
|
||||
maxExternalItems?: number
|
||||
}
|
||||
|
||||
export interface CriteriaResult {
|
||||
passed: boolean
|
||||
conditionsSummary: string
|
||||
items: RepostItemResult[]
|
||||
}
|
||||
|
||||
const parentSubmissionSearchFacetDefaults = {
|
||||
title: {
|
||||
matchScore: 85,
|
||||
minWordCount: 3
|
||||
},
|
||||
url: {
|
||||
matchScore: 0, // when looking for submissions to find repost comments on automatically include any with exact same url
|
||||
},
|
||||
duplicates: {
|
||||
matchScore: 0, // when looking for submissions to find repost comments on automatically include any that reddit thinks are duplicates
|
||||
},
|
||||
crossposts: {
|
||||
matchScore: 0, // when looking for submissions to find repost comments on automatically include any that reddit thinks are crossposts
|
||||
},
|
||||
external: {}
|
||||
}
|
||||
|
||||
const isSearchFacetType = (val: any): val is SearchFacetType => {
|
||||
if (typeof val === 'string') {
|
||||
return ['title', 'url', 'duplicates', 'crossposts', 'external'].includes(val);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const generateSearchFacet = (val: SearchFacetType | SearchFacetJSONConfig): SearchFacet[] => {
|
||||
let facets: SearchFacet[] = [];
|
||||
if (isSearchFacetType(val)) {
|
||||
facets.push({
|
||||
kind: val
|
||||
});
|
||||
} else if (Array.isArray(val.kind)) {
|
||||
facets.concat(val.kind.map(x => ({...val, kind: x})));
|
||||
} else {
|
||||
facets.push(val as SearchFacet);
|
||||
}
|
||||
|
||||
return facets.map(x => {
|
||||
return {
|
||||
...parentSubmissionSearchFacetDefaults[x.kind],
|
||||
...x,
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export class RepostRule extends Rule {
|
||||
criteria: RepostCriteria[]
|
||||
condition: JoinOperands;
|
||||
|
||||
submission?: Submission;
|
||||
|
||||
constructor(options: RepostRuleOptions) {
|
||||
super(options);
|
||||
const {
|
||||
criteria = [{}],
|
||||
condition = 'OR'
|
||||
} = options || {};
|
||||
if (criteria.length < 1) {
|
||||
throw new Error('Must provide at least one RepostCriteria');
|
||||
}
|
||||
this.criteria = criteria;
|
||||
this.condition = condition;
|
||||
}
|
||||
|
||||
getKind(): string {
|
||||
return 'Repost';
|
||||
}
|
||||
|
||||
protected getSpecificPremise(): object {
|
||||
return {
|
||||
criteria: this.criteria,
|
||||
condition: this.condition
|
||||
}
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
protected async getSubmission(item: Submission | Comment) {
|
||||
if (item instanceof Comment) {
|
||||
// @ts-ignore
|
||||
return await this.client.getSubmission(item.link_id).fetch();
|
||||
}
|
||||
return item;
|
||||
}
|
||||
|
||||
protected async process(item: Submission | Comment): Promise<[boolean, RuleResult]> {
|
||||
|
||||
let criteriaResults: CriteriaResult[] = [];
|
||||
let ytClient: YoutubeClient | undefined = undefined;
|
||||
let criteriaMatchedResults: RepostItemResult[] = [];
|
||||
let totalSubs = 0;
|
||||
let totalCommentSubs = 0;
|
||||
let totalComments = 0;
|
||||
let totalExternal = new Map<string,number>();
|
||||
let fromCache = false;
|
||||
let andFail = false;
|
||||
|
||||
for (const rCriteria of this.criteria) {
|
||||
criteriaMatchedResults = [];
|
||||
const {
|
||||
searchOn = (item instanceof Submission ? ['title', 'url', 'duplicates', 'crossposts'] : ['external', 'title', 'url', 'duplicates', 'crossposts']),
|
||||
//criteria = {},
|
||||
maxRedditItems = 50,
|
||||
maxExternalItems = 50,
|
||||
window = 20,
|
||||
...restCriteria
|
||||
} = rCriteria;
|
||||
|
||||
const searchFacets = searchOn.map(x => generateSearchFacet(x)).flat(1) as SearchFacet[];
|
||||
|
||||
const includeCrossposts = searchFacets.some(x => x.kind === 'crossposts');
|
||||
|
||||
// in getDuplicate() options add "crossposts_only=1" to get only crossposts https://www.reddit.com/r/redditdev/comments/b4t5g4/get_all_the_subreddits_that_a_post_has_been/
|
||||
// if a submission is a crosspost it has "crosspost_parent" attribute https://www.reddit.com/r/redditdev/comments/l46y2l/check_if_post_is_a_crosspost/
|
||||
|
||||
const strongWindow = windowToActivityWindowCriteria(window);
|
||||
|
||||
const candidateHash = `repostItems-${item instanceof Submission ? item.id : item.link_id}-${objectHash.sha1({
|
||||
window,
|
||||
searchOn
|
||||
})}`;
|
||||
let items: (RepostItem|RepostItemResult)[] = [];
|
||||
let cacheRes = undefined;
|
||||
if (item instanceof Comment) {
|
||||
cacheRes = await this.resources.cache.get(candidateHash) as ((RepostItem|RepostItemResult)[] | undefined | null);
|
||||
}
|
||||
|
||||
if (cacheRes === undefined || cacheRes === null) {
|
||||
|
||||
const sub = await this.getSubmission(item);
|
||||
let dups: (Submission[] | undefined) = undefined;
|
||||
|
||||
for (const sf of searchFacets) {
|
||||
|
||||
const {
|
||||
matchScore = 85,
|
||||
minWordCount = 3,
|
||||
transformations = [],
|
||||
} = sf;
|
||||
|
||||
if (sf.kind === 'external') {
|
||||
const attribution = getAttributionIdentifier(sub);
|
||||
switch (attribution.provider) {
|
||||
case 'YouTube':
|
||||
const ytCreds = this.resources.getThirdPartyCredentials('youtube')
|
||||
if (ytCreds === undefined) {
|
||||
throw new Error('Cannot extract comments from Youtube because a Youtube Data API key was not provided in configuration');
|
||||
}
|
||||
if (ytClient === undefined) {
|
||||
ytClient = new YoutubeClient(ytCreds.apiKey);
|
||||
}
|
||||
const ytComments = commentsAsRepostItems(await ytClient.getVideoTopComments(sub.url, maxExternalItems));
|
||||
items = items.concat(ytComments)
|
||||
totalExternal.set('Youtube comments', (totalExternal.get('Youtube comments') ?? 0) + ytComments.length);
|
||||
break;
|
||||
default:
|
||||
if (attribution.provider === undefined) {
|
||||
this.logger.debug('Unable to determine external provider');
|
||||
continue;
|
||||
} else {
|
||||
this.logger.debug(`External parsing of ${attribution} is not supported yet.`);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let subs: Submission[];
|
||||
|
||||
if (['title', 'url'].includes(sf.kind)) {
|
||||
let query: string;
|
||||
let searchFunc: (limit: number) => Promise<Listing<Submission | Comment>>;
|
||||
if (sf.kind === 'title') {
|
||||
query = (await this.getSubmission(item)).title;
|
||||
searchFunc = (limit: number) => {
|
||||
let opts: SearchOptions = {
|
||||
query,
|
||||
limit,
|
||||
sort: 'relevance'
|
||||
};
|
||||
if (strongWindow.subreddits?.include !== undefined && strongWindow.subreddits?.include.length > 0) {
|
||||
opts.restrictSr = true;
|
||||
opts.subreddit = strongWindow.subreddits?.include.join('+');
|
||||
}
|
||||
return this.client.search(opts);
|
||||
}
|
||||
} else {
|
||||
const attr = getAttributionIdentifier(sub);
|
||||
if (attr.provider === 'YouTube') {
|
||||
const ytId = parseYtIdentifier(sub.url);
|
||||
query = `url:https://youtu.be/${ytId}`;
|
||||
} else {
|
||||
query = `url:${sub.url}`;
|
||||
}
|
||||
searchFunc = (limit: number) => {
|
||||
let opts: SearchOptions = {
|
||||
query,
|
||||
limit,
|
||||
sort: 'top'
|
||||
};
|
||||
if (strongWindow.subreddits?.include !== undefined && strongWindow.subreddits?.include.length > 0) {
|
||||
opts.restrictSr = true;
|
||||
opts.subreddit = strongWindow.subreddits?.include.join('+');
|
||||
}
|
||||
return this.client.search(opts);
|
||||
}
|
||||
}
|
||||
subs = await getActivities(searchFunc, {window: strongWindow}) as Submission[];
|
||||
} else {
|
||||
|
||||
if (dups === undefined) {
|
||||
let searchFunc: (limit: number) => Promise<Listing<Submission | Comment>> = (limit: number) => {
|
||||
// this does not work correctly
|
||||
// see https://github.com/not-an-aardvark/snoowrap/issues/320
|
||||
// searchFunc = (limit: number) => {
|
||||
// return sub.getDuplicates({crossposts_only: 0, limit});
|
||||
// };
|
||||
return this.client.oauthRequest({
|
||||
uri: `duplicates/${sub.id}`,
|
||||
qs: {
|
||||
limit,
|
||||
}
|
||||
}).then(x => {
|
||||
return Promise.resolve(x.comments) as Promise<Listing<Submission>>
|
||||
});
|
||||
};
|
||||
subs = await getActivities(searchFunc, {window: strongWindow}) as Submission[];
|
||||
dups = subs;
|
||||
} else {
|
||||
subs = dups;
|
||||
}
|
||||
|
||||
if (sf.kind === 'duplicates') {
|
||||
// @ts-ignore
|
||||
subs = subs.filter(x => x.crosspost_parent === undefined)
|
||||
} else {
|
||||
// @ts-ignore
|
||||
subs = subs.filter(x => x.crosspost_parent !== undefined && x.crosspost_parent === sub.id)
|
||||
}
|
||||
}
|
||||
|
||||
// filter by minimum word count
|
||||
subs = subs.filter(x => wordCount(x.title) > minWordCount);
|
||||
|
||||
items = items.concat(subs.map(x => ({
|
||||
value: searchAndReplace(x.title, transformations),
|
||||
createdOn: x.created,
|
||||
source: 'reddit',
|
||||
sourceUrl: x.permalink,
|
||||
id: x.id,
|
||||
score: x.score,
|
||||
itemType: 'submission',
|
||||
acquisitionType: sf.kind,
|
||||
sourceObj: x,
|
||||
reqSameness: matchScore,
|
||||
})));
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if (!includeCrossposts) {
|
||||
const sub = await this.getSubmission(item);
|
||||
// remove submissions if they are official crossposts of the submission being checked and searchOn did not include 'crossposts'
|
||||
items = items.filter(x => x.itemType !== 'submission' || !(x.sourceObj.crosspost_parent !== undefined && x.sourceObj.crosspost_parent === sub.id))
|
||||
}
|
||||
|
||||
let sourceTitle = searchAndReplace(sub.title, restCriteria.transformationsActivity ?? []);
|
||||
|
||||
// do submission scoring BEFORE pruning duplicates bc...
|
||||
// might end up in a situation where we get same submission for both title and url
|
||||
// -- url is always a repost but title is not guaranteed and we if remove the url item but not the title we could potentially filter the title submission out and miss this repost
|
||||
items = items.reduce((acc: (RepostItem|RepostItemResult)[], x) => {
|
||||
if(x.itemType === 'submission') {
|
||||
totalSubs++;
|
||||
const sf = searchFacets.find(y => y.kind === x.acquisitionType) as SearchFacet;
|
||||
|
||||
let cleanTitle = x.value;
|
||||
if (!(sf.caseSensitive ?? false)) {
|
||||
cleanTitle = cleanTitle.toLowerCase();
|
||||
}
|
||||
const strMatchResults = stringSameness(sourceTitle, cleanTitle);
|
||||
if(strMatchResults.highScoreWeighted >= (x.reqSameness as number)) {
|
||||
return acc.concat({
|
||||
...x,
|
||||
sameness: Math.min(strMatchResults.highScoreWeighted, 100),
|
||||
});
|
||||
}
|
||||
return acc;
|
||||
}
|
||||
return acc.concat(x);
|
||||
}, []);
|
||||
|
||||
// now remove duplicate submissions
|
||||
items = items.reduce((acc: RepostItem[], curr) => {
|
||||
if(curr.itemType !== 'submission') {
|
||||
return acc.concat(curr);
|
||||
}
|
||||
const subId = curr.sourceObj.id;
|
||||
if (sub.id !== subId && !acc.some(x => x.itemType === 'submission' && x.sourceObj.id === subId)) {
|
||||
return acc.concat(curr);
|
||||
}
|
||||
return acc;
|
||||
}, []);
|
||||
|
||||
|
||||
if (item instanceof Comment) {
|
||||
// we need to gather comments from submissions
|
||||
|
||||
// first cut down the number of submissions to retrieve because we don't care about have ALL submissions,
|
||||
// just most popular comments (which will be in the most popular submissions)
|
||||
let subs = items.filter(x => x.itemType === 'submission').map(x => x.sourceObj) as Submission[];
|
||||
totalCommentSubs += subs.length;
|
||||
|
||||
const nonSubItems = items.filter(x => x.itemType !== 'submission' && wordCount(x.value) > (restCriteria.minWordCount ?? 3));
|
||||
|
||||
subs.sort((a, b) => a.score - b.score).reverse();
|
||||
// take top 10 submissions
|
||||
subs = subs.slice(0, 10);
|
||||
|
||||
let comments: Comment[] = [];
|
||||
for (const sub of subs) {
|
||||
|
||||
const commFunc = (limit: number) => {
|
||||
return this.client.oauthRequest({
|
||||
uri: `${sub.subreddit_name_prefixed}/comments/${sub.id}`,
|
||||
// get ONLY top-level comments, sorted by Top
|
||||
qs: {
|
||||
sort: 'top',
|
||||
depth: 0,
|
||||
limit,
|
||||
}
|
||||
}).then(x => {
|
||||
return x.comments as Promise<Listing<Comment>>
|
||||
});
|
||||
}
|
||||
// and return the top 20 most popular
|
||||
const subComments = await getActivities(commFunc, {window: {count: 20}, skipReplies: true}) as Listing<Comment>;
|
||||
comments = comments.concat(subComments);
|
||||
}
|
||||
|
||||
// sort by highest scores
|
||||
comments.sort((a, b) => a.score - b.score).reverse();
|
||||
// filter out all comments with fewer words than required (prevent false negatives)
|
||||
comments.filter(x => wordCount(x.body) > (restCriteria.minWordCount ?? 3));
|
||||
totalComments += Math.min(comments.length, maxRedditItems);
|
||||
|
||||
// and take the user-defined maximum number of items
|
||||
items = nonSubItems.concat(comments.slice(0, maxRedditItems).map(x => ({
|
||||
value: searchAndReplace(x.body, restCriteria.transformations ?? []),
|
||||
createdOn: x.created,
|
||||
source: 'reddit',
|
||||
id: x.id,
|
||||
sourceUrl: x.permalink,
|
||||
score: x.score,
|
||||
itemType: 'comment',
|
||||
acquisitionType: 'comment'
|
||||
})));
|
||||
}
|
||||
|
||||
// cache items for 20 minutes
|
||||
await this.resources.cache.set(candidateHash, items, {ttl: 1200});
|
||||
} else {
|
||||
items = cacheRes;
|
||||
totalExternal = items.reduce((acc, curr) => {
|
||||
if(curr.acquisitionType === 'external') {
|
||||
acc.set(`${curr.source} comments`, (acc.get(`${curr.source} comments`) ?? 0 ) + 1);
|
||||
return acc;
|
||||
}
|
||||
return acc;
|
||||
}, new Map<string, number>());
|
||||
//totalSubs = items.filter(x => x.itemType === 'submission').length;
|
||||
//totalCommentSubs = totalSubs;
|
||||
totalComments = items.filter(x => x.itemType === 'comment' && x.source === 'reddit').length;
|
||||
fromCache = true;
|
||||
}
|
||||
|
||||
const {
|
||||
matchScore = 85,
|
||||
caseSensitive = false,
|
||||
transformations = [],
|
||||
transformationsActivity = transformations,
|
||||
occurrences = {
|
||||
condition: 'AND',
|
||||
criteria: [
|
||||
{
|
||||
count: {
|
||||
test: ['> 0']
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
} = restCriteria;
|
||||
|
||||
if(item instanceof Submission) {
|
||||
// we've already done difference calculations in the searchFacet phase
|
||||
// and when the check is for a sub it means we are only checking if the submissions has been reposted which means either:
|
||||
// * very similar title (default sameness of 85% or more)
|
||||
// * duplicate/same URL -- which is a repost, duh
|
||||
// so just add all items to critMatches at this point
|
||||
criteriaMatchedResults = criteriaMatchedResults.concat(items.filter(x => "sameness" in x) as RepostItemResult[]);
|
||||
} else {
|
||||
let sourceContent = searchAndReplace(item.body, transformationsActivity);
|
||||
if (!caseSensitive) {
|
||||
sourceContent = sourceContent.toLowerCase();
|
||||
}
|
||||
|
||||
for (const i of items) {
|
||||
const itemContent = !caseSensitive ? i.value.toLowerCase() : i.value;
|
||||
const strMatchResults = stringSameness(sourceContent, itemContent);
|
||||
if(strMatchResults.highScoreWeighted >= matchScore) {
|
||||
criteriaMatchedResults.push({
|
||||
...i,
|
||||
// @ts-ignore
|
||||
reqSameness: matchScore,
|
||||
sameness: Math.min(strMatchResults.highScoreWeighted, 100)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// now do occurrence and time tests
|
||||
|
||||
const {
|
||||
condition: occCondition = 'AND',
|
||||
criteria: occCriteria = [
|
||||
{
|
||||
count: {
|
||||
test: ['> 0']
|
||||
}
|
||||
}
|
||||
]
|
||||
} = occurrences;
|
||||
|
||||
let orPass = false;
|
||||
let occurrenceReason = null;
|
||||
|
||||
for(const occurrenceTest of occCriteria) {
|
||||
|
||||
const {
|
||||
count:{
|
||||
condition: oCondition = 'AND',
|
||||
test: oCriteria = []
|
||||
} = {},
|
||||
time: {
|
||||
condition: tCondition = 'AND',
|
||||
test: tCriteria = [],
|
||||
} = {}
|
||||
} = occurrenceTest;
|
||||
|
||||
let conditionFailSummaries = [];
|
||||
|
||||
const passedConditions = [];
|
||||
const failedConditions = [];
|
||||
|
||||
for (const oc of oCriteria) {
|
||||
const ocCompare = parseGenericValueComparison(oc);
|
||||
const ocMatch = comparisonTextOp(criteriaMatchedResults.length, ocCompare.operator, ocCompare.value);
|
||||
if (ocMatch) {
|
||||
passedConditions.push(oc);
|
||||
} else {
|
||||
failedConditions.push(oc);
|
||||
if (oCondition === 'AND') {
|
||||
conditionFailSummaries.push(`(AND) ${oc} occurrences was not true`);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (passedConditions.length === 0 && oCriteria.length > 0) {
|
||||
conditionFailSummaries.push('(OR) No occurrence tests passed');
|
||||
}
|
||||
|
||||
const existingPassed = passedConditions.length;
|
||||
if (conditionFailSummaries.length === 0) {
|
||||
const timeAwareReposts = [...criteriaMatchedResults].filter(x => x.createdOn !== undefined).sort((a, b) => (a.createdOn as number) - (b.createdOn as number));
|
||||
for (const tc of tCriteria) {
|
||||
let toTest: RepostItemResult[] = [];
|
||||
const durationCompare = parseDurationComparison(tc.condition);
|
||||
switch (tc.testOn) {
|
||||
case 'newest':
|
||||
case 'oldest':
|
||||
if (tc.testOn === 'newest') {
|
||||
toTest = timeAwareReposts.slice(-1);
|
||||
} else {
|
||||
toTest = timeAwareReposts.slice(0, 1);
|
||||
}
|
||||
break;
|
||||
case 'any':
|
||||
case 'all':
|
||||
toTest = timeAwareReposts;
|
||||
break;
|
||||
}
|
||||
const timePass = tc.testOn === 'any' ? toTest.some(x => compareDurationValue(durationCompare, dayjs.unix(x.createdOn as number))) : toTest.every(x => compareDurationValue(durationCompare, dayjs.unix(x.createdOn as number)));
|
||||
if (timePass) {
|
||||
passedConditions.push(tc.condition);
|
||||
} else {
|
||||
failedConditions.push(tc.condition);
|
||||
if (tCondition === 'AND') {
|
||||
conditionFailSummaries.push(`(AND) ${tc.condition} was not true`);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (tCriteria.length > 0 && passedConditions.length === existingPassed) {
|
||||
conditionFailSummaries.push('(OR) No time-based tests passed');
|
||||
}
|
||||
}
|
||||
|
||||
if(conditionFailSummaries.length !== 0 && occCondition === 'AND') {
|
||||
// failed occurrence tests (high-level)
|
||||
occurrenceReason = conditionFailSummaries.join(' | ');
|
||||
break;
|
||||
}
|
||||
|
||||
if(passedConditions.length > 0 && occCondition === 'OR') {
|
||||
occurrenceReason = passedConditions.join(' | ');
|
||||
orPass = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let passed = occCriteria.length === 0;
|
||||
|
||||
if(occCriteria.length > 0) {
|
||||
if(occCondition === 'OR') {
|
||||
passed = orPass;
|
||||
occurrenceReason = occurrenceReason === null ? 'No occurrence test sets passed' : occurrenceReason;
|
||||
} else if(occCondition === 'AND') {
|
||||
passed = occurrenceReason === null;
|
||||
occurrenceReason = occurrenceReason === null ? 'All tests passed' : occurrenceReason;
|
||||
}
|
||||
//passed = (occCondition === 'OR' && orPass) || (occurrenceFailureReason === null && occCondition === 'AND')
|
||||
}
|
||||
|
||||
const results = {
|
||||
passed,
|
||||
conditionsSummary: occurrenceReason as string,
|
||||
items: criteriaMatchedResults
|
||||
};
|
||||
criteriaResults.push(results)
|
||||
|
||||
|
||||
if(!results.passed) {
|
||||
if(this.condition === 'AND') {
|
||||
andFail = true;
|
||||
break;
|
||||
}
|
||||
} else if(this.condition === 'OR') {
|
||||
break;
|
||||
}
|
||||
if (!results.passed && this.condition === 'AND') {
|
||||
andFail = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// get all repost items for stats and SCIENCE
|
||||
const repostItemResults = [...criteriaResults
|
||||
// only want reposts from criteria that passed
|
||||
.filter(x => x.passed).map(x => x.items)
|
||||
.flat()
|
||||
// make sure we are only accumulating unique reposts
|
||||
.reduce((acc, curr) => {
|
||||
const hash = `${curr.source}-${curr.itemType}-${curr.id}`;
|
||||
if (!acc.has(hash)) {
|
||||
acc.set(hash, curr);
|
||||
}
|
||||
return acc;
|
||||
}, new Map<string, RepostItemResult>()).values()];
|
||||
|
||||
repostItemResults.sort((a, b) => a.sameness - b.sameness).reverse();
|
||||
const foundRepost = criteriaResults.length > 0;
|
||||
|
||||
|
||||
let avgSameness = null;
|
||||
let closestSummary = null;
|
||||
let closestSameness = null;
|
||||
let searchCandidateSummary = '';
|
||||
|
||||
if(item instanceof Comment) {
|
||||
searchCandidateSummary = `Searched top ${totalComments} comments in top 10 ${fromCache ? '' : `of ${totalCommentSubs} `}most popular submissions`;
|
||||
if(totalExternal.size > 0) {
|
||||
searchCandidateSummary += ", ";
|
||||
const extSumm: string[] = [];
|
||||
totalExternal.forEach((v, k) => {
|
||||
extSumm.push(`${v} ${k}`);
|
||||
});
|
||||
searchCandidateSummary += extSumm.join(', ');
|
||||
}
|
||||
} else {
|
||||
searchCandidateSummary = `Searched ${totalSubs}`
|
||||
}
|
||||
|
||||
let summary = `${searchCandidateSummary} and found ${repostItemResults.length} reposts.`;
|
||||
|
||||
if(repostItemResults.length > 0) {
|
||||
avgSameness = formatNumber(repostItemResults.reduce((acc, curr) => acc + curr.sameness, 0) / criteriaResults.length);
|
||||
const closest = repostItemResults[0];
|
||||
summary += ` --- Closest Match => >> ${closest.value} << from ${closest.source} (${closest.sourceUrl}) with ${formatNumber(closest.sameness)}% sameness.`
|
||||
closestSummary = `matched a ${closest.itemType} from ${closest.source}`;
|
||||
closestSameness = closest.sameness;
|
||||
if(criteriaResults.length > 1) {
|
||||
summary += ` Avg ${formatNumber(avgSameness)}%`;
|
||||
}
|
||||
}
|
||||
|
||||
let passed;
|
||||
|
||||
if(this.condition === 'AND') {
|
||||
const failedCrit = criteriaResults.find(x => !x.passed);
|
||||
if(failedCrit !== undefined) {
|
||||
summary += `BUT a criteria failed >> ${failedCrit.conditionsSummary} << and rule has AND condition.`;
|
||||
passed = false;
|
||||
} else {
|
||||
passed = true;
|
||||
}
|
||||
} else {
|
||||
const passedCrit = criteriaResults.find(x => x.passed);
|
||||
if(passedCrit === undefined) {
|
||||
summary += `BUT all criteria failed`;
|
||||
passed = false;
|
||||
} else {
|
||||
passed = true;
|
||||
}
|
||||
}
|
||||
|
||||
const result = `${passed ? PASS : FAIL} ${summary}`;
|
||||
this.logger.verbose(result);
|
||||
|
||||
return [passed, this.getResult(passed, {
|
||||
result,
|
||||
data: {
|
||||
allResults: criteriaResults,
|
||||
closestSameness: passed ? formatNumber(closestSameness as number) : undefined,
|
||||
closestSummary: passed ? closestSummary : undefined,
|
||||
}
|
||||
})];
|
||||
}
|
||||
}
|
||||
|
||||
interface RepostConfig {
    /**
     * A list of criteria sets used to find and test potential reposts of the Activity
     *
     * (NOTE(review): previous description/example referred to regexes -- copy-paste from RegexRule config)
     * @minItems 1
     * */
    criteria?: RepostCriteria[]
    /**
     * * If `OR` then any set of Criteria that pass will trigger the Rule
     * * If `AND` then all Criteria sets must pass to trigger the Rule
     *
     * @default "OR"
     * */
    condition?: 'AND' | 'OR'
}
|
||||
|
||||
/**
 * Runtime constructor options for RepostRule: the user's RepostConfig plus the common
 * Rule dependencies (logger, client, resources, etc.) supplied by the rule factory.
 * */
export interface RepostRuleOptions extends RepostConfig, RuleOptions {
}
|
||||
|
||||
/**
 * Search for reposts of a Submission or Comment
 *
 * * For submissions the title or URL can be searched and matched against
 * * For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against
 *
 * */
export interface RepostRuleJSONConfig extends RepostConfig, RuleJSONConfig {
    /**
     * The kind of rule -- must be `repost` for this rule to be used
     * @examples ["repost"]
     * */
    kind: 'repost'
}
|
||||
@@ -8,6 +8,7 @@ import HistoryRule, {HistoryJSONConfig} from "./HistoryRule";
|
||||
import RegexRule, {RegexRuleJSONConfig} from "./RegexRule";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {RepostRule, RepostRuleJSONConfig} from "./RepostRule";
|
||||
|
||||
export function ruleFactory
|
||||
(config: RuleJSONConfig, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Rule {
|
||||
@@ -31,6 +32,9 @@ export function ruleFactory
|
||||
case 'regex':
|
||||
cfg = config as RegexRuleJSONConfig;
|
||||
return new RegexRule({...cfg, logger, subredditName, resources, client});
|
||||
case 'repost':
|
||||
cfg = config as RepostRuleJSONConfig;
|
||||
return new RepostRule({...cfg, logger, subredditName, resources, client});
|
||||
default:
|
||||
throw new Error('rule "kind" was not recognized.');
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ interface ResultContext {
|
||||
|
||||
export interface RuleResult extends ResultContext {
|
||||
premise: RulePremise
|
||||
kind: string
|
||||
name: string
|
||||
triggered: (boolean | null)
|
||||
}
|
||||
@@ -116,13 +117,13 @@ export abstract class Rule implements IRule, Triggerable {
|
||||
this.logger.verbose('(Skipped) Exclusive author criteria not matched');
|
||||
return Promise.resolve([null, this.getResult(null, {result: 'Exclusive author criteria not matched'})]);
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error occurred during Rule pre-process checks');
|
||||
throw err;
|
||||
}
|
||||
try {
|
||||
return this.process(item);
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error occurred while processing rule');
|
||||
throw err;
|
||||
}
|
||||
@@ -153,6 +154,7 @@ export abstract class Rule implements IRule, Triggerable {
|
||||
protected getResult(triggered: (boolean | null) = null, context: ResultContext = {}): RuleResult {
|
||||
return {
|
||||
premise: this.getPremise(),
|
||||
kind: this.getKind(),
|
||||
name: this.name,
|
||||
triggered,
|
||||
...context,
|
||||
@@ -209,22 +211,6 @@ export interface UserNoteCriteria {
|
||||
search?: 'current' | 'consecutive' | 'total'
|
||||
}
|
||||
|
||||
/**
|
||||
* A duration and how to compare it against a value
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`
|
||||
*
|
||||
* * EX `> 100 days` => Passes if the date being compared is before 100 days ago
|
||||
* * EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months
|
||||
*
|
||||
* Unit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)
|
||||
*
|
||||
* [See] https://regexr.com/609n8 for example
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\s*$
|
||||
* */
|
||||
export type DurationComparor = string;
|
||||
|
||||
export interface IRule extends ChecksActivityState {
|
||||
/**
|
||||
* An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.
|
||||
@@ -254,6 +240,6 @@ export interface RuleJSONConfig extends IRule {
|
||||
* The kind of rule to run
|
||||
* @examples ["recentActivity", "repeatActivity", "author", "attribution", "history"]
|
||||
*/
|
||||
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex'
|
||||
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex' | 'repost'
|
||||
}
|
||||
|
||||
|
||||
@@ -29,6 +29,26 @@
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"description": "An (array of) string/regular expression to test contents of an Author's profile description against\n\nIf no flags are specified then the **insensitive** flag is used by default\n\nIf using an array then if **any** value in the array passes the description test passes",
|
||||
"examples": [
|
||||
[
|
||||
"/test$/i",
|
||||
"look for this string literal"
|
||||
]
|
||||
]
|
||||
},
|
||||
"flairCssClass": {
|
||||
"description": "A list of (user) flair css class values from the subreddit to match against",
|
||||
"examples": [
|
||||
@@ -136,12 +156,22 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -159,7 +189,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -193,6 +223,11 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -228,7 +263,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -356,6 +391,7 @@
|
||||
"message",
|
||||
"remove",
|
||||
"report",
|
||||
"userflair",
|
||||
"usernote"
|
||||
],
|
||||
"type": "string"
|
||||
|
||||
@@ -233,7 +233,7 @@
|
||||
"properties": {
|
||||
"aggregateOn": {
|
||||
"default": "undefined",
|
||||
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) all domains are aggregated",
|
||||
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)",
|
||||
"examples": [
|
||||
[
|
||||
]
|
||||
@@ -249,6 +249,16 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"commentState": {
|
||||
"$ref": "#/definitions/CommentState",
|
||||
"description": "When present, Comments from `window` will only be counted if they meet this criteria",
|
||||
"examples": [
|
||||
{
|
||||
"op": true,
|
||||
"removed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"consolidateMediaDomains": {
|
||||
"default": false,
|
||||
"description": "Should the criteria consolidate recognized media domains into the parent domain?\n\nSubmissions to major media domains (youtube, vimeo) can be identified by individual Channel/Author...\n\n* If `false` then domains will be aggregated at the channel level IE Youtube Channel A (2 counts), Youtube Channel B (3 counts)\n* If `true` then then media domains will be consolidated at domain level and then aggregated IE youtube.com (5 counts)",
|
||||
@@ -277,27 +287,37 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"exclude": {
|
||||
"description": "Do not include Activities from this list of Subreddits (by name, case-insensitive)\n\nWill be ignored if `include` is present.\n\nEX `[\"mealtimevideos\",\"askscience\"]`",
|
||||
"description": "When present, Activities WILL NOT be counted if they are found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
|
||||
"examples": [
|
||||
"mealtimevideos",
|
||||
"askscience"
|
||||
[
|
||||
"mealtimevideos",
|
||||
"askscience",
|
||||
"/onlyfans*/i",
|
||||
{
|
||||
"over18": true
|
||||
}
|
||||
]
|
||||
],
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"include": {
|
||||
"description": "Only include Activities from this list of Subreddits (by name, case-insensitive)\n\n\nEX `[\"mealtimevideos\",\"askscience\"]`",
|
||||
"description": "When present, Activities WILL ONLY be counted if they are found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
|
||||
"examples": [
|
||||
"mealtimevideos",
|
||||
"askscience"
|
||||
[
|
||||
"mealtimevideos",
|
||||
"askscience",
|
||||
"/onlyfans*/i",
|
||||
{
|
||||
"over18": true
|
||||
}
|
||||
]
|
||||
],
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"minActivityCount": {
|
||||
@@ -308,6 +328,16 @@
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"submissionState": {
|
||||
"$ref": "#/definitions/SubmissionState",
|
||||
"description": "When present, Submissions from `window` will only be counted if they meet this criteria",
|
||||
"examples": [
|
||||
{
|
||||
"over_18": true,
|
||||
"removed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"threshold": {
|
||||
"default": "> 10%",
|
||||
"description": "A string containing a comparison operator and a value to compare comments against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign]`\n\n* EX `> 12` => greater than 12 activities originate from same attribution\n* EX `<= 10%` => less than 10% of all Activities have the same attribution",
|
||||
@@ -419,11 +449,6 @@
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
@@ -459,6 +484,26 @@
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"description": "An (array of) string/regular expression to test contents of an Author's profile description against\n\nIf no flags are specified then the **insensitive** flag is used by default\n\nIf using an array then if **any** value in the array passes the description test passes",
|
||||
"examples": [
|
||||
[
|
||||
"/test$/i",
|
||||
"look for this string literal"
|
||||
]
|
||||
]
|
||||
},
|
||||
"flairCssClass": {
|
||||
"description": "A list of (user) flair css class values from the subreddit to match against",
|
||||
"examples": [
|
||||
@@ -626,8 +671,6 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"exclude",
|
||||
"include",
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
@@ -813,6 +856,17 @@
|
||||
],
|
||||
"description": "The cache provider and, optionally, a custom configuration for that provider\n\nIf not present or `null` provider will be `memory`.\n\nTo specify another `provider` but use its default configuration set this property to a string of one of the available providers: `memory`, `redis`, or `none`"
|
||||
},
|
||||
"selfTTL": {
|
||||
"default": 50,
|
||||
"description": "Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling\n\nThis is useful to prevent the bot from checking Activities it *just* worked on or a product of the checks. Examples:\n\n* Ignore comments created through an Action\n* Ignore Activity polled from modqueue that the bot just reported\n\nThis value should be at least as long as the longest polling interval for modqueue/newComm\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
"examples": [
|
||||
50
|
||||
],
|
||||
"type": [
|
||||
"number",
|
||||
"boolean"
|
||||
]
|
||||
},
|
||||
"submissionTTL": {
|
||||
"default": 60,
|
||||
"description": "Amount of time, in seconds, a submission should be cached\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
@@ -927,6 +981,25 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ClearProcessedOptions": {
|
||||
"description": "For very long-running, high-volume subreddits clearing the list of processed activities helps manage memory bloat\n\nAll of these options have default values based on the limit and/or interval set for polling options on each subreddit stream. They only need to modified if the defaults are not sufficient.\n\nIf both `after` and `size` are defined whichever is hit first will trigger the list to clear. `after` will be reset after ever clear.",
|
||||
"properties": {
|
||||
"after": {
|
||||
"description": "An interval the processed list should be cleared after.\n\n* EX `9 days`\n* EX `3 months`\n* EX `5 minutes`",
|
||||
"pattern": "^\\s*(?<time>\\d+)\\s*(?<unit>days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"retain": {
|
||||
"description": "The number of activities to retain in processed list after clearing.\n\nDefaults to `limit` value from `PollingOptions`",
|
||||
"type": "number"
|
||||
},
|
||||
"size": {
|
||||
"description": "Number of activities found in processed list after which the list should be cleared.\n\nDefaults to the `limit` value from `PollingOptions`",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"CommentActionJson": {
|
||||
"description": "Reply to the Activity. For a submission the reply will be a top-level comment.",
|
||||
"properties": {
|
||||
@@ -1056,6 +1129,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/FlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/UserFlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/CommentActionJson"
|
||||
},
|
||||
@@ -1194,6 +1270,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RuleSetJson"
|
||||
},
|
||||
@@ -1221,12 +1300,22 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -1244,7 +1333,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1390,6 +1479,10 @@
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
"flair_template_id": {
|
||||
"description": "Flair template ID to assign",
|
||||
"type": "string"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -1609,6 +1702,77 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ImageDetection": {
|
||||
"description": "When comparing submissions detect if the reference submission is an image and do a pixel-comparison to other detected image submissions.\n\n**Note:** This is an **experimental feature**",
|
||||
"properties": {
|
||||
"enable": {
|
||||
"description": "Is image detection enabled?",
|
||||
"type": "boolean"
|
||||
},
|
||||
"fetchBehavior": {
|
||||
"default": "extension",
|
||||
"description": "Determines how and when to check if a URL is an image\n\n**Note:** After fetching a URL the **Content-Type** is validated to contain `image` before detection occurs\n\n**When `extension`:** (default)\n\n* Only URLs that end in known image extensions (.png, .jpg, etc...) are fetched\n\n**When `unknown`:**\n\n* URLs that end in known image extensions (.png, .jpg, etc...) are fetched\n* URLs with no extension or unknown (IE non-video, non-doc, etc...) are fetched\n\n**When `all`:**\n\n* All submissions that have URLs (non-self) will be fetched, regardless of extension\n* **Note:** This can be bandwidth/CPU intensive if history window is large so use with care",
|
||||
"enum": [
|
||||
"all",
|
||||
"extension",
|
||||
"unknown"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"hash": {
|
||||
"description": "Use perceptual hashing (blockhash-js) to compare images\n\nPros:\n\n* very fast\n* low cpu/memory usage\n* results can be cached\n\nCons:\n\n* not as accurate as pixel comparison\n* weaker for text-heavy images\n* mostly color-blind\n\nBest uses:\n\n* Detecting (general) duplicate images\n* Comparing large number of images",
|
||||
"properties": {
|
||||
"bits": {
|
||||
"default": 32,
|
||||
"description": "Bit count determines accuracy of hash and granularity of hash comparison (comparison to other hashes)\n\nDefault is `32`\n\n**NOTE:** Hashes of different sizes (bits) cannot be compared. If you are caching results make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.",
|
||||
"type": "number"
|
||||
},
|
||||
"enable": {
|
||||
"default": true,
|
||||
"description": "Enabled by default.\n\nIf both `hash` and `pixel` are enabled then `pixel` will be used to verify image comparison when hashes matches",
|
||||
"type": "boolean"
|
||||
},
|
||||
"hardThreshold": {
|
||||
"description": "High Confidence Threshold\n\nIf the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur\n\nDefaults to the parent-level `threshold` value if not present\n\nUse `null` if you want pixel comparison to ALWAYS occur (softThreshold must be present)",
|
||||
"type": [
|
||||
"null",
|
||||
"number"
|
||||
]
|
||||
},
|
||||
"softThreshold": {
|
||||
"description": "Low Confidence Threshold -- only used if `pixel` is enabled\n\nIf the difference in comparison is\n\n1) equal to or less than this value and\n2) the value is greater than `hardThreshold`\n\nthe images will be compared using the `pixel` method",
|
||||
"type": "number"
|
||||
},
|
||||
"ttl": {
|
||||
"description": "Number of seconds to cache image hash",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"pixel": {
|
||||
"description": "Use pixel counting to compare images\n\nPros:\n\n* most accurate\n* strong with text or color-only changes\n\nCons:\n\n* much slower than hashing\n* memory/cpu intensive\n\nBest uses:\n\n* Comparison text-only images\n* Comparison requires high degree of accuracy or changes are subtle",
|
||||
"properties": {
|
||||
"enable": {
|
||||
"default": false,
|
||||
"description": "Disabled by default.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"threshold": {
|
||||
"description": "The percentage, as a whole number, of pixels that are **different** between the two images at which point the images are not considered the same.",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"threshold": {
|
||||
"default": 5,
|
||||
"description": "The percentage, as a whole number, of difference between two images at which point they will not be considered the same.\n\nWill be used as `hash.hardThreshold` and `pixel.threshold` if those values are not specified\n\nDefault is `5`",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"LockActionJson": {
|
||||
"description": "Lock the Activity",
|
||||
"properties": {
|
||||
@@ -1866,6 +2030,76 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"OccurredAt": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"$ref": "#/definitions/TimeBasedSelector",
|
||||
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"condition",
|
||||
"testOn"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"OccurrenceTests": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"time": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"PollingOptions": {
|
||||
"description": "A configuration for where, how, and when to poll Reddit for Activities to process",
|
||||
"examples": [
|
||||
@@ -1876,6 +2110,10 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"clearProcessed": {
|
||||
"$ref": "#/definitions/ClearProcessedOptions",
|
||||
"description": "For very long-running, high-volume subreddits clearing the list of processed activities helps manage memory bloat\n\nAll of these options have default values based on the limit and/or interval set for polling options on each subreddit stream. They only need to modified if the defaults are not sufficient.\n\nIf both `after` and `size` are defined whichever is hit first will trigger the list to clear. `after` will be reset after ever clear."
|
||||
},
|
||||
"delayUntil": {
|
||||
"description": "Delay processing Activity until it is `N` seconds old\n\nUseful if there are other bots that may process an Activity and you want this bot to run first/last/etc.\n\nIf the Activity is already `N` seconds old when it is initially retrieved no refresh of the Activity occurs (no API request is made) and it is immediately processed.",
|
||||
"type": "number"
|
||||
@@ -1934,6 +2172,10 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"imageDetection": {
|
||||
"$ref": "#/definitions/ImageDetection",
|
||||
"description": "When comparing submissions detect if the reference submission is an image and do a pixel-comparison to other detected image submissions.\n\n**Note:** This is an **experimental feature**"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -1991,7 +2233,7 @@
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"window": {
|
||||
@@ -2570,6 +2812,224 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RepostCriteria": {
|
||||
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxExternalItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxRedditItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"occurredAt": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"criteria"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"occurrences": {
|
||||
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurrenceTests"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"searchOn": {
|
||||
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SearchFacetJSONConfig"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RepostRuleJSONConfig": {
|
||||
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"condition": {
|
||||
"default": "OR",
|
||||
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
|
||||
"examples": [
|
||||
{
|
||||
"matchThreshold": "> 3",
|
||||
"regex": "/reddit/"
|
||||
}
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/RepostCriteria"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"repost"
|
||||
],
|
||||
"examples": [
|
||||
"repost"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RuleSetJson": {
|
||||
"description": "A RuleSet is a \"nested\" set of `Rule` objects that can be used to create more complex AND/OR behavior. Think of the outcome of a `RuleSet` as the result of all of its run `Rule` objects (based on `condition`)",
|
||||
"properties": {
|
||||
@@ -2607,6 +3067,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
@@ -2621,6 +3084,111 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchAndReplaceRegExp": {
|
||||
"properties": {
|
||||
"replace": {
|
||||
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
|
||||
"examples": [
|
||||
"find this string",
|
||||
"/some string*/ig"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"replace",
|
||||
"search"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchFacetJSONConfig": {
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SubmissionCheckJson": {
|
||||
"properties": {
|
||||
"actions": {
|
||||
@@ -2642,6 +3210,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/FlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/UserFlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/CommentActionJson"
|
||||
},
|
||||
@@ -2780,6 +3351,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RuleSetJson"
|
||||
},
|
||||
@@ -2807,6 +3381,11 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -2842,7 +3421,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -2875,6 +3454,9 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"isUserProfile": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -2905,6 +3487,122 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ThirdPartyCredentialsJsonConfig": {
|
||||
"additionalProperties": {
|
||||
},
|
||||
"properties": {
|
||||
"youtube": {
|
||||
"properties": {
|
||||
"apiKey": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiKey"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"TimeBasedSelector": {
|
||||
"enum": [
|
||||
"all",
|
||||
"any",
|
||||
"newest",
|
||||
"oldest"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"UserFlairActionJson": {
|
||||
"description": "Flair the Submission",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the Action. If criteria fails then the Action is not run.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"css": {
|
||||
"description": "The text of the css class of the flair to apply",
|
||||
"type": "string"
|
||||
},
|
||||
"dryRun": {
|
||||
"default": false,
|
||||
"description": "If `true` the Action will not make the API request to Reddit to perform its action.",
|
||||
"examples": [
|
||||
false,
|
||||
true
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
"enable": {
|
||||
"default": true,
|
||||
"description": "If set to `false` the Action will not be run",
|
||||
"examples": [
|
||||
true
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
"flair_template_id": {
|
||||
"description": "Flair template to pick.\n\n**Note:** If this template is used text/css are ignored",
|
||||
"type": "string"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Action.\n\nIf any set of criteria passes the Action will be run."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The type of action that will be performed",
|
||||
"enum": [
|
||||
"userflair"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this Action. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes",
|
||||
"examples": [
|
||||
"myDescriptiveAction"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
},
|
||||
"text": {
|
||||
"description": "The text of the flair to apply",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"UserNoteActionJson": {
|
||||
"description": "Add a Toolbox User Note to the Author of this Activity",
|
||||
"properties": {
|
||||
@@ -3088,6 +3786,9 @@
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"credentials": {
|
||||
"$ref": "#/definitions/ThirdPartyCredentialsJsonConfig"
|
||||
},
|
||||
"dryRun": {
|
||||
"default": "undefined",
|
||||
"description": "Use this option to override the `dryRun` setting for all `Checks`",
|
||||
|
||||
@@ -19,6 +19,28 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"BotCredentialsJsonConfig": {
|
||||
"properties": {
|
||||
"reddit": {
|
||||
"$ref": "#/definitions/RedditCredentials"
|
||||
},
|
||||
"youtube": {
|
||||
"properties": {
|
||||
"apiKey": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiKey"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"reddit"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"BotInstanceJsonConfig": {
|
||||
"description": "The configuration for an **individual reddit account** ContextMod will run as a bot.\n\nMultiple bot configs may be specified (one per reddit account).\n\n**NOTE:** If `bots` is not specified in a `FILE` then a default `bot` is generated using `ENV/ARG` values IE `CLIENT_ID`, etc...but if `bots` IS specified the default is not generated.",
|
||||
"properties": {
|
||||
@@ -27,15 +49,12 @@
|
||||
"description": "Settings to configure the default caching behavior for this bot\n\nEvery setting not specified will default to what is specified by the global operator caching config"
|
||||
},
|
||||
"credentials": {
|
||||
"$ref": "#/definitions/RedditCredentials",
|
||||
"description": "Credentials required for the bot to interact with Reddit's API\n\nThese credentials will provided to both the API and Web interface unless otherwise specified with the `web.credentials` property\n\nRefer to the [required credentials table](https://github.com/FoxxMD/context-mod/blob/master/docs/operatorConfiguration.md#minimum-required-configuration) to see what is necessary to run the bot.",
|
||||
"examples": [
|
||||
"anyOf": [
|
||||
{
|
||||
"accessToken": "p75_1c467b2",
|
||||
"clientId": "f4b4df1_9oiu",
|
||||
"clientSecret": "34v5q1c564_yt7",
|
||||
"redirectUri": "http://localhost:8085/callback",
|
||||
"refreshToken": "34_f1w1v4"
|
||||
"$ref": "#/definitions/RedditCredentials"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/BotCredentialsJsonConfig"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -79,6 +98,10 @@
|
||||
"default": false,
|
||||
"description": "If set to `true` all subreddits polling unmoderated/modqueue with default polling settings will share a request to \"r/mod\"\notherwise each subreddit will poll its own mod view\n\n* ENV => `SHARE_MOD`\n* ARG => `--shareMod`",
|
||||
"type": "boolean"
|
||||
},
|
||||
"stagger": {
|
||||
"description": "If sharing a mod stream stagger pushing relevant Activities to individual subreddits.\n\nUseful when running many subreddits and rules are potentially cpu/memory/traffic heavy -- allows spreading out load",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -101,21 +124,8 @@
|
||||
"type": "object"
|
||||
},
|
||||
"snoowrap": {
|
||||
"description": "Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior",
|
||||
"properties": {
|
||||
"debug": {
|
||||
"description": "Manually set the debug status for snoowrap\n\nWhen snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level\n\n* Set to `true` to always output\n* Set to `false` to never output\n\nIf not present or `null` will be set based on `logLevel`\n\n* ENV => `SNOO_DEBUG`\n* ARG => `--snooDebug`",
|
||||
"type": "boolean"
|
||||
},
|
||||
"proxy": {
|
||||
"description": "Proxy all requests to Reddit's API through this endpoint\n\n* ENV => `PROXY`\n* ARG => `--proxy <proxyEndpoint>`",
|
||||
"examples": [
|
||||
"http://localhost:4443"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
"$ref": "#/definitions/SnoowrapOptions",
|
||||
"description": "Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior.\n\nOverrides any defaults provided at top-level operator config.\n\nSet to an empty object to \"ignore\" any top-level config"
|
||||
},
|
||||
"subreddits": {
|
||||
"description": "Settings related to bot behavior for subreddits it is managing",
|
||||
@@ -391,6 +401,17 @@
|
||||
],
|
||||
"description": "The cache provider and, optionally, a custom configuration for that provider\n\nIf not present or `null` provider will be `memory`.\n\nTo specify another `provider` but use its default configuration set this property to a string of one of the available providers: `memory`, `redis`, or `none`"
|
||||
},
|
||||
"selfTTL": {
|
||||
"default": 50,
|
||||
"description": "Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling\n\nThis is useful to prevent the bot from checking Activities it *just* worked on or a product of the checks. Examples:\n\n* Ignore comments created through an Action\n* Ignore Activity polled from modqueue that the bot just reported\n\nThis value should be at least as long as the longest polling interval for modqueue/newComm\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
"examples": [
|
||||
50
|
||||
],
|
||||
"type": [
|
||||
"number",
|
||||
"boolean"
|
||||
]
|
||||
},
|
||||
"submissionTTL": {
|
||||
"default": 60,
|
||||
"description": "Amount of time, in seconds, a submission should be cached\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
@@ -506,6 +527,40 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"SnoowrapOptions": {
|
||||
"properties": {
|
||||
"debug": {
|
||||
"description": "Manually set the debug status for snoowrap\n\nWhen snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level\n\n* Set to `true` to always output\n* Set to `false` to never output\n\nIf not present or `null` will be set based on `logLevel`\n\n* ENV => `SNOO_DEBUG`\n* ARG => `--snooDebug`",
|
||||
"type": "boolean"
|
||||
},
|
||||
"proxy": {
|
||||
"description": "Proxy all requests to Reddit's API through this endpoint\n\n* ENV => `PROXY`\n* ARG => `--proxy <proxyEndpoint>`",
|
||||
"examples": [
|
||||
"http://localhost:4443"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ThirdPartyCredentialsJsonConfig": {
|
||||
"additionalProperties": {
|
||||
},
|
||||
"properties": {
|
||||
"youtube": {
|
||||
"properties": {
|
||||
"apiKey": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiKey"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"WebCredentials": {
|
||||
"description": "Separate credentials for the web interface can be provided when also running the api.\n\nAll properties not specified will default to values given in ENV/ARG credential properties\n\nRefer to the [required credentials table](https://github.com/FoxxMD/context-mod/blob/master/docs/operatorConfiguration.md#minimum-required-configuration) to see what is necessary for the web interface.",
|
||||
"examples": [
|
||||
@@ -575,6 +630,9 @@
|
||||
"$ref": "#/definitions/OperatorCacheConfig",
|
||||
"description": "Settings to configure the default caching behavior globally\n\nThese settings will be used by each bot, and subreddit, that does not specify their own"
|
||||
},
|
||||
"credentials": {
|
||||
"$ref": "#/definitions/ThirdPartyCredentialsJsonConfig"
|
||||
},
|
||||
"logging": {
|
||||
"description": "Settings to configure global logging defaults",
|
||||
"properties": {
|
||||
@@ -650,6 +708,10 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"snoowrap": {
|
||||
"$ref": "#/definitions/SnoowrapOptions",
|
||||
"description": "Set global snoowrap options as well as default snoowrap config for all bots that don't specify their own"
|
||||
},
|
||||
"web": {
|
||||
"description": "Settings for the web interface",
|
||||
"properties": {
|
||||
|
||||
@@ -19,6 +19,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
@@ -179,7 +182,7 @@
|
||||
"properties": {
|
||||
"aggregateOn": {
|
||||
"default": "undefined",
|
||||
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) all domains are aggregated",
|
||||
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)",
|
||||
"examples": [
|
||||
[
|
||||
]
|
||||
@@ -195,6 +198,16 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"commentState": {
|
||||
"$ref": "#/definitions/CommentState",
|
||||
"description": "When present, Comments from `window` will only be counted if they meet this criteria",
|
||||
"examples": [
|
||||
{
|
||||
"op": true,
|
||||
"removed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"consolidateMediaDomains": {
|
||||
"default": false,
|
||||
"description": "Should the criteria consolidate recognized media domains into the parent domain?\n\nSubmissions to major media domains (youtube, vimeo) can be identified by individual Channel/Author...\n\n* If `false` then domains will be aggregated at the channel level IE Youtube Channel A (2 counts), Youtube Channel B (3 counts)\n* If `true` then then media domains will be consolidated at domain level and then aggregated IE youtube.com (5 counts)",
|
||||
@@ -223,27 +236,37 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"exclude": {
|
||||
"description": "Do not include Activities from this list of Subreddits (by name, case-insensitive)\n\nWill be ignored if `include` is present.\n\nEX `[\"mealtimevideos\",\"askscience\"]`",
|
||||
"description": "When present, Activities WILL NOT be counted if they are found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
|
||||
"examples": [
|
||||
"mealtimevideos",
|
||||
"askscience"
|
||||
[
|
||||
"mealtimevideos",
|
||||
"askscience",
|
||||
"/onlyfans*/i",
|
||||
{
|
||||
"over18": true
|
||||
}
|
||||
]
|
||||
],
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"include": {
|
||||
"description": "Only include Activities from this list of Subreddits (by name, case-insensitive)\n\n\nEX `[\"mealtimevideos\",\"askscience\"]`",
|
||||
"description": "When present, Activities WILL ONLY be counted if they are found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
|
||||
"examples": [
|
||||
"mealtimevideos",
|
||||
"askscience"
|
||||
[
|
||||
"mealtimevideos",
|
||||
"askscience",
|
||||
"/onlyfans*/i",
|
||||
{
|
||||
"over18": true
|
||||
}
|
||||
]
|
||||
],
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"minActivityCount": {
|
||||
@@ -254,6 +277,16 @@
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"submissionState": {
|
||||
"$ref": "#/definitions/SubmissionState",
|
||||
"description": "When present, Submissions from `window` will only be counted if they meet this criteria",
|
||||
"examples": [
|
||||
{
|
||||
"over_18": true,
|
||||
"removed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"threshold": {
|
||||
"default": "> 10%",
|
||||
"description": "A string containing a comparison operator and a value to compare comments against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign]`\n\n* EX `> 12` => greater than 12 activities originate from same attribution\n* EX `<= 10%` => less than 10% of all Activities have the same attribution",
|
||||
@@ -365,11 +398,6 @@
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
@@ -405,6 +433,26 @@
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"description": "An (array of) string/regular expression to test contents of an Author's profile description against\n\nIf no flags are specified then the **insensitive** flag is used by default\n\nIf using an array then if **any** value in the array passes the description test passes",
|
||||
"examples": [
|
||||
[
|
||||
"/test$/i",
|
||||
"look for this string literal"
|
||||
]
|
||||
]
|
||||
},
|
||||
"flairCssClass": {
|
||||
"description": "A list of (user) flair css class values from the subreddit to match against",
|
||||
"examples": [
|
||||
@@ -572,8 +620,6 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"exclude",
|
||||
"include",
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
@@ -587,12 +633,22 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -610,7 +666,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -868,6 +924,147 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ImageDetection": {
|
||||
"description": "When comparing submissions detect if the reference submission is an image and do a pixel-comparison to other detected image submissions.\n\n**Note:** This is an **experimental feature**",
|
||||
"properties": {
|
||||
"enable": {
|
||||
"description": "Is image detection enabled?",
|
||||
"type": "boolean"
|
||||
},
|
||||
"fetchBehavior": {
|
||||
"default": "extension",
|
||||
"description": "Determines how and when to check if a URL is an image\n\n**Note:** After fetching a URL the **Content-Type** is validated to contain `image` before detection occurs\n\n**When `extension`:** (default)\n\n* Only URLs that end in known image extensions (.png, .jpg, etc...) are fetched\n\n**When `unknown`:**\n\n* URLs that end in known image extensions (.png, .jpg, etc...) are fetched\n* URLs with no extension or unknown (IE non-video, non-doc, etc...) are fetched\n\n**When `all`:**\n\n* All submissions that have URLs (non-self) will be fetched, regardless of extension\n* **Note:** This can be bandwidth/CPU intensive if history window is large so use with care",
|
||||
"enum": [
|
||||
"all",
|
||||
"extension",
|
||||
"unknown"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"hash": {
|
||||
"description": "Use perceptual hashing (blockhash-js) to compare images\n\nPros:\n\n* very fast\n* low cpu/memory usage\n* results can be cached\n\nCons:\n\n* not as accurate as pixel comparison\n* weaker for text-heavy images\n* mostly color-blind\n\nBest uses:\n\n* Detecting (general) duplicate images\n* Comparing large number of images",
|
||||
"properties": {
|
||||
"bits": {
|
||||
"default": 32,
|
||||
"description": "Bit count determines accuracy of hash and granularity of hash comparison (comparison to other hashes)\n\nDefault is `32`\n\n**NOTE:** Hashes of different sizes (bits) cannot be compared. If you are caching results make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.",
|
||||
"type": "number"
|
||||
},
|
||||
"enable": {
|
||||
"default": true,
|
||||
"description": "Enabled by default.\n\nIf both `hash` and `pixel` are enabled then `pixel` will be used to verify image comparison when hashes matches",
|
||||
"type": "boolean"
|
||||
},
|
||||
"hardThreshold": {
|
||||
"description": "High Confidence Threshold\n\nIf the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur\n\nDefaults to the parent-level `threshold` value if not present\n\nUse `null` if you want pixel comparison to ALWAYS occur (softThreshold must be present)",
|
||||
"type": [
|
||||
"null",
|
||||
"number"
|
||||
]
|
||||
},
|
||||
"softThreshold": {
|
||||
"description": "Low Confidence Threshold -- only used if `pixel` is enabled\n\nIf the difference in comparison is\n\n1) equal to or less than this value and\n2) the value is greater than `hardThreshold`\n\nthe images will be compared using the `pixel` method",
|
||||
"type": "number"
|
||||
},
|
||||
"ttl": {
|
||||
"description": "Number of seconds to cache image hash",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"pixel": {
|
||||
"description": "Use pixel counting to compare images\n\nPros:\n\n* most accurate\n* strong with text or color-only changes\n\nCons:\n\n* much slower than hashing\n* memory/cpu intensive\n\nBest uses:\n\n* Comparison text-only images\n* Comparison requires high degree of accuracy or changes are subtle",
|
||||
"properties": {
|
||||
"enable": {
|
||||
"default": false,
|
||||
"description": "Disabled by default.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"threshold": {
|
||||
"description": "The percentage, as a whole number, of pixels that are **different** between the two images at which point the images are not considered the same.",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"threshold": {
|
||||
"default": 5,
|
||||
"description": "The percentage, as a whole number, of difference between two images at which point they will not be considered the same.\n\nWill be used as `hash.hardThreshold` and `pixel.threshold` if those values are not specified\n\nDefault is `5`",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"OccurredAt": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"$ref": "#/definitions/TimeBasedSelector",
|
||||
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"condition",
|
||||
"testOn"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"OccurrenceTests": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"time": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RecentActivityRuleJSONConfig": {
|
||||
"description": "Checks a user's history for any Activity (Submission/Comment) in the subreddits specified in thresholds\n\nAvailable data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):\n\n```\nsummary => comma-deliminated list of subreddits that hit the threshold and their count EX subredditA(1), subredditB(4),...\nsubCount => Total number of subreddits that hit the threshold\ntotalCount => Total number of all activity occurrences in subreddits\n```",
|
||||
"properties": {
|
||||
@@ -890,6 +1087,10 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"imageDetection": {
|
||||
"$ref": "#/definitions/ImageDetection",
|
||||
"description": "When comparing submissions detect if the reference submission is an image and do a pixel-comparison to other detected image submissions.\n\n**Note:** This is an **experimental feature**"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -947,7 +1148,7 @@
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"window": {
|
||||
@@ -1363,6 +1564,329 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RepostCriteria": {
|
||||
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxExternalItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxRedditItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"occurredAt": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"criteria"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"occurrences": {
|
||||
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurrenceTests"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"searchOn": {
|
||||
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SearchFacetJSONConfig"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RepostRuleJSONConfig": {
|
||||
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"condition": {
|
||||
"default": "OR",
|
||||
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
|
||||
"examples": [
|
||||
{
|
||||
"matchThreshold": "> 3",
|
||||
"regex": "/reddit/"
|
||||
}
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/RepostCriteria"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"repost"
|
||||
],
|
||||
"examples": [
|
||||
"repost"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchAndReplaceRegExp": {
|
||||
"properties": {
|
||||
"replace": {
|
||||
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
|
||||
"examples": [
|
||||
"find this string",
|
||||
"/some string*/ig"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"replace",
|
||||
"search"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchFacetJSONConfig": {
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SubmissionState": {
|
||||
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
|
||||
"examples": [
|
||||
@@ -1372,6 +1896,11 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -1407,7 +1936,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1440,6 +1969,9 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"isUserProfile": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -1470,6 +2002,15 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"TimeBasedSelector": {
|
||||
"enum": [
|
||||
"all",
|
||||
"any",
|
||||
"newest",
|
||||
"oldest"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"UserNoteCriteria": {
|
||||
"properties": {
|
||||
"count": {
|
||||
|
||||
@@ -156,7 +156,7 @@
|
||||
"properties": {
|
||||
"aggregateOn": {
|
||||
"default": "undefined",
|
||||
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) all domains are aggregated",
|
||||
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)",
|
||||
"examples": [
|
||||
[
|
||||
]
|
||||
@@ -172,6 +172,16 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"commentState": {
|
||||
"$ref": "#/definitions/CommentState",
|
||||
"description": "When present, Comments from `window` will only be counted if they meet this criteria",
|
||||
"examples": [
|
||||
{
|
||||
"op": true,
|
||||
"removed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"consolidateMediaDomains": {
|
||||
"default": false,
|
||||
"description": "Should the criteria consolidate recognized media domains into the parent domain?\n\nSubmissions to major media domains (youtube, vimeo) can be identified by individual Channel/Author...\n\n* If `false` then domains will be aggregated at the channel level IE Youtube Channel A (2 counts), Youtube Channel B (3 counts)\n* If `true` then then media domains will be consolidated at domain level and then aggregated IE youtube.com (5 counts)",
|
||||
@@ -200,27 +210,37 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"exclude": {
|
||||
"description": "Do not include Activities from this list of Subreddits (by name, case-insensitive)\n\nWill be ignored if `include` is present.\n\nEX `[\"mealtimevideos\",\"askscience\"]`",
|
||||
"description": "When present, Activities WILL NOT be counted if they are found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
|
||||
"examples": [
|
||||
"mealtimevideos",
|
||||
"askscience"
|
||||
[
|
||||
"mealtimevideos",
|
||||
"askscience",
|
||||
"/onlyfans*/i",
|
||||
{
|
||||
"over18": true
|
||||
}
|
||||
]
|
||||
],
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"include": {
|
||||
"description": "Only include Activities from this list of Subreddits (by name, case-insensitive)\n\n\nEX `[\"mealtimevideos\",\"askscience\"]`",
|
||||
"description": "When present, Activities WILL ONLY be counted if they are found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
|
||||
"examples": [
|
||||
"mealtimevideos",
|
||||
"askscience"
|
||||
[
|
||||
"mealtimevideos",
|
||||
"askscience",
|
||||
"/onlyfans*/i",
|
||||
{
|
||||
"over18": true
|
||||
}
|
||||
]
|
||||
],
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"minActivityCount": {
|
||||
@@ -231,6 +251,16 @@
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"submissionState": {
|
||||
"$ref": "#/definitions/SubmissionState",
|
||||
"description": "When present, Submissions from `window` will only be counted if they meet this criteria",
|
||||
"examples": [
|
||||
{
|
||||
"over_18": true,
|
||||
"removed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"threshold": {
|
||||
"default": "> 10%",
|
||||
"description": "A string containing a comparison operator and a value to compare comments against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign]`\n\n* EX `> 12` => greater than 12 activities originate from same attribution\n* EX `<= 10%` => less than 10% of all Activities have the same attribution",
|
||||
@@ -342,11 +372,6 @@
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
@@ -382,6 +407,26 @@
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"description": "An (array of) string/regular expression to test contents of an Author's profile description against\n\nIf no flags are specified then the **insensitive** flag is used by default\n\nIf using an array then if **any** value in the array passes the description test passes",
|
||||
"examples": [
|
||||
[
|
||||
"/test$/i",
|
||||
"look for this string literal"
|
||||
]
|
||||
]
|
||||
},
|
||||
"flairCssClass": {
|
||||
"description": "A list of (user) flair css class values from the subreddit to match against",
|
||||
"examples": [
|
||||
@@ -549,8 +594,6 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"exclude",
|
||||
"include",
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
@@ -564,12 +607,22 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -587,7 +640,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -845,6 +898,147 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ImageDetection": {
|
||||
"description": "When comparing submissions detect if the reference submission is an image and do a pixel-comparison to other detected image submissions.\n\n**Note:** This is an **experimental feature**",
|
||||
"properties": {
|
||||
"enable": {
|
||||
"description": "Is image detection enabled?",
|
||||
"type": "boolean"
|
||||
},
|
||||
"fetchBehavior": {
|
||||
"default": "extension",
|
||||
"description": "Determines how and when to check if a URL is an image\n\n**Note:** After fetching a URL the **Content-Type** is validated to contain `image` before detection occurs\n\n**When `extension`:** (default)\n\n* Only URLs that end in known image extensions (.png, .jpg, etc...) are fetched\n\n**When `unknown`:**\n\n* URLs that end in known image extensions (.png, .jpg, etc...) are fetched\n* URLs with no extension or unknown (IE non-video, non-doc, etc...) are fetched\n\n**When `all`:**\n\n* All submissions that have URLs (non-self) will be fetched, regardless of extension\n* **Note:** This can be bandwidth/CPU intensive if history window is large so use with care",
|
||||
"enum": [
|
||||
"all",
|
||||
"extension",
|
||||
"unknown"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"hash": {
|
||||
"description": "Use perceptual hashing (blockhash-js) to compare images\n\nPros:\n\n* very fast\n* low cpu/memory usage\n* results can be cached\n\nCons:\n\n* not as accurate as pixel comparison\n* weaker for text-heavy images\n* mostly color-blind\n\nBest uses:\n\n* Detecting (general) duplicate images\n* Comparing large number of images",
|
||||
"properties": {
|
||||
"bits": {
|
||||
"default": 32,
|
||||
"description": "Bit count determines accuracy of hash and granularity of hash comparison (comparison to other hashes)\n\nDefault is `32`\n\n**NOTE:** Hashes of different sizes (bits) cannot be compared. If you are caching results make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.",
|
||||
"type": "number"
|
||||
},
|
||||
"enable": {
|
||||
"default": true,
|
||||
"description": "Enabled by default.\n\nIf both `hash` and `pixel` are enabled then `pixel` will be used to verify image comparison when hashes matches",
|
||||
"type": "boolean"
|
||||
},
|
||||
"hardThreshold": {
|
||||
"description": "High Confidence Threshold\n\nIf the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur\n\nDefaults to the parent-level `threshold` value if not present\n\nUse `null` if you want pixel comparison to ALWAYS occur (softThreshold must be present)",
|
||||
"type": [
|
||||
"null",
|
||||
"number"
|
||||
]
|
||||
},
|
||||
"softThreshold": {
|
||||
"description": "Low Confidence Threshold -- only used if `pixel` is enabled\n\nIf the difference in comparison is\n\n1) equal to or less than this value and\n2) the value is greater than `hardThreshold`\n\nthe images will be compared using the `pixel` method",
|
||||
"type": "number"
|
||||
},
|
||||
"ttl": {
|
||||
"description": "Number of seconds to cache image hash",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"pixel": {
|
||||
"description": "Use pixel counting to compare images\n\nPros:\n\n* most accurate\n* strong with text or color-only changes\n\nCons:\n\n* much slower than hashing\n* memory/cpu intensive\n\nBest uses:\n\n* Comparison text-only images\n* Comparison requires high degree of accuracy or changes are subtle",
|
||||
"properties": {
|
||||
"enable": {
|
||||
"default": false,
|
||||
"description": "Disabled by default.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"threshold": {
|
||||
"description": "The percentage, as a whole number, of pixels that are **different** between the two images at which point the images are not considered the same.",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"threshold": {
|
||||
"default": 5,
|
||||
"description": "The percentage, as a whole number, of difference between two images at which point they will not be considered the same.\n\nWill be used as `hash.hardThreshold` and `pixel.threshold` if those values are not specified\n\nDefault is `5`",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"OccurredAt": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"$ref": "#/definitions/TimeBasedSelector",
|
||||
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"condition",
|
||||
"testOn"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"OccurrenceTests": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"time": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RecentActivityRuleJSONConfig": {
|
||||
"description": "Checks a user's history for any Activity (Submission/Comment) in the subreddits specified in thresholds\n\nAvailable data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):\n\n```\nsummary => comma-deliminated list of subreddits that hit the threshold and their count EX subredditA(1), subredditB(4),...\nsubCount => Total number of subreddits that hit the threshold\ntotalCount => Total number of all activity occurrences in subreddits\n```",
|
||||
"properties": {
|
||||
@@ -867,6 +1061,10 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"imageDetection": {
|
||||
"$ref": "#/definitions/ImageDetection",
|
||||
"description": "When comparing submissions detect if the reference submission is an image and do a pixel-comparison to other detected image submissions.\n\n**Note:** This is an **experimental feature**"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -924,7 +1122,7 @@
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"window": {
|
||||
@@ -1340,6 +1538,329 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RepostCriteria": {
|
||||
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxExternalItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxRedditItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"occurredAt": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"criteria"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"occurrences": {
|
||||
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurrenceTests"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"searchOn": {
|
||||
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SearchFacetJSONConfig"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RepostRuleJSONConfig": {
|
||||
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"condition": {
|
||||
"default": "OR",
|
||||
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
|
||||
"examples": [
|
||||
{
|
||||
"matchThreshold": "> 3",
|
||||
"regex": "/reddit/"
|
||||
}
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/RepostCriteria"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"repost"
|
||||
],
|
||||
"examples": [
|
||||
"repost"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchAndReplaceRegExp": {
|
||||
"properties": {
|
||||
"replace": {
|
||||
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
|
||||
"examples": [
|
||||
"find this string",
|
||||
"/some string*/ig"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"replace",
|
||||
"search"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchFacetJSONConfig": {
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SubmissionState": {
|
||||
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
|
||||
"examples": [
|
||||
@@ -1349,6 +1870,11 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"age": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"approved": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -1384,7 +1910,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1417,6 +1943,9 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"isUserProfile": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -1447,6 +1976,15 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"TimeBasedSelector": {
|
||||
"enum": [
|
||||
"all",
|
||||
"any",
|
||||
"newest",
|
||||
"oldest"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"UserNoteCriteria": {
|
||||
"properties": {
|
||||
"count": {
|
||||
@@ -1521,6 +2059,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
|
||||
@@ -1,12 +1,22 @@
|
||||
import Snoowrap, {Comment, Subreddit} from "snoowrap";
|
||||
import Snoowrap, {Comment, Subreddit, WikiPage} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import {SubmissionCheck} from "../Check/SubmissionCheck";
|
||||
import {CommentCheck} from "../Check/CommentCheck";
|
||||
import {
|
||||
cacheStats,
|
||||
createHistoricalStatsDisplay,
|
||||
createRetryHandler,
|
||||
determineNewResults, findLastIndex, formatNumber,
|
||||
mergeArr, parseFromJsonOrYamlToObject, pollingInfo, resultsSummary, sleep, totalFromMapStats, triggeredIndicator,
|
||||
determineNewResults,
|
||||
findLastIndex,
|
||||
formatNumber, likelyJson5,
|
||||
mergeArr,
|
||||
parseFromJsonOrYamlToObject,
|
||||
parseRedditEntity,
|
||||
pollingInfo,
|
||||
resultsSummary,
|
||||
sleep,
|
||||
totalFromMapStats,
|
||||
triggeredIndicator,
|
||||
} from "../util";
|
||||
import {Poll} from "snoostorm";
|
||||
import pEvent from "p-event";
|
||||
@@ -17,7 +27,7 @@ import {
|
||||
ActionResult,
|
||||
DEFAULT_POLLING_INTERVAL,
|
||||
DEFAULT_POLLING_LIMIT, Invokee,
|
||||
ManagerOptions, ManagerStateChangeOption, PAUSED,
|
||||
ManagerOptions, ManagerStateChangeOption, ManagerStats, PAUSED,
|
||||
PollingOptionsStrong, ResourceStats, RUNNING, RunState, STOPPED, SYSTEM, USER
|
||||
} from "../Common/interfaces";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
@@ -39,6 +49,9 @@ import {JSONConfig} from "../JsonConfig";
|
||||
import {CheckStructuredJson} from "../Check";
|
||||
import NotificationManager from "../Notification/NotificationManager";
|
||||
import action from "../Web/Server/routes/authenticated/user/action";
|
||||
import {createHistoricalDefaults, historicalDefaults} from "../Common/defaults";
|
||||
import {ExtendedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
import {isRateLimitError, isStatusError} from "../Utils/Errors";
|
||||
|
||||
export interface RunningState {
|
||||
state: RunState,
|
||||
@@ -50,6 +63,7 @@ export interface runCheckOptions {
|
||||
delayUntil?: number,
|
||||
dryRun?: boolean,
|
||||
refresh?: boolean,
|
||||
force?: boolean,
|
||||
}
|
||||
|
||||
export interface CheckTask {
|
||||
@@ -65,48 +79,15 @@ export interface RuntimeManagerOptions extends ManagerOptions {
|
||||
maxWorkers: number;
|
||||
}
|
||||
|
||||
export interface ManagerStats {
|
||||
eventsCheckedTotal: number
|
||||
eventsCheckedSinceStartTotal: number
|
||||
eventsAvg: number
|
||||
checksRunTotal: number
|
||||
checksRunSinceStartTotal: number
|
||||
checksTriggered: number
|
||||
checksTriggeredTotal: number
|
||||
checksTriggeredSinceStart: number
|
||||
checksTriggeredSinceStartTotal: number
|
||||
rulesRunTotal: number
|
||||
rulesRunSinceStartTotal: number
|
||||
rulesCachedTotal: number
|
||||
rulesCachedSinceStartTotal: number
|
||||
rulesTriggeredTotal: number
|
||||
rulesTriggeredSinceStartTotal: number
|
||||
rulesAvg: number
|
||||
actionsRun: number
|
||||
actionsRunTotal: number
|
||||
actionsRunSinceStart: number,
|
||||
actionsRunSinceStartTotal: number
|
||||
cache: {
|
||||
provider: string,
|
||||
currentKeyCount: number,
|
||||
isShared: boolean,
|
||||
totalRequests: number,
|
||||
totalMiss: number,
|
||||
missPercent: string,
|
||||
requestRate: number,
|
||||
types: ResourceStats
|
||||
},
|
||||
}
|
||||
|
||||
interface QueuedIdentifier {
|
||||
id: string,
|
||||
shouldRefresh: boolean
|
||||
state: 'queued' | 'processing'
|
||||
}
|
||||
|
||||
export class Manager {
|
||||
export class Manager extends EventEmitter {
|
||||
subreddit: Subreddit;
|
||||
client: Snoowrap;
|
||||
client: ExtendedSnoowrap;
|
||||
logger: Logger;
|
||||
botName: string;
|
||||
pollOptions: PollingOptionsStrong[] = [];
|
||||
@@ -116,6 +97,7 @@ export class Manager {
|
||||
wikiLocation: string;
|
||||
lastWikiRevision?: DayjsObj
|
||||
lastWikiCheck: DayjsObj = dayjs();
|
||||
wikiFormat: ('yaml' | 'json') = 'yaml';
|
||||
//wikiUpdateRunning: boolean = false;
|
||||
|
||||
streamListedOnce: string[] = [];
|
||||
@@ -125,7 +107,6 @@ export class Manager {
|
||||
sharedModqueue: boolean;
|
||||
cacheManager: BotResourcesManager;
|
||||
globalDryRun?: boolean;
|
||||
emitter: EventEmitter = new EventEmitter();
|
||||
queue: QueueObject<CheckTask>;
|
||||
// firehose is used to ensure all activities from different polling streams are unique
|
||||
// that is -- if the same activities is in both modqueue and unmoderated we don't want to process the activity twice or use stale data
|
||||
@@ -161,53 +142,27 @@ export class Manager {
|
||||
|
||||
notificationManager: NotificationManager;
|
||||
|
||||
modPermissions?: string[]
|
||||
|
||||
// use by api nanny to slow event consumption
|
||||
delayBy?: number;
|
||||
|
||||
eventsCheckedTotal: number = 0;
|
||||
eventsCheckedSinceStartTotal: number = 0;
|
||||
eventsSample: number[] = [];
|
||||
eventsSampleInterval: any;
|
||||
eventsRollingAvg: number = 0;
|
||||
checksRunTotal: number = 0;
|
||||
checksRunSinceStartTotal: number = 0;
|
||||
checksTriggered: Map<string, number> = new Map();
|
||||
checksTriggeredSinceStart: Map<string, number> = new Map();
|
||||
rulesRunTotal: number = 0;
|
||||
rulesRunSinceStartTotal: number = 0;
|
||||
rulesCachedTotal: number = 0;
|
||||
rulesCachedSinceStartTotal: number = 0;
|
||||
rulesTriggeredTotal: number = 0;
|
||||
rulesTriggeredSinceStartTotal: number = 0;
|
||||
rulesUniqueSample: number[] = [];
|
||||
rulesUniqueSampleInterval: any;
|
||||
rulesUniqueRollingAvg: number = 0;
|
||||
actionsRun: Map<string, number> = new Map();
|
||||
actionsRunSinceStart: Map<string, number> = new Map();
|
||||
actionedEvents: ActionedEvent[] = [];
|
||||
|
||||
getStats = async (): Promise<ManagerStats> => {
|
||||
const data: any = {
|
||||
eventsCheckedTotal: this.eventsCheckedTotal,
|
||||
eventsCheckedSinceStartTotal: this.eventsCheckedSinceStartTotal,
|
||||
eventsAvg: formatNumber(this.eventsRollingAvg),
|
||||
checksRunTotal: this.checksRunTotal,
|
||||
checksRunSinceStartTotal: this.checksRunSinceStartTotal,
|
||||
checksTriggered: this.checksTriggered,
|
||||
checksTriggeredTotal: totalFromMapStats(this.checksTriggered),
|
||||
checksTriggeredSinceStart: this.checksTriggeredSinceStart,
|
||||
checksTriggeredSinceStartTotal: totalFromMapStats(this.checksTriggeredSinceStart),
|
||||
rulesRunTotal: this.rulesRunTotal,
|
||||
rulesRunSinceStartTotal: this.rulesRunSinceStartTotal,
|
||||
rulesCachedTotal: this.rulesCachedTotal,
|
||||
rulesCachedSinceStartTotal: this.rulesCachedSinceStartTotal,
|
||||
rulesTriggeredTotal: this.rulesTriggeredTotal,
|
||||
rulesTriggeredSinceStartTotal: this.rulesTriggeredSinceStartTotal,
|
||||
rulesAvg: formatNumber(this.rulesUniqueRollingAvg),
|
||||
actionsRun: this.actionsRun,
|
||||
actionsRunTotal: totalFromMapStats(this.actionsRun),
|
||||
actionsRunSinceStart: this.actionsRunSinceStart,
|
||||
actionsRunSinceStartTotal: totalFromMapStats(this.actionsRunSinceStart),
|
||||
historical: {
|
||||
lastReload: createHistoricalStatsDisplay(createHistoricalDefaults()),
|
||||
allTime: createHistoricalStatsDisplay(createHistoricalDefaults()),
|
||||
},
|
||||
cache: {
|
||||
provider: 'none',
|
||||
currentKeyCount: 0,
|
||||
@@ -223,6 +178,7 @@ export class Manager {
|
||||
if (this.resources !== undefined) {
|
||||
const resStats = await this.resources.getStats();
|
||||
|
||||
data.historical = this.resources.getHistoricalDisplayStats();
|
||||
data.cache = resStats.cache;
|
||||
data.cache.currentKeyCount = await this.resources.getCacheKeyCount();
|
||||
data.cache.isShared = this.resources.cacheSettingsHash === 'default';
|
||||
@@ -239,7 +195,9 @@ export class Manager {
|
||||
return this.displayLabel;
|
||||
}
|
||||
|
||||
constructor(sub: Subreddit, client: Snoowrap, logger: Logger, cacheManager: BotResourcesManager, opts: RuntimeManagerOptions = {botName: 'ContextMod', maxWorkers: 1}) {
|
||||
constructor(sub: Subreddit, client: ExtendedSnoowrap, logger: Logger, cacheManager: BotResourcesManager, opts: RuntimeManagerOptions = {botName: 'ContextMod', maxWorkers: 1}) {
|
||||
super();
|
||||
|
||||
const {dryRun, sharedModqueue = false, wikiLocation = 'botconfig/contextbot', botName, maxWorkers} = opts;
|
||||
this.displayLabel = opts.nickname || `${sub.display_name_prefixed}`;
|
||||
const getLabels = this.getCurrentLabels;
|
||||
@@ -270,8 +228,9 @@ export class Manager {
|
||||
|
||||
this.eventsSampleInterval = setInterval((function(self) {
|
||||
return function() {
|
||||
const et = self.resources !== undefined ? self.resources.stats.historical.allTime.eventsCheckedTotal : 0;
|
||||
const rollingSample = self.eventsSample.slice(0, 7)
|
||||
rollingSample.unshift(self.eventsCheckedTotal)
|
||||
rollingSample.unshift(et)
|
||||
self.eventsSample = rollingSample;
|
||||
const diff = self.eventsSample.reduceRight((acc: number[], curr, index) => {
|
||||
if(self.eventsSample[index + 1] !== undefined) {
|
||||
@@ -291,7 +250,8 @@ export class Manager {
|
||||
this.rulesUniqueSampleInterval = setInterval((function(self) {
|
||||
return function() {
|
||||
const rollingSample = self.rulesUniqueSample.slice(0, 7)
|
||||
rollingSample.unshift(self.rulesRunTotal - self.rulesCachedTotal);
|
||||
const rt = self.resources !== undefined ? self.resources.stats.historical.allTime.rulesRunTotal - self.resources.stats.historical.allTime.rulesCachedTotal : 0;
|
||||
rollingSample.unshift(rt);
|
||||
self.rulesUniqueSample = rollingSample;
|
||||
const diff = self.rulesUniqueSample.reduceRight((acc: number[], curr, index) => {
|
||||
if(self.rulesUniqueSample[index + 1] !== undefined) {
|
||||
@@ -309,6 +269,18 @@ export class Manager {
|
||||
})(this), 10000);
|
||||
}
|
||||
|
||||
protected async getModPermissions(): Promise<string[]> {
|
||||
if(this.modPermissions !== undefined) {
|
||||
return this.modPermissions as string[];
|
||||
}
|
||||
this.logger.debug('Retrieving mod permissions for bot');
|
||||
const userInfo = parseRedditEntity(this.botName, 'user');
|
||||
const mods = this.subreddit.getModerators({name: userInfo.name});
|
||||
// @ts-ignore
|
||||
this.modPermissions = mods[0].mod_permissions;
|
||||
return this.modPermissions as string[];
|
||||
}
|
||||
|
||||
protected getMaxWorkers(subMaxWorkers?: number) {
|
||||
let maxWorkers = this.globalMaxWorkers;
|
||||
|
||||
@@ -387,7 +359,7 @@ export class Manager {
|
||||
return q;
|
||||
}
|
||||
|
||||
protected parseConfigurationFromObject(configObj: object) {
|
||||
protected async parseConfigurationFromObject(configObj: object) {
|
||||
try {
|
||||
const configBuilder = new ConfigBuilder({logger: this.logger});
|
||||
const validJson = configBuilder.validateJson(configObj);
|
||||
@@ -395,6 +367,7 @@ export class Manager {
|
||||
const {
|
||||
polling = [{pollOn: 'unmoderated', limit: DEFAULT_POLLING_LIMIT, interval: DEFAULT_POLLING_INTERVAL}],
|
||||
caching,
|
||||
credentials,
|
||||
dryRun,
|
||||
footer,
|
||||
nickname,
|
||||
@@ -435,9 +408,10 @@ export class Manager {
|
||||
logger: this.logger,
|
||||
subreddit: this.subreddit,
|
||||
caching,
|
||||
credentials,
|
||||
client: this.client,
|
||||
};
|
||||
this.resources = this.cacheManager.set(this.subreddit.display_name, resourceConfig);
|
||||
this.resources = await this.cacheManager.set(this.subreddit.display_name, resourceConfig);
|
||||
this.resources.setLogger(this.logger);
|
||||
|
||||
this.logger.info('Subreddit-specific options updated');
|
||||
@@ -446,6 +420,10 @@ export class Manager {
|
||||
const commentChecks: Array<CommentCheck> = [];
|
||||
const subChecks: Array<SubmissionCheck> = [];
|
||||
const structuredChecks = configBuilder.parseToStructured(validJson);
|
||||
|
||||
// TODO check that bot has permissions for subreddit for all specified actions
|
||||
// can find permissions in this.subreddit.mod_permissions
|
||||
|
||||
for (const jCheck of structuredChecks) {
|
||||
const checkConfig = {
|
||||
...jCheck,
|
||||
@@ -471,7 +449,7 @@ export class Manager {
|
||||
this.logger.info(checkSummary);
|
||||
}
|
||||
this.validConfigLoaded = true;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.validConfigLoaded = false;
|
||||
throw err;
|
||||
}
|
||||
@@ -484,9 +462,45 @@ export class Manager {
|
||||
|
||||
try {
|
||||
let sourceData: string;
|
||||
let wiki: WikiPage;
|
||||
try {
|
||||
// @ts-ignore
|
||||
const wiki = await this.subreddit.getWikiPage(this.wikiLocation).fetch();
|
||||
try {
|
||||
// @ts-ignore
|
||||
wiki = await this.subreddit.getWikiPage(this.wikiLocation).fetch();
|
||||
} catch (err: any) {
|
||||
if(isStatusError(err) && err.statusCode === 404) {
|
||||
// see if we can create the page
|
||||
if (!this.client.scope.includes('wikiedit')) {
|
||||
throw new Error(`Page does not exist and could not be created because Bot does not have oauth permission 'wikiedit'`);
|
||||
}
|
||||
const modPermissions = await this.getModPermissions();
|
||||
if (!modPermissions.includes('all') && !modPermissions.includes('wiki')) {
|
||||
throw new Error(`Page does not exist and could not be created because Bot not have mod permissions for creating wiki pages. Must have 'all' or 'wiki'`);
|
||||
}
|
||||
if(!this.client.scope.includes('modwiki')) {
|
||||
throw new Error(`Bot COULD create wiki config page but WILL NOT because it does not have the oauth permissions 'modwiki' which is required to set page visibility and editing permissions. Safety first!`);
|
||||
}
|
||||
// @ts-ignore
|
||||
wiki = await this.subreddit.getWikiPage(this.wikiLocation).edit({
|
||||
text: '',
|
||||
reason: 'Empty configuration created for ContextMod'
|
||||
});
|
||||
this.logger.info(`Wiki page at ${this.wikiLocation} did not exist, but bot created it!`);
|
||||
|
||||
// 0 = use subreddit wiki permissions
|
||||
// 1 = only approved wiki contributors
|
||||
// 2 = only mods may edit and view
|
||||
// @ts-ignore
|
||||
await this.subreddit.getWikiPage(this.wikiLocation).editSettings({
|
||||
permissionLevel: 2,
|
||||
// don't list this page on r/[subreddit]/wiki/pages
|
||||
listed: false,
|
||||
});
|
||||
this.logger.info('Bot set wiki page visibility to MODS ONLY');
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
const revisionDate = dayjs.unix(wiki.revision_date);
|
||||
if (!force && this.validConfigLoaded && (this.lastWikiRevision !== undefined && this.lastWikiRevision.isSame(revisionDate))) {
|
||||
// nothing to do, we already have this revision
|
||||
@@ -512,27 +526,45 @@ export class Manager {
|
||||
|
||||
this.lastWikiRevision = revisionDate;
|
||||
sourceData = await wiki.content_md;
|
||||
} catch (err) {
|
||||
const msg = `Could not read wiki configuration. Please ensure the page https://reddit.com${this.subreddit.url}wiki/${this.wikiLocation} exists and is readable -- error: ${err.message}`;
|
||||
} catch (err: any) {
|
||||
let hint = '';
|
||||
if(isStatusError(err) && err.statusCode === 403) {
|
||||
hint = `\r\nHINT: Either the page is restricted to mods only and the bot's reddit account does have the mod permission 'all' or 'wiki' OR the bot does not have the 'wikiread' oauth permission`;
|
||||
}
|
||||
const msg = `Could not read wiki configuration. Please ensure the page https://reddit.com${this.subreddit.url}wiki/${this.wikiLocation} exists and is readable${hint} -- error: ${err.message}`;
|
||||
this.logger.error(msg);
|
||||
throw new ConfigParseError(msg);
|
||||
}
|
||||
|
||||
if (sourceData === '') {
|
||||
if (sourceData.replace('\r\n', '').trim() === '') {
|
||||
this.logger.error(`Wiki page contents was empty`);
|
||||
throw new ConfigParseError('Wiki page contents was empty');
|
||||
}
|
||||
|
||||
const [configObj, jsonErr, yamlErr] = parseFromJsonOrYamlToObject(sourceData);
|
||||
if (jsonErr === undefined) {
|
||||
this.wikiFormat = 'json';
|
||||
} else if (yamlErr === undefined) {
|
||||
this.wikiFormat = 'yaml';
|
||||
} else {
|
||||
this.wikiFormat = likelyJson5(sourceData) ? 'json' : 'yaml';
|
||||
}
|
||||
|
||||
if (configObj === undefined) {
|
||||
this.logger.error(`Could not parse wiki page contents as JSON or YAML:`);
|
||||
this.logger.error(jsonErr);
|
||||
this.logger.error(yamlErr);
|
||||
this.logger.error(`Could not parse wiki page contents as JSON or YAML. Looks like it should be ${this.wikiFormat}?`);
|
||||
if (this.wikiFormat === 'json') {
|
||||
this.logger.error(jsonErr);
|
||||
this.logger.error('Check DEBUG output for yaml error');
|
||||
this.logger.debug(yamlErr);
|
||||
} else {
|
||||
this.logger.error(yamlErr);
|
||||
this.logger.error('Check DEBUG output for json error');
|
||||
this.logger.debug(jsonErr);
|
||||
}
|
||||
throw new ConfigParseError('Could not parse wiki page contents as JSON or YAML')
|
||||
}
|
||||
|
||||
this.parseConfigurationFromObject(configObj);
|
||||
await this.parseConfigurationFromObject(configObj);
|
||||
this.logger.info('Checks updated');
|
||||
|
||||
if(!suppressNotification) {
|
||||
@@ -540,7 +572,7 @@ export class Manager {
|
||||
}
|
||||
|
||||
return true;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.validConfigLoaded = false;
|
||||
throw err;
|
||||
}
|
||||
@@ -549,9 +581,19 @@ export class Manager {
|
||||
async runChecks(checkType: ('Comment' | 'Submission'), activity: (Submission | Comment), options?: runCheckOptions): Promise<void> {
|
||||
const checks = checkType === 'Comment' ? this.commentChecks : this.submissionChecks;
|
||||
let item = activity;
|
||||
this.eventsCheckedTotal++;
|
||||
this.eventsCheckedSinceStartTotal++;
|
||||
const itemId = await item.id;
|
||||
|
||||
if(await this.resources.hasRecentSelf(item)) {
|
||||
const {force = false} = options || {};
|
||||
let recentMsg = `Found in Activities recently (last ${this.resources.selfTTL} seconds) modified/created by this bot`;
|
||||
if(force) {
|
||||
this.logger.debug(`${recentMsg} but will run anyway because "force" option was true.`);
|
||||
} else {
|
||||
this.logger.debug(`${recentMsg} so will skip running.`);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let allRuleResults: RuleResult[] = [];
|
||||
const itemIdentifier = `${checkType === 'Submission' ? 'SUB' : 'COM'} ${itemId}`;
|
||||
this.currentLabels = [itemIdentifier];
|
||||
@@ -560,50 +602,10 @@ export class Manager {
|
||||
const [peek, _] = await itemContentPeek(item);
|
||||
ePeek = peek;
|
||||
this.logger.info(`<EVENT> ${peek}`);
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error(`Error occurred while generate item peek for ${checkType} Activity ${itemId}`, err);
|
||||
}
|
||||
|
||||
const {
|
||||
checkNames = [],
|
||||
delayUntil,
|
||||
dryRun,
|
||||
refresh = false,
|
||||
} = options || {};
|
||||
|
||||
let wasRefreshed = false;
|
||||
|
||||
if (delayUntil !== undefined) {
|
||||
const created = dayjs.unix(item.created_utc);
|
||||
const diff = dayjs().diff(created, 's');
|
||||
if (diff < delayUntil) {
|
||||
this.logger.verbose(`Delaying processing until Activity is ${delayUntil} seconds old (${delayUntil - diff}s)`);
|
||||
await sleep(delayUntil - diff);
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
wasRefreshed = true;
|
||||
}
|
||||
}
|
||||
// refresh signal from firehose if activity was ingested multiple times before processing or re-queued while processing
|
||||
// want to make sure we have the most recent data
|
||||
if(!wasRefreshed && refresh === true) {
|
||||
this.logger.verbose('Refreshed data (probably due to signal from firehose)');
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
}
|
||||
|
||||
const startingApiLimit = this.client.ratelimitRemaining;
|
||||
|
||||
if (item instanceof Submission) {
|
||||
if (await item.removed_by_category === 'deleted') {
|
||||
this.logger.warn('Submission was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
} else if (item.author.name === '[deleted]') {
|
||||
this.logger.warn('Comment was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
|
||||
let checksRun = 0;
|
||||
let actionsRun = 0;
|
||||
let totalRulesRun = 0;
|
||||
@@ -622,8 +624,51 @@ export class Manager {
|
||||
actionResults: [],
|
||||
}
|
||||
let triggered = false;
|
||||
let triggeredCheckName;
|
||||
const checksRunNames = [];
|
||||
const cachedCheckNames = [];
|
||||
const startingApiLimit = this.client.ratelimitRemaining;
|
||||
|
||||
const {
|
||||
checkNames = [],
|
||||
delayUntil,
|
||||
dryRun,
|
||||
refresh = false,
|
||||
} = options || {};
|
||||
|
||||
let wasRefreshed = false;
|
||||
|
||||
try {
|
||||
|
||||
if (delayUntil !== undefined) {
|
||||
const created = dayjs.unix(item.created_utc);
|
||||
const diff = dayjs().diff(created, 's');
|
||||
if (diff < delayUntil) {
|
||||
this.logger.verbose(`Delaying processing until Activity is ${delayUntil} seconds old (${delayUntil - diff}s)`);
|
||||
await sleep(delayUntil - diff);
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
wasRefreshed = true;
|
||||
}
|
||||
}
|
||||
// refresh signal from firehose if activity was ingested multiple times before processing or re-queued while processing
|
||||
// want to make sure we have the most recent data
|
||||
if(!wasRefreshed && refresh === true) {
|
||||
this.logger.verbose('Refreshed data (probably due to signal from firehose)');
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
}
|
||||
|
||||
if (item instanceof Submission) {
|
||||
if (await item.removed_by_category === 'deleted') {
|
||||
this.logger.warn('Submission was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
} else if (item.author.name === '[deleted]') {
|
||||
this.logger.warn('Comment was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
|
||||
for (const check of checks) {
|
||||
if (checkNames.length > 0 && !checkNames.map(x => x.toLowerCase()).some(x => x === check.name.toLowerCase())) {
|
||||
this.logger.warn(`Check ${check.name} not in array of requested checks to run, skipping...`);
|
||||
@@ -633,6 +678,7 @@ export class Manager {
|
||||
this.logger.info(`Check ${check.name} not run because it is not enabled, skipping...`);
|
||||
continue;
|
||||
}
|
||||
checksRunNames.push(check.name);
|
||||
checksRun++;
|
||||
triggered = false;
|
||||
let isFromCache = false;
|
||||
@@ -642,6 +688,8 @@ export class Manager {
|
||||
isFromCache = fromCache;
|
||||
if(!fromCache) {
|
||||
await check.setCacheResult(item, {result: checkTriggered, ruleResults: checkResults});
|
||||
} else {
|
||||
cachedCheckNames.push(check.name);
|
||||
}
|
||||
currentResults = checkResults;
|
||||
totalRulesRun += checkResults.length;
|
||||
@@ -651,13 +699,15 @@ export class Manager {
|
||||
this.logger.info('Check was triggered but cache result options specified NOT to run actions...counting as check NOT triggered');
|
||||
triggered = false;
|
||||
}
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (e.logged !== true) {
|
||||
this.logger.warn(`Running rules for Check ${check.name} failed due to uncaught exception`, e);
|
||||
}
|
||||
this.emit('error', e);
|
||||
}
|
||||
|
||||
if (triggered) {
|
||||
triggeredCheckName = check.name;
|
||||
actionedEvent.check = check.name;
|
||||
actionedEvent.ruleResults = currentResults;
|
||||
if(isFromCache) {
|
||||
@@ -665,9 +715,12 @@ export class Manager {
|
||||
} else {
|
||||
actionedEvent.ruleSummary = resultsSummary(currentResults, check.condition);
|
||||
}
|
||||
this.checksTriggered.set(check.name, (this.checksTriggered.get(check.name) || 0) + 1);
|
||||
this.checksTriggeredSinceStart.set(check.name, (this.checksTriggeredSinceStart.get(check.name) || 0) + 1);
|
||||
runActions = await check.runActions(item, currentResults.filter(x => x.triggered), dryRun);
|
||||
// we only can about report and comment actions since those can produce items for newComm and modqueue
|
||||
const recentCandidates = runActions.filter(x => ['report','comment'].includes(x.kind.toLocaleLowerCase())).map(x => x.touchedEntities === undefined ? [] : x.touchedEntities).flat();
|
||||
for(const recent of recentCandidates) {
|
||||
await this.resources.setRecentSelf(recent as (Submission|Comment));
|
||||
}
|
||||
actionsRun = runActions.length;
|
||||
|
||||
if(check.notifyOnTrigger) {
|
||||
@@ -682,29 +735,13 @@ export class Manager {
|
||||
this.logger.info('No checks triggered');
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof LoggedError) && err.logged !== true) {
|
||||
this.logger.error('An unhandled error occurred while running checks', err);
|
||||
}
|
||||
this.emit('error', err);
|
||||
} finally {
|
||||
try {
|
||||
const cachedTotal = totalRulesRun - allRuleResults.length;
|
||||
const triggeredRulesTotal = allRuleResults.filter(x => x.triggered).length;
|
||||
|
||||
this.checksRunTotal += checksRun;
|
||||
this.checksRunSinceStartTotal += checksRun;
|
||||
this.rulesRunTotal += totalRulesRun;
|
||||
this.rulesRunSinceStartTotal += totalRulesRun;
|
||||
this.rulesCachedTotal += cachedTotal;
|
||||
this.rulesCachedSinceStartTotal += cachedTotal;
|
||||
this.rulesTriggeredTotal += triggeredRulesTotal;
|
||||
this.rulesTriggeredSinceStartTotal += triggeredRulesTotal;
|
||||
|
||||
for (const a of runActions) {
|
||||
const name = a.name;
|
||||
this.actionsRun.set(name, (this.actionsRun.get(name) || 0) + 1);
|
||||
this.actionsRunSinceStart.set(name, (this.actionsRunSinceStart.get(name) || 0) + 1);
|
||||
}
|
||||
actionedEvent.actionResults = runActions;
|
||||
if(triggered) {
|
||||
await this.resources.addActionedEvent(actionedEvent);
|
||||
@@ -713,8 +750,20 @@ export class Manager {
|
||||
this.logger.verbose(`Run Stats: Checks ${checksRun} | Rules => Total: ${totalRulesRun} Unique: ${allRuleResults.length} Cached: ${totalRulesRun - allRuleResults.length} Rolling Avg: ~${formatNumber(this.rulesUniqueRollingAvg)}/s | Actions ${actionsRun}`);
|
||||
this.logger.verbose(`Reddit API Stats: Initial ${startingApiLimit} | Current ${this.client.ratelimitRemaining} | Used ~${startingApiLimit - this.client.ratelimitRemaining} | Events ~${formatNumber(this.eventsRollingAvg)}/s`);
|
||||
this.currentLabels = [];
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error occurred while cleaning up Activity check and generating stats', err);
|
||||
} finally {
|
||||
this.resources.updateHistoricalStats({
|
||||
eventsCheckedTotal: 1,
|
||||
eventsActionedTotal: triggered ? 1 : 0,
|
||||
checksTriggered: triggeredCheckName !== undefined ? [triggeredCheckName] : [],
|
||||
checksRun: checksRunNames,
|
||||
checksFromCache: cachedCheckNames,
|
||||
actionsRun: runActions.map(x => x.name),
|
||||
rulesRun: allRuleResults.map(x => x.name),
|
||||
rulesTriggered: allRuleResults.filter(x => x.triggered).map(x => x.name),
|
||||
rulesCachedTotal: totalRulesRun - allRuleResults.length,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -723,7 +772,7 @@ export class Manager {
|
||||
// give current handle() time to stop
|
||||
//await sleep(1000);
|
||||
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 5, maxOtherRetry: 1}, this.logger);
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 3, maxOtherRetry: 1}, this.logger);
|
||||
|
||||
const subName = this.subreddit.display_name;
|
||||
|
||||
@@ -732,7 +781,8 @@ export class Manager {
|
||||
pollOn,
|
||||
limit,
|
||||
interval,
|
||||
delayUntil
|
||||
delayUntil,
|
||||
clearProcessed,
|
||||
} = pollOpt;
|
||||
let stream: SPoll<Snoowrap.Submission | Snoowrap.Comment>;
|
||||
let modStreamType: string | undefined;
|
||||
@@ -748,6 +798,7 @@ export class Manager {
|
||||
subreddit: this.subreddit.display_name,
|
||||
limit: limit,
|
||||
pollTime: interval * 1000,
|
||||
clearProcessed,
|
||||
});
|
||||
}
|
||||
break;
|
||||
@@ -761,6 +812,7 @@ export class Manager {
|
||||
subreddit: this.subreddit.display_name,
|
||||
limit: limit,
|
||||
pollTime: interval * 1000,
|
||||
clearProcessed
|
||||
});
|
||||
}
|
||||
break;
|
||||
@@ -769,6 +821,7 @@ export class Manager {
|
||||
subreddit: this.subreddit.display_name,
|
||||
limit: limit,
|
||||
pollTime: interval * 1000,
|
||||
clearProcessed
|
||||
});
|
||||
break;
|
||||
case 'newComm':
|
||||
@@ -776,6 +829,7 @@ export class Manager {
|
||||
subreddit: this.subreddit.display_name,
|
||||
limit: limit,
|
||||
pollTime: interval * 1000,
|
||||
clearProcessed
|
||||
});
|
||||
break;
|
||||
}
|
||||
@@ -813,21 +867,26 @@ export class Manager {
|
||||
}
|
||||
};
|
||||
|
||||
stream.on('item', onItem);
|
||||
|
||||
if (modStreamType !== undefined) {
|
||||
this.modStreamCallbacks.set(pollOn, onItem);
|
||||
} else {
|
||||
stream.on('item', onItem);
|
||||
// @ts-ignore
|
||||
stream.on('error', async (err: any) => {
|
||||
|
||||
this.emit('error', err);
|
||||
|
||||
if(isRateLimitError(err)) {
|
||||
this.logger.error('Encountered rate limit while polling! Bot is all out of requests :( Stopping subreddit queue and polling.');
|
||||
await this.stop();
|
||||
}
|
||||
this.logger.error('Polling error occurred', err);
|
||||
const shouldRetry = await retryHandler(err);
|
||||
if (shouldRetry) {
|
||||
stream.startInterval();
|
||||
} else {
|
||||
this.logger.warn('Pausing event polling due to too many errors');
|
||||
await this.pauseEvents();
|
||||
this.logger.warn('Stopping subreddit processing/polling due to too many errors');
|
||||
await this.stop();
|
||||
}
|
||||
});
|
||||
this.streams.push(stream);
|
||||
@@ -911,10 +970,19 @@ export class Manager {
|
||||
} else {
|
||||
const pauseWaitStart = dayjs();
|
||||
this.logger.info(`Activity processing queue is stopping...waiting for ${this.queue.running()} activities to finish processing`);
|
||||
const fullStopTime = dayjs().add(5, 'seconds');
|
||||
let gracefulStop = true;
|
||||
while (this.queue.running() > 0) {
|
||||
gracefulStop = false;
|
||||
if(dayjs().isAfter(fullStopTime)) {
|
||||
break;
|
||||
}
|
||||
await sleep(1500);
|
||||
this.logger.verbose(`Activity processing queue is stopping...waiting for ${this.queue.running()} activities to finish processing`);
|
||||
}
|
||||
if(!gracefulStop) {
|
||||
this.logger.warn('Waited longer than 5 seconds to stop activities. Something isn\'t right so forcing stop :/ ');
|
||||
}
|
||||
this.logger.info(`Activity processing queue stopped by ${causedBy} and ${this.queue.length()} queued activities cleared (waited ${dayjs().diff(pauseWaitStart, 's')} seconds while activity processing finished)`);
|
||||
this.firehose.kill();
|
||||
this.queue.kill();
|
||||
@@ -997,18 +1065,12 @@ export class Manager {
|
||||
s.end();
|
||||
}
|
||||
this.streams = [];
|
||||
for (const [k, v] of this.modStreamCallbacks) {
|
||||
const stream = this.cacheManager.modStreams.get(k) as Poll<Snoowrap.Submission | Snoowrap.Comment>;
|
||||
stream.removeListener('item', v);
|
||||
}
|
||||
// for (const [k, v] of this.modStreamCallbacks) {
|
||||
// const stream = this.cacheManager.modStreams.get(k) as Poll<Snoowrap.Submission | Snoowrap.Comment>;
|
||||
// stream.removeListener('item', v);
|
||||
// }
|
||||
this.modStreamCallbacks = new Map();
|
||||
this.startedAt = undefined;
|
||||
this.eventsCheckedSinceStartTotal = 0;
|
||||
this.checksRunSinceStartTotal = 0;
|
||||
this.rulesRunSinceStartTotal = 0;
|
||||
this.rulesCachedSinceStartTotal = 0;
|
||||
this.rulesTriggeredSinceStartTotal = 0;
|
||||
this.checksTriggeredSinceStart = new Map();
|
||||
this.actionsRunSinceStart = new Map();
|
||||
this.logger.info(`Events STOPPED by ${causedBy}`);
|
||||
this.eventsState = {
|
||||
state: STOPPED,
|
||||
|
||||
@@ -2,47 +2,85 @@ import {Poll, SnooStormOptions} from "snoostorm"
|
||||
import Snoowrap from "snoowrap";
|
||||
import {EventEmitter} from "events";
|
||||
import {PollConfiguration} from "snoostorm/out/util/Poll";
|
||||
import {DEFAULT_POLLING_INTERVAL} from "../Common/interfaces";
|
||||
import {ClearProcessedOptions, DEFAULT_POLLING_INTERVAL} from "../Common/interfaces";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import { Duration } from "dayjs/plugin/duration";
|
||||
import {parseDuration, random} from "../util";
|
||||
|
||||
type Awaitable<T> = Promise<T> | T;
|
||||
|
||||
interface RCBPollingOptions extends SnooStormOptions {
|
||||
subreddit: string,
|
||||
clearProcessed?: ClearProcessedOptions
|
||||
}
|
||||
|
||||
interface RCBPollConfiguration<T> extends PollConfiguration<T> {
|
||||
clearProcessed?: ClearProcessedOptions
|
||||
}
|
||||
|
||||
export class SPoll<T extends object> extends Poll<T> {
|
||||
identifier: keyof T;
|
||||
getter: () => Awaitable<T[]>;
|
||||
frequency;
|
||||
running: boolean = false;
|
||||
clearProcessedDuration?: Duration;
|
||||
clearProcessedSize?: number;
|
||||
clearProcessedAfter?: Dayjs;
|
||||
retainProcessed: number = 0;
|
||||
randInterval?: { clear: () => void };
|
||||
|
||||
constructor(options: PollConfiguration<T>) {
|
||||
constructor(options: RCBPollConfiguration<T>) {
|
||||
super(options);
|
||||
this.identifier = options.identifier;
|
||||
this.getter = options.get;
|
||||
this.frequency = options.frequency;
|
||||
const {
|
||||
after,
|
||||
size,
|
||||
retain = 0,
|
||||
} = options.clearProcessed || {};
|
||||
if(after !== undefined) {
|
||||
this.clearProcessedDuration = parseDuration(after);
|
||||
}
|
||||
this.clearProcessedSize = size;
|
||||
this.retainProcessed = retain;
|
||||
if (this.clearProcessedDuration !== undefined) {
|
||||
this.clearProcessedAfter = dayjs().add(this.clearProcessedDuration.asSeconds(), 's');
|
||||
}
|
||||
clearInterval(this.interval);
|
||||
}
|
||||
|
||||
createInterval = () => {
|
||||
this.interval = setTimeout((function (self) {
|
||||
return async () => {
|
||||
try {
|
||||
const batch = await self.getter();
|
||||
const newItems: T[] = [];
|
||||
for (const item of batch) {
|
||||
const id = item[self.identifier];
|
||||
if (self.processed.has(id)) continue;
|
||||
|
||||
// Emit for new items and add it to the list
|
||||
newItems.push(item);
|
||||
self.processed.add(id);
|
||||
self.emit("item", item);
|
||||
}
|
||||
|
||||
// Emit the new listing of all new items
|
||||
self.emit("listing", newItems);
|
||||
|
||||
// if everything succeeded then create a new timeout
|
||||
self.createInterval();
|
||||
} catch (err: any) {
|
||||
self.emit('error', err);
|
||||
}
|
||||
}
|
||||
})(this), random(this.frequency - 1, this.frequency + 1));
|
||||
}
|
||||
|
||||
startInterval = () => {
|
||||
this.running = true;
|
||||
this.interval = setInterval(async () => {
|
||||
try {
|
||||
const batch = await this.getter();
|
||||
const newItems: T[] = [];
|
||||
for (const item of batch) {
|
||||
const id = item[this.identifier];
|
||||
if (this.processed.has(id)) continue;
|
||||
|
||||
// Emit for new items and add it to the list
|
||||
newItems.push(item);
|
||||
this.processed.add(id);
|
||||
this.emit("item", item);
|
||||
}
|
||||
|
||||
// Emit the new listing of all new items
|
||||
this.emit("listing", newItems);
|
||||
} catch (err) {
|
||||
this.emit('error', err);
|
||||
this.end();
|
||||
}
|
||||
}, this.frequency);
|
||||
this.createInterval();
|
||||
}
|
||||
|
||||
end = () => {
|
||||
@@ -54,11 +92,12 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
export class UnmoderatedStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
|
||||
constructor(
|
||||
client: Snoowrap,
|
||||
options: SnooStormOptions & { subreddit: string }) {
|
||||
options: RCBPollingOptions) {
|
||||
super({
|
||||
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
|
||||
get: async () => client.getSubreddit(options.subreddit).getUnmoderated(options),
|
||||
identifier: "id",
|
||||
clearProcessed: options.clearProcessed
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -66,11 +105,12 @@ export class UnmoderatedStream extends SPoll<Snoowrap.Submission | Snoowrap.Comm
|
||||
export class ModQueueStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
|
||||
constructor(
|
||||
client: Snoowrap,
|
||||
options: SnooStormOptions & { subreddit: string }) {
|
||||
options: RCBPollingOptions) {
|
||||
super({
|
||||
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
|
||||
get: async () => client.getSubreddit(options.subreddit).getModqueue(options),
|
||||
identifier: "id",
|
||||
clearProcessed: options.clearProcessed
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -78,11 +118,12 @@ export class ModQueueStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment
|
||||
export class SubmissionStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
|
||||
constructor(
|
||||
client: Snoowrap,
|
||||
options: SnooStormOptions & { subreddit: string }) {
|
||||
options: RCBPollingOptions) {
|
||||
super({
|
||||
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
|
||||
get: async () => client.getNew(options.subreddit, options),
|
||||
identifier: "id",
|
||||
clearProcessed: options.clearProcessed
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -90,11 +131,12 @@ export class SubmissionStream extends SPoll<Snoowrap.Submission | Snoowrap.Comme
|
||||
export class CommentStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
|
||||
constructor(
|
||||
client: Snoowrap,
|
||||
options: SnooStormOptions & { subreddit: string }) {
|
||||
options: RCBPollingOptions) {
|
||||
super({
|
||||
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
|
||||
get: async () => client.getNewComments(options.subreddit, options),
|
||||
identifier: "id",
|
||||
clearProcessed: options.clearProcessed
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import Snoowrap, {RedditUser, Subreddit} from "snoowrap";
|
||||
import Snoowrap, {RedditUser} from "snoowrap";
|
||||
import objectHash from 'object-hash';
|
||||
import {
|
||||
activityIsDeleted, activityIsFiltered,
|
||||
@@ -9,22 +9,38 @@ import {
|
||||
testAuthorCriteria
|
||||
} from "../Utils/SnoowrapUtils";
|
||||
import winston, {Logger} from "winston";
|
||||
import as from 'async';
|
||||
import fetch from 'node-fetch';
|
||||
import {
|
||||
asSubmission,
|
||||
buildCacheOptionsFromProvider, buildCachePrefix,
|
||||
cacheStats, comparisonTextOp, createCacheManager,
|
||||
cacheStats, compareDurationValue, comparisonTextOp, createCacheManager, createHistoricalStatsDisplay,
|
||||
formatNumber, getActivityAuthorName, getActivitySubredditName, isStrongSubredditState,
|
||||
mergeArr,
|
||||
parseExternalUrl, parseGenericValueComparison,
|
||||
parseWikiContext, toStrongSubredditState
|
||||
mergeArr, parseDurationComparison,
|
||||
parseExternalUrl, parseGenericValueComparison, parseRedditEntity,
|
||||
parseWikiContext, shouldCacheSubredditStateCriteriaResult, subredditStateIsNameOnly, toStrongSubredditState
|
||||
} from "../util";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
import {
|
||||
BotInstanceConfig,
|
||||
CacheOptions, CommentState,
|
||||
Footer, OperatorConfig, ResourceStats, StrongCache, SubmissionState,
|
||||
CacheConfig, TTLConfig, TypedActivityStates, UserResultCache, ActionedEvent, SubredditState, StrongSubredditState
|
||||
CacheOptions,
|
||||
CommentState,
|
||||
Footer,
|
||||
OperatorConfig,
|
||||
ResourceStats,
|
||||
StrongCache,
|
||||
SubmissionState,
|
||||
CacheConfig,
|
||||
TTLConfig,
|
||||
TypedActivityStates,
|
||||
UserResultCache,
|
||||
ActionedEvent,
|
||||
SubredditState,
|
||||
StrongSubredditState,
|
||||
HistoricalStats,
|
||||
HistoricalStatUpdateData,
|
||||
SubredditHistoricalStats,
|
||||
SubredditHistoricalStatsDisplay, ThirdPartyCredentialsJsonConfig,
|
||||
} from "../Common/interfaces";
|
||||
import UserNotes from "./UserNotes";
|
||||
import Mustache from "mustache";
|
||||
@@ -32,9 +48,12 @@ import he from "he";
|
||||
import {AuthorCriteria} from "../Author/Author";
|
||||
import {SPoll} from "./Streams";
|
||||
import {Cache} from 'cache-manager';
|
||||
import {Submission, Comment} from "snoowrap/dist/objects";
|
||||
import {cacheTTLDefaults} from "../Common/defaults";
|
||||
import {Submission, Comment, Subreddit} from "snoowrap/dist/objects";
|
||||
import {cacheTTLDefaults, createHistoricalDefaults, historicalDefaults} from "../Common/defaults";
|
||||
import {check} from "tcp-port-used";
|
||||
import {ExtendedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
import dayjs from "dayjs";
|
||||
import ImageData from "../Common/ImageData";
|
||||
|
||||
export const DEFAULT_FOOTER = '\r\n*****\r\nThis action was performed by [a bot.]({{botLink}}) Mention a moderator or [send a modmail]({{modmailLink}}) if you any ideas, questions, or concerns about this action.';
|
||||
|
||||
@@ -42,7 +61,8 @@ export interface SubredditResourceConfig extends Footer {
|
||||
caching?: CacheConfig,
|
||||
subreddit: Subreddit,
|
||||
logger: Logger;
|
||||
client: Snoowrap
|
||||
client: ExtendedSnoowrap
|
||||
credentials?: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
interface SubredditResourceOptions extends Footer {
|
||||
@@ -52,9 +72,10 @@ interface SubredditResourceOptions extends Footer {
|
||||
cacheSettingsHash: string
|
||||
subreddit: Subreddit,
|
||||
logger: Logger;
|
||||
client: Snoowrap;
|
||||
client: ExtendedSnoowrap;
|
||||
prefix?: string;
|
||||
actionedEventsMax: number;
|
||||
thirdPartyCredentials: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
export interface SubredditResourceSetOptions extends CacheConfig, Footer {
|
||||
@@ -69,20 +90,26 @@ export class SubredditResources {
|
||||
protected submissionTTL: number | false = cacheTTLDefaults.submissionTTL;
|
||||
protected commentTTL: number | false = cacheTTLDefaults.commentTTL;
|
||||
protected filterCriteriaTTL: number | false = cacheTTLDefaults.filterCriteriaTTL;
|
||||
public selfTTL: number | false = cacheTTLDefaults.selfTTL;
|
||||
name: string;
|
||||
protected logger: Logger;
|
||||
userNotes: UserNotes;
|
||||
footer: false | string = DEFAULT_FOOTER;
|
||||
subreddit: Subreddit
|
||||
client: Snoowrap
|
||||
client: ExtendedSnoowrap
|
||||
cache: Cache
|
||||
cacheType: string
|
||||
cacheSettingsHash?: string;
|
||||
pruneInterval?: any;
|
||||
historicalSaveInterval?: any;
|
||||
prefix?: string
|
||||
actionedEventsMax: number;
|
||||
thirdPartyCredentials: ThirdPartyCredentialsJsonConfig;
|
||||
|
||||
stats: { cache: ResourceStats };
|
||||
stats: {
|
||||
cache: ResourceStats
|
||||
historical: SubredditHistoricalStats
|
||||
};
|
||||
|
||||
constructor(name: string, options: SubredditResourceOptions) {
|
||||
const {
|
||||
@@ -93,6 +120,7 @@ export class SubredditResources {
|
||||
authorTTL,
|
||||
wikiTTL,
|
||||
filterCriteriaTTL,
|
||||
selfTTL,
|
||||
submissionTTL,
|
||||
commentTTL,
|
||||
subredditTTL,
|
||||
@@ -103,6 +131,7 @@ export class SubredditResources {
|
||||
actionedEventsMax,
|
||||
cacheSettingsHash,
|
||||
client,
|
||||
thirdPartyCredentials,
|
||||
} = options || {};
|
||||
|
||||
this.cacheSettingsHash = cacheSettingsHash;
|
||||
@@ -117,7 +146,9 @@ export class SubredditResources {
|
||||
this.subredditTTL = subredditTTL === true ? 0 : subredditTTL;
|
||||
this.wikiTTL = wikiTTL === true ? 0 : wikiTTL;
|
||||
this.filterCriteriaTTL = filterCriteriaTTL === true ? 0 : filterCriteriaTTL;
|
||||
this.selfTTL = selfTTL === true ? 0 : selfTTL;
|
||||
this.subreddit = subreddit;
|
||||
this.thirdPartyCredentials = thirdPartyCredentials;
|
||||
this.name = name;
|
||||
if (logger === undefined) {
|
||||
const alogger = winston.loggers.get('app')
|
||||
@@ -127,7 +158,11 @@ export class SubredditResources {
|
||||
}
|
||||
|
||||
this.stats = {
|
||||
cache: cacheStats()
|
||||
cache: cacheStats(),
|
||||
historical: {
|
||||
allTime: createHistoricalDefaults(),
|
||||
lastReload: createHistoricalDefaults()
|
||||
}
|
||||
};
|
||||
|
||||
const cacheUseCB = (miss: boolean) => {
|
||||
@@ -151,10 +186,97 @@ export class SubredditResources {
|
||||
}
|
||||
}
|
||||
|
||||
async initHistoricalStats() {
|
||||
const at = await this.cache.wrap(`${this.name}-historical-allTime`, () => createHistoricalDefaults(), {ttl: 0}) as object;
|
||||
const rehydratedAt: any = {};
|
||||
for(const [k, v] of Object.entries(at)) {
|
||||
if(Array.isArray(v)) {
|
||||
rehydratedAt[k] = new Map(v);
|
||||
} else {
|
||||
rehydratedAt[k] = v;
|
||||
}
|
||||
}
|
||||
this.stats.historical.allTime = rehydratedAt as HistoricalStats;
|
||||
|
||||
// const lr = await this.cache.wrap(`${this.name}-historical-lastReload`, () => createHistoricalDefaults(), {ttl: 0}) as object;
|
||||
// const rehydratedLr: any = {};
|
||||
// for(const [k, v] of Object.entries(lr)) {
|
||||
// if(Array.isArray(v)) {
|
||||
// rehydratedLr[k] = new Map(v);
|
||||
// } else {
|
||||
// rehydratedLr[k] = v;
|
||||
// }
|
||||
// }
|
||||
// this.stats.historical.lastReload = rehydratedLr;
|
||||
}
|
||||
|
||||
updateHistoricalStats(data: HistoricalStatUpdateData) {
|
||||
for(const [k, v] of Object.entries(data)) {
|
||||
if(this.stats.historical.lastReload[k] !== undefined) {
|
||||
if(typeof v === 'number') {
|
||||
this.stats.historical.lastReload[k] += v;
|
||||
} else if(this.stats.historical.lastReload[k] instanceof Map) {
|
||||
const keys = Array.isArray(v) ? v : [v];
|
||||
for(const key of keys) {
|
||||
this.stats.historical.lastReload[k].set(key, (this.stats.historical.lastReload[k].get(key) || 0) + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
if(this.stats.historical.allTime[k] !== undefined) {
|
||||
if(typeof v === 'number') {
|
||||
this.stats.historical.allTime[k] += v;
|
||||
} else if(this.stats.historical.allTime[k] instanceof Map) {
|
||||
const keys = Array.isArray(v) ? v : [v];
|
||||
for(const key of keys) {
|
||||
this.stats.historical.allTime[k].set(key, (this.stats.historical.allTime[k].get(key) || 0) + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
getHistoricalDisplayStats(): SubredditHistoricalStatsDisplay {
|
||||
return {
|
||||
allTime: createHistoricalStatsDisplay(this.stats.historical.allTime),
|
||||
lastReload: createHistoricalStatsDisplay(this.stats.historical.lastReload)
|
||||
}
|
||||
}
|
||||
|
||||
async saveHistoricalStats() {
|
||||
const atSerializable: any = {};
|
||||
for(const [k, v] of Object.entries(this.stats.historical.allTime)) {
|
||||
if(v instanceof Map) {
|
||||
atSerializable[k] = Array.from(v.entries());
|
||||
} else {
|
||||
atSerializable[k] = v;
|
||||
}
|
||||
}
|
||||
await this.cache.set(`${this.name}-historical-allTime`, atSerializable, {ttl: 0});
|
||||
|
||||
// const lrSerializable: any = {};
|
||||
// for(const [k, v] of Object.entries(this.stats.historical.lastReload)) {
|
||||
// if(v instanceof Map) {
|
||||
// lrSerializable[k] = Array.from(v.entries());
|
||||
// } else {
|
||||
// lrSerializable[k] = v;
|
||||
// }
|
||||
// }
|
||||
// await this.cache.set(`${this.name}-historical-lastReload`, lrSerializable, {ttl: 0});
|
||||
}
|
||||
|
||||
setHistoricalSaveInterval() {
|
||||
this.historicalSaveInterval = setInterval((function(self) {
|
||||
return async () => {
|
||||
await self.saveHistoricalStats();
|
||||
}
|
||||
})(this),10000);
|
||||
}
|
||||
|
||||
async getCacheKeyCount() {
|
||||
if (this.cache.store.keys !== undefined) {
|
||||
if(this.cacheType === 'redis') {
|
||||
return (await this.cache.store.keys(`${this.prefix}*`)).length;
|
||||
const keys = await this.cache.store.keys(`${this.prefix}*`);
|
||||
return keys.length;
|
||||
}
|
||||
return (await this.cache.store.keys()).length;
|
||||
}
|
||||
@@ -266,29 +388,74 @@ export class SubredditResources {
|
||||
// @ts-ignore
|
||||
return await item.fetch();
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error while trying to fetch a cached activity', err);
|
||||
throw err.logged;
|
||||
}
|
||||
}
|
||||
|
||||
async hasActivity(item: Submission | Comment) {
|
||||
const hash = asSubmission(item) ? `sub-${item.name}` : `comm-${item.name}`;
|
||||
const res = await this.cache.get(hash);
|
||||
return res !== undefined && res !== null;
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
async getRecentSelf(item: Submission | Comment): Promise<(Submission | Comment | undefined)> {
|
||||
const hash = asSubmission(item) ? `sub-recentSelf-${item.name}` : `comm-recentSelf-${item.name}`;
|
||||
const res = await this.cache.get(hash);
|
||||
if(res === null) {
|
||||
return undefined;
|
||||
}
|
||||
return res as (Submission | Comment | undefined);
|
||||
}
|
||||
|
||||
async setRecentSelf(item: Submission | Comment) {
|
||||
if(this.selfTTL !== false) {
|
||||
const hash = asSubmission(item) ? `sub-recentSelf-${item.name}` : `comm-recentSelf-${item.name}`;
|
||||
// @ts-ignore
|
||||
await this.cache.set(hash, item, {ttl: this.selfTTL});
|
||||
}
|
||||
return;
|
||||
}
|
||||
/**
|
||||
* Returns true if the activity being checked was recently acted on/created by the bot and has not changed since that time
|
||||
* */
|
||||
async hasRecentSelf(item: Submission | Comment) {
|
||||
const recent = await this.getRecentSelf(item) as (Submission | Comment | undefined);
|
||||
if (recent !== undefined) {
|
||||
return item.num_reports === recent.num_reports;
|
||||
|
||||
// can't really used edited since its only ever updated once with no timestamp
|
||||
// if(item.num_reports !== recent.num_reports) {
|
||||
// return false;
|
||||
// }
|
||||
// if(!asSubmission(item)) {
|
||||
// return item.edited === recent.edited;
|
||||
// }
|
||||
// return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
async getSubreddit(item: Submission | Comment) {
|
||||
try {
|
||||
let hash = '';
|
||||
const subName = getActivitySubredditName(item);
|
||||
if (this.subredditTTL !== false) {
|
||||
hash = `sub-${getActivitySubredditName(item)}`;
|
||||
hash = `sub-${subName}`;
|
||||
await this.stats.cache.subreddit.identifierRequestCount.set(hash, (await this.stats.cache.subreddit.identifierRequestCount.wrap(hash, () => 0) as number) + 1);
|
||||
this.stats.cache.subreddit.requestTimestamps.push(Date.now());
|
||||
this.stats.cache.subreddit.requests++;
|
||||
const cachedSubreddit = await this.cache.get(hash);
|
||||
if (cachedSubreddit !== undefined && cachedSubreddit !== null) {
|
||||
this.logger.debug(`Cache Hit: Subreddit ${item.subreddit.display_name}`);
|
||||
this.logger.debug(`Cache Hit: Subreddit ${subName}`);
|
||||
// @ts-ignore
|
||||
return cachedSubreddit as Subreddit;
|
||||
}
|
||||
// @ts-ignore
|
||||
const subreddit = await this.client.getSubreddit(getActivitySubredditName(item)).fetch() as Subreddit;
|
||||
const subreddit = await this.client.getSubreddit(subName).fetch() as Subreddit;
|
||||
this.stats.cache.subreddit.miss++;
|
||||
// @ts-ignore
|
||||
await this.cache.set(hash, subreddit, {ttl: this.subredditTTL});
|
||||
@@ -296,16 +463,31 @@ export class SubredditResources {
|
||||
return subreddit as Subreddit;
|
||||
} else {
|
||||
// @ts-ignore
|
||||
let subreddit = await this.client.getSubreddit(getActivitySubredditName(item));
|
||||
let subreddit = await this.client.getSubreddit(subName);
|
||||
|
||||
return subreddit as Subreddit;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error while trying to fetch a cached activity', err);
|
||||
throw err.logged;
|
||||
}
|
||||
}
|
||||
|
||||
async hasSubreddit(name: string) {
|
||||
if (this.subredditTTL !== false) {
|
||||
const hash = `sub-${name}`;
|
||||
this.stats.cache.subreddit.requests++
|
||||
this.stats.cache.subreddit.requestTimestamps.push(Date.now());
|
||||
await this.stats.cache.subreddit.identifierRequestCount.set(hash, (await this.stats.cache.subreddit.identifierRequestCount.wrap(hash, () => 0) as number) + 1);
|
||||
const val = await this.cache.get(hash);
|
||||
if(val === undefined || val === null) {
|
||||
this.stats.cache.subreddit.miss++;
|
||||
}
|
||||
return val !== undefined && val !== null;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
async getAuthorActivities(user: RedditUser, options: AuthorTypedActivitiesOptions): Promise<Array<Submission | Comment>> {
|
||||
const userName = getActivityAuthorName(user);
|
||||
if (this.authorTTL !== false) {
|
||||
@@ -397,7 +579,7 @@ export class SubredditResources {
|
||||
// @ts-ignore
|
||||
const wikiPage = sub.getWikiPage(wikiContext.wiki);
|
||||
wikiContent = await wikiPage.content_md;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
let msg = `Could not read wiki page for an unknown reason. Please ensure the page 'https://reddit.com${sub.display_name_prefixed}/wiki/${wikiContext.wiki}' exists and is readable`;
|
||||
if(err.statusCode !== undefined) {
|
||||
if(err.statusCode === 404) {
|
||||
@@ -413,7 +595,7 @@ export class SubredditResources {
|
||||
try {
|
||||
const response = await fetch(extUrl as string);
|
||||
wikiContent = await response.text();
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
const msg = `Error occurred while trying to fetch the url ${extUrl}`;
|
||||
this.logger.error(msg, err);
|
||||
throw new LoggedError(msg);
|
||||
@@ -427,6 +609,67 @@ export class SubredditResources {
|
||||
return wikiContent;
|
||||
}
|
||||
|
||||
async cacheSubreddits(subs: (Subreddit | string)[]) {
|
||||
const allSubs = subs.map(x => typeof x !== 'string' ? x.display_name : x);
|
||||
const subNames = [...new Set(allSubs)];
|
||||
const uncachedSubs = [];
|
||||
|
||||
for(const s of subNames) {
|
||||
if(!(await this.hasSubreddit(s))) {
|
||||
uncachedSubs.push(s);
|
||||
}
|
||||
}
|
||||
if(uncachedSubs.length > 0) {
|
||||
// cache all uncached subs batchly-like
|
||||
const subResults = await this.client.getManySubreddits(uncachedSubs);
|
||||
for(const s of subResults) {
|
||||
// @ts-ignore
|
||||
await this.cache.set(`sub-${s.display_name}`, s, {ttl: this.subredditTTL});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async batchTestSubredditCriteria(items: (Comment | Submission)[], states: (SubredditState | StrongSubredditState)[]): Promise<(Comment | Submission)[]> {
|
||||
let passedItems: (Comment | Submission)[] = [];
|
||||
let unpassedItems: (Comment | Submission)[] = [];
|
||||
|
||||
const {nameOnly = [], full = []} = states.reduce((acc: {nameOnly: (SubredditState | StrongSubredditState)[], full: (SubredditState | StrongSubredditState)[]}, curr) => {
|
||||
if(subredditStateIsNameOnly(curr)) {
|
||||
return {...acc, nameOnly: acc.nameOnly.concat(curr)};
|
||||
}
|
||||
return {...acc, full: acc.full.concat(curr)};
|
||||
}, {nameOnly: [], full: []});
|
||||
|
||||
if(nameOnly.length === 0) {
|
||||
unpassedItems = items;
|
||||
} else {
|
||||
for(const item of items) {
|
||||
const subName = getActivitySubredditName(item);
|
||||
for(const state of nameOnly) {
|
||||
if(await this.isSubreddit({display_name: subName} as Subreddit, state, this.logger)) {
|
||||
passedItems.push(item);
|
||||
break;
|
||||
}
|
||||
}
|
||||
unpassedItems.push(item);
|
||||
}
|
||||
}
|
||||
|
||||
if(unpassedItems.length > 0 && full.length > 0) {
|
||||
await this.cacheSubreddits(unpassedItems.map(x => x.subreddit));
|
||||
for(const item of unpassedItems) {
|
||||
for(const state of full) {
|
||||
if(await this.isSubreddit(await this.getSubreddit(item), state, this.logger)) {
|
||||
passedItems.push(item);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return passedItems;
|
||||
}
|
||||
|
||||
async testSubredditCriteria(item: (Comment | Submission), state: SubredditState | StrongSubredditState) {
|
||||
if(Object.keys(state).length === 0) {
|
||||
return true;
|
||||
@@ -441,7 +684,8 @@ export class SubredditResources {
|
||||
return await this.isSubreddit({display_name: subName} as Subreddit, state, this.logger);
|
||||
}
|
||||
|
||||
if (this.filterCriteriaTTL !== false) {
|
||||
// see comments on shouldCacheSubredditStateCriteriaResult() for why this is needed
|
||||
if (this.filterCriteriaTTL !== false && shouldCacheSubredditStateCriteriaResult(state)) {
|
||||
try {
|
||||
const hash = `subredditCrit-${getActivitySubredditName(item)}-${objectHash.sha1(state)}`;
|
||||
await this.stats.cache.subredditCrit.identifierRequestCount.set(hash, (await this.stats.cache.subredditCrit.identifierRequestCount.wrap(hash, () => 0) as number) + 1);
|
||||
@@ -456,7 +700,7 @@ export class SubredditResources {
|
||||
this.stats.cache.subredditCrit.miss++;
|
||||
await this.cache.set(hash, itemResult, {ttl: this.filterCriteriaTTL});
|
||||
return itemResult;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (err.logged !== true) {
|
||||
this.logger.error('Error occurred while testing subreddit criteria', err);
|
||||
}
|
||||
@@ -497,6 +741,10 @@ export class SubredditResources {
|
||||
}
|
||||
|
||||
async testItemCriteria(i: (Comment | Submission), activityStates: TypedActivityStates) {
|
||||
// return early if nothing is being checked for so we don't store an empty cache result for this (duh)
|
||||
if(activityStates.length === 0) {
|
||||
return true;
|
||||
}
|
||||
if (this.filterCriteriaTTL !== false) {
|
||||
let item = i;
|
||||
let states = activityStates;
|
||||
@@ -522,7 +770,7 @@ export class SubredditResources {
|
||||
this.stats.cache.itemCrit.miss++;
|
||||
await this.cache.set(hash, itemResult, {ttl: this.filterCriteriaTTL});
|
||||
return itemResult;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (err.logged !== true) {
|
||||
this.logger.error('Error occurred while testing item criteria', err);
|
||||
}
|
||||
@@ -555,9 +803,29 @@ export class SubredditResources {
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case 'isUserProfile':
|
||||
const entity = parseRedditEntity(subreddit.display_name);
|
||||
const entityIsUserProfile = entity.type === 'user';
|
||||
if(crit[k] !== entityIsUserProfile) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${entityIsUserProfile}`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
case 'over18':
|
||||
case 'over_18':
|
||||
// handling an edge case where user may have confused Comment/Submission state "over_18" with SubredditState "over18"
|
||||
|
||||
// @ts-ignore
|
||||
if (crit[k] !== subreddit.over18) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${subreddit.over18}`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
default:
|
||||
// @ts-ignore
|
||||
if (crit[k] !== undefined) {
|
||||
if (subreddit[k] !== undefined) {
|
||||
// @ts-ignore
|
||||
if (crit[k] !== subreddit[k]) {
|
||||
// @ts-ignore
|
||||
@@ -614,10 +882,31 @@ export class SubredditResources {
|
||||
}
|
||||
break;
|
||||
case 'reports':
|
||||
if (!item.can_mod_post) {
|
||||
log.debug(`Cannot test for reports on Activity in a subreddit bot account is not a moderator of. Skipping criteria...`);
|
||||
break;
|
||||
}
|
||||
const reportCompare = parseGenericValueComparison(crit[k] as string);
|
||||
if(!comparisonTextOp(item.num_reports, reportCompare.operator, reportCompare.value)) {
|
||||
let reportType = 'total';
|
||||
if(reportCompare.extra !== undefined && reportCompare.extra.trim() !== '') {
|
||||
const requestedType = reportCompare.extra.toLocaleLowerCase().trim();
|
||||
if(requestedType.includes('mod')) {
|
||||
reportType = 'mod';
|
||||
} else if(requestedType.includes('user')) {
|
||||
reportType = 'user';
|
||||
} else {
|
||||
log.warn(`Did not recognize the report type "${requestedType}" -- can only use "mod" or "user". Will default to TOTAL reports`);
|
||||
}
|
||||
}
|
||||
let reportNum = item.num_reports;
|
||||
if(reportType === 'user') {
|
||||
reportNum = item.user_reports.length;
|
||||
} else {
|
||||
reportNum = item.mod_reports.length;
|
||||
}
|
||||
if(!comparisonTextOp(reportNum, reportCompare.operator, reportCompare.value)) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${item.num_reports}`)
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} ${reportType} reports | Found => ${k}:${reportNum} ${reportType} reports`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
@@ -638,6 +927,10 @@ export class SubredditResources {
|
||||
}
|
||||
break;
|
||||
case 'filtered':
|
||||
if (!item.can_mod_post) {
|
||||
log.debug(`Cannot test for 'filtered' state on Activity in a subreddit bot account is not a moderator for. Skipping criteria...`);
|
||||
break;
|
||||
}
|
||||
const filtered = activityIsFiltered(item);
|
||||
if (filtered !== crit['filtered']) {
|
||||
// @ts-ignore
|
||||
@@ -645,6 +938,13 @@ export class SubredditResources {
|
||||
return false
|
||||
}
|
||||
break;
|
||||
case 'age':
|
||||
const ageTest = compareDurationValue(parseDurationComparison(crit[k] as string), dayjs.unix(await item.created));
|
||||
if (!ageTest) {
|
||||
log.debug(`Failed: Activity did not pass age test "${crit[k] as string}"`);
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case 'title':
|
||||
if((item instanceof Comment)) {
|
||||
log.warn('`title` is not allowed in `itemIs` criteria when the main Activity is a Comment');
|
||||
@@ -653,16 +953,54 @@ export class SubredditResources {
|
||||
// @ts-ignore
|
||||
const titleReg = crit[k] as string;
|
||||
try {
|
||||
if(null === item.title.match(titleReg)) {
|
||||
if (null === item.title.match(titleReg)) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed to match title as regular expression: ${titleReg}`);
|
||||
return false;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
log.error(`An error occurred while attempting to match title against string as regular expression: ${titleReg}. Most likely the string does not make a valid regular expression.`, err);
|
||||
return false
|
||||
}
|
||||
break;
|
||||
case 'approved':
|
||||
case 'spam':
|
||||
if(!item.can_mod_post) {
|
||||
log.debug(`Cannot test for '${k}' state on Activity in a subreddit bot account is not a moderator for. Skipping criteria...`);
|
||||
break;
|
||||
}
|
||||
// @ts-ignore
|
||||
if (item[k] !== crit[k]) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${item[k]}`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
case 'op':
|
||||
if(item instanceof Submission) {
|
||||
log.warn(`On a Submission the 'op' property will always be true. Did you mean to use this on a comment instead?`);
|
||||
break;
|
||||
}
|
||||
// @ts-ignore
|
||||
if (item.is_submitter !== crit[k]) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${item[k]}`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
case 'depth':
|
||||
if(item instanceof Submission) {
|
||||
log.warn(`Cannot test for 'depth' on a Submission`);
|
||||
break;
|
||||
}
|
||||
// @ts-ignore
|
||||
const depthCompare = parseGenericValueComparison(crit[k] as string);
|
||||
if(!comparisonTextOp(item.score, depthCompare.operator, depthCompare.value)) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${item.score}`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
default:
|
||||
// @ts-ignore
|
||||
if (item[k] !== undefined) {
|
||||
@@ -673,7 +1011,11 @@ export class SubredditResources {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
log.warn(`Tried to test for Item property '${k}' but it did not exist`);
|
||||
if(!item.can_mod_post) {
|
||||
log.warn(`Tried to test for Activity property '${k}' but it did not exist. This Activity is not in a subreddit the bot can mod so it may be that this property is only available to mods of that subreddit. Or the property may be misspelled.`);
|
||||
} else {
|
||||
log.warn(`Tried to test for Activity property '${k}' but it did not exist. Check the spelling of the property.`);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -693,6 +1035,8 @@ export class SubredditResources {
|
||||
const userName = getActivityAuthorName(item.author);
|
||||
const hash = `commentUserResult-${userName}-${item.link_id}-${objectHash.sha1(checkConfig)}`;
|
||||
this.stats.cache.commentCheck.requests++;
|
||||
this.stats.cache.commentCheck.requestTimestamps.push(Date.now());
|
||||
await this.stats.cache.commentCheck.identifierRequestCount.set(hash, (await this.stats.cache.commentCheck.identifierRequestCount.wrap(hash, () => 0) as number) + 1);
|
||||
let result = await this.cache.get(hash) as UserResultCache | undefined | null;
|
||||
if(result === null) {
|
||||
result = undefined;
|
||||
@@ -723,6 +1067,40 @@ export class SubredditResources {
|
||||
const footerRawContent = await this.getContent(footer, item.subreddit);
|
||||
return he.decode(Mustache.render(footerRawContent, {subName, permaLink, modmailLink, botLink: BOT_LINK}));
|
||||
}
|
||||
|
||||
async getImageHash(img: ImageData): Promise<string|undefined> {
|
||||
const hash = `imgHash-${img.baseUrl}`;
|
||||
const result = await this.cache.get(hash) as string | undefined | null;
|
||||
this.stats.cache.imageHash.requests++
|
||||
this.stats.cache.imageHash.requestTimestamps.push(Date.now());
|
||||
await this.stats.cache.imageHash.identifierRequestCount.set(hash, (await this.stats.cache.imageHash.identifierRequestCount.wrap(hash, () => 0) as number) + 1);
|
||||
if(result !== undefined && result !== null) {
|
||||
return result;
|
||||
}
|
||||
this.stats.cache.commentCheck.miss++;
|
||||
return undefined;
|
||||
// const hash = await this.cache.wrap(img.baseUrl, async () => await img.hash(true), { ttl }) as string;
|
||||
// if(img.hashResult === undefined) {
|
||||
// img.hashResult = hash;
|
||||
// }
|
||||
// return hash;
|
||||
}
|
||||
|
||||
async setImageHash(img: ImageData, hash: string, ttl: number): Promise<void> {
|
||||
await this.cache.set(`imgHash-${img.baseUrl}`, hash, {ttl});
|
||||
// const hash = await this.cache.wrap(img.baseUrl, async () => await img.hash(true), { ttl }) as string;
|
||||
// if(img.hashResult === undefined) {
|
||||
// img.hashResult = hash;
|
||||
// }
|
||||
// return hash;
|
||||
}
|
||||
|
||||
getThirdPartyCredentials(name: string) {
|
||||
if(this.thirdPartyCredentials[name] !== undefined) {
|
||||
return this.thirdPartyCredentials[name];
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export class BotResourcesManager {
|
||||
@@ -738,6 +1116,7 @@ export class BotResourcesManager {
|
||||
actionedEventsMaxDefault?: number;
|
||||
actionedEventsDefault: number;
|
||||
pruneInterval: any;
|
||||
defaultThirdPartyCredentials: ThirdPartyCredentialsJsonConfig;
|
||||
|
||||
constructor(config: BotInstanceConfig) {
|
||||
const {
|
||||
@@ -749,19 +1128,24 @@ export class BotResourcesManager {
|
||||
submissionTTL,
|
||||
subredditTTL,
|
||||
filterCriteriaTTL,
|
||||
selfTTL,
|
||||
provider,
|
||||
actionedEventsMax,
|
||||
actionedEventsDefault,
|
||||
},
|
||||
name,
|
||||
credentials,
|
||||
credentials: {
|
||||
reddit,
|
||||
...thirdParty
|
||||
},
|
||||
caching,
|
||||
} = config;
|
||||
caching.provider.prefix = buildCachePrefix([caching.provider.prefix, 'SHARED']);
|
||||
const {actionedEventsMax: eMax, actionedEventsDefault: eDef, ...relevantCacheSettings} = caching;
|
||||
this.cacheHash = objectHash.sha1(relevantCacheSettings);
|
||||
this.defaultCacheConfig = caching;
|
||||
this.ttlDefaults = {authorTTL, userNotesTTL, wikiTTL, commentTTL, submissionTTL, filterCriteriaTTL, subredditTTL};
|
||||
this.defaultThirdPartyCredentials = thirdParty;
|
||||
this.ttlDefaults = {authorTTL, userNotesTTL, wikiTTL, commentTTL, submissionTTL, filterCriteriaTTL, subredditTTL, selfTTL};
|
||||
|
||||
const options = provider;
|
||||
this.cacheType = options.store;
|
||||
@@ -791,15 +1175,16 @@ export class BotResourcesManager {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
set(subName: string, initOptions: SubredditResourceConfig): SubredditResources {
|
||||
async set(subName: string, initOptions: SubredditResourceConfig): Promise<SubredditResources> {
|
||||
let hash = 'default';
|
||||
const { caching, ...init } = initOptions;
|
||||
const { caching, credentials, ...init } = initOptions;
|
||||
|
||||
let opts: SubredditResourceOptions = {
|
||||
cache: this.defaultCache,
|
||||
cacheType: this.cacheType,
|
||||
cacheSettingsHash: hash,
|
||||
ttl: this.ttlDefaults,
|
||||
thirdPartyCredentials: credentials ?? this.defaultThirdPartyCredentials,
|
||||
prefix: this.defaultCacheConfig.provider.prefix,
|
||||
actionedEventsMax: this.actionedEventsMaxDefault !== undefined ? Math.min(this.actionedEventsDefault, this.actionedEventsMaxDefault) : this.actionedEventsDefault,
|
||||
...init,
|
||||
@@ -827,6 +1212,7 @@ export class BotResourcesManager {
|
||||
actionedEventsMax: eventsMax,
|
||||
cacheType: trueProvider.store,
|
||||
cacheSettingsHash: hash,
|
||||
thirdPartyCredentials: credentials ?? this.defaultThirdPartyCredentials,
|
||||
prefix: subPrefix,
|
||||
...init,
|
||||
...trueRest,
|
||||
@@ -841,6 +1227,8 @@ export class BotResourcesManager {
|
||||
res.cache.reset();
|
||||
}
|
||||
resource = new SubredditResources(subName, opts);
|
||||
await resource.initHistoricalStats();
|
||||
resource.setHistoricalSaveInterval();
|
||||
this.resources.set(subName, resource);
|
||||
} else {
|
||||
// just set non-cache related settings
|
||||
@@ -851,7 +1239,41 @@ export class BotResourcesManager {
|
||||
// reset cache stats when configuration is reloaded
|
||||
resource.stats.cache = cacheStats();
|
||||
}
|
||||
resource.stats.historical.lastReload = createHistoricalDefaults();
|
||||
|
||||
return resource;
|
||||
}
|
||||
|
||||
async getPendingSubredditInvites(): Promise<(string[])> {
|
||||
const subredditNames = await this.defaultCache.get(`modInvites`);
|
||||
if (subredditNames !== undefined && subredditNames !== null) {
|
||||
return subredditNames as string[];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
async addPendingSubredditInvite(subreddit: string): Promise<void> {
|
||||
let subredditNames = await this.defaultCache.get(`modInvites`) as (string[] | undefined | null);
|
||||
if (subredditNames === undefined || subredditNames === null) {
|
||||
subredditNames = [];
|
||||
}
|
||||
subredditNames.push(subreddit);
|
||||
await this.defaultCache.set(`modInvites`, subredditNames, {ttl: 0});
|
||||
return;
|
||||
}
|
||||
|
||||
async deletePendingSubredditInvite(subreddit: string): Promise<void> {
|
||||
let subredditNames = await this.defaultCache.get(`modInvites`) as (string[] | undefined | null);
|
||||
if (subredditNames === undefined || subredditNames === null) {
|
||||
subredditNames = [];
|
||||
}
|
||||
subredditNames = subredditNames.filter(x => x !== subreddit);
|
||||
await this.defaultCache.set(`modInvites`, subredditNames, {ttl: 0});
|
||||
return;
|
||||
}
|
||||
|
||||
async clearPendingSubredditInvites(): Promise<void> {
|
||||
await this.defaultCache.del(`modInvites`);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user