Mirror of https://github.com/FoxxMD/context-mod.git (synced 2026-01-14 07:57:57 -05:00)
Compare commits
82 Commits
Commit SHA1s in this comparison (author and date columns were not captured by the mirror):

ba53233640, ede86d285b, 52f6aabb69, 18175f3662, 68a272d305, 3dac91fafc, e5bb8c2a38, 61e0baf3fd, 37e9d1fcc2, 5e70ca1cb6,
7f7ed18927, efed3381fd, 5ac5d65a28, 1ac7ad4724, 0ae74fdce1, 845173822c, edb3036957, 3790f0e061, e3e4e4abff, fd9b83437b,
05694f115c, 70ee157198, bbb4ec3c2d, acb72551ec, bf6affe592, 8c2cb02a46, 73e2af2100, ba4c4af5a7, 9ad21ee2dd, b32c4f213c,
7e01c8d1f8, aee158ecc9, 8cd2243c2d, 4969789532, 1dcfdc14d1, f1c9b64f64, 2e5a61566b, 85761fa662, 0b1a6bd77b, 51e299ca99,
7696f3c2ff, 1c9ed41e70, 2d67f9f57d, 975bcb6ad7, 2a282a0d6f, 0d087521a7, fb5fc961cc, c04b305881, 5c5e9a26aa, 477d1a10ae,
bbee92699c, 7f09043cdf, 768a199c40, 6e4b0c7719, 89b21e6073, da611c5894, 2c90a260c0, f081598da6, 55f45163a4, e4dfa9dde3,
0e395792db, dcbeb784e8, aeaeb6ce27, d6a29c5914, c1224121d4, 9790e681ea, a48a850c98, b8369a9e9f, 0c31bdf25e, 4b14e581dd,
b2846efd2b, a787e4515b, f63e2a0ec4, 9d0e098db1, 181390f0eb, a8c7b1dac9, fd5a92758d, 027199d788, 2a9f01b928, cf54502f0d,
2a3663ccc9, dc2eeffcb5
@@ -1,8 +1,8 @@
node_modules
Dockerfile
.dockerignore
.gitignore
.git
src/logs
/docs
.github
/docs/
/node_modules/
.gitignore (1 change, vendored)

@@ -381,4 +381,5 @@ dist
.pnp.*

**/src/**/*.js
!src/Web/assets/public/yaml/*
**/src/**/*.map
Dockerfile (14 changes)

@@ -1,15 +1,17 @@
FROM node:16-alpine3.14
FROM node:16-alpine3.14 as base

ENV TZ=Etc/GMT

# vips required to run sharp library for image comparison
RUN echo "http://dl-4.alpinelinux.org/alpine/v3.14/community" >> /etc/apk/repositories \
&& apk --update add vips
&& apk --no-cache add vips

RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

WORKDIR /usr/app

FROM base as build

COPY package*.json ./
COPY tsconfig.json .

@@ -17,7 +19,13 @@ RUN npm install

ADD . /usr/app

RUN npm run build
RUN npm run build && rm -rf node_modules

FROM base as app

COPY --from=build /usr/app /usr/app

RUN npm install --production

ENV NPM_CONFIG_LOGLEVEL debug
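The new multi-stage layout (base -> build -> app) keeps only production dependencies in the final image. As a usage illustration only, here is a minimal Docker Compose sketch for running the built image; the image name `foxxmd/context-mod` and the idea that the container reads the same environment variables listed in app.json below are assumptions, not something this diff states:

```yaml
# hypothetical compose file for the image produced by the Dockerfile above
# image name and environment variable names are assumptions -- consult the operator configuration docs
version: '3'
services:
  context-mod:
    image: foxxmd/context-mod   # assumed published image name
    environment:
      TZ: Etc/GMT               # same default timezone the Dockerfile sets
      # REFRESH_TOKEN: ...      # credentials as described in app.json / operator configuration
```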
@@ -19,14 +19,15 @@ Some feature highlights:
* Default/no configuration runs "All In One" behavior
* Additional configuration allows web interface to connect to multiple servers
* Each server instance can run multiple reddit accounts as bots
* **Per-subreddit configuration** is handled by JSON stored in the subreddit wiki
* Any text-based actions (comment, submission, message, usernotes, ban, etc...) can be configured via a wiki page or raw text in JSON and support [mustache](https://mustache.github.io) [templating](/docs/actionTemplating.md)
* **Per-subreddit configuration** is handled by YAML (**like automoderator!**) or JSON stored in the subreddit wiki
* Any text-based actions (comment, submission, message, usernotes, ban, etc...) can be configured via a wiki page or raw text and support [mustache](https://mustache.github.io) [templating](/docs/actionTemplating.md)
* History-based rules support multiple "valid window" types -- [ISO 8601 Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations), [Day.js Durations](https://day.js.org/docs/en/durations/creating), and submission/comment count limits.
* Support Activity skipping based on:
  * Author criteria (name, css flair/text, age, karma, moderator status, and [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes))
  * Activity state (removed, locked, distinguished, etc.)
* Rules and Actions support named references (write once, reference anywhere)
* [**Image Comparisons**](/docs/imageComparison.md) via fingerprinting and/or pixel differences
* [**Repost detection**](/docs/examples/repost) with support for external services (youtube, etc...)
* Global/subreddit-level **API caching**
* Support for [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes) as criteria or Actions (writing notes)
* Docker container support

@@ -84,7 +85,7 @@ See the [Moderator's Getting Started Guide](/docs/gettingStartedMod.md)

## Configuration and Documentation

Context Bot's configuration can be written in JSON, [JSON5](https://json5.org/) or YAML. Its schema conforms to [JSON Schema Draft 7](https://json-schema.org/). Additionally, many **operator** settings can be passed via command line or environmental variables.
Context Bot's configuration can be written in YAML (like automoderator) or [JSON5](https://json5.org/). Its schema conforms to [JSON Schema Draft 7](https://json-schema.org/). Additionally, many **operator** settings can be passed via command line or environmental variables.

* For **operators** (running the bot instance) see the [Operator Configuration](/docs/operatorConfiguration.md) guide
* For **moderators** consult the [app schema and examples folder](/docs/#configuration-and-usage)

@@ -125,7 +126,7 @@ Moderator view/invite and authorization:

A built-in editor using [monaco-editor](https://microsoft.github.io/monaco-editor/) makes editing configurations easy:

* Automatic JSON syntax validation and formatting
* Automatic JSON or YAML syntax validation and formatting
* Automatic Schema (subreddit or operator) validation
* All properties are annotated via hover popups
* Unauthenticated view via `yourdomain.com/config`
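Since the README highlights mustache templating for text-based actions, here is a minimal sketch of what that looks like in a subreddit configuration, using the `report` action and the `rules.*` template variables that appear in the example files later in this diff:

```yaml
actions:
  - kind: report
    # {{rules.<ruleName>.<property>}} is replaced with result data from the named rule when the action runs
    content: >-
      {{rules.freekarma.totalCount}} activities in karma subs over {{rules.freekarma.window}}
```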
app.json (14 changes)

@@ -17,12 +17,22 @@
    "REFRESH_TOKEN": {
      "description": "Refresh token retrieved from authenticating an account with your Reddit Application",
      "value": "",
      "required": true
      "required": false
    },
    "ACCESS_TOKEN": {
      "description": "Access token retrieved from authenticating an account with your Reddit Application",
      "value": "",
      "required": true
      "required": false
    },
    "REDIRECT_URI": {
      "description": "Redirect URI you specified when creating your Reddit Application. Required if you want to use the web interface. In the provided example replace 'your-heroku-app-name' with the name of your HEROKU app.",
      "value": "https://your-heroku-app-name.herokuapp.com/callback",
      "required": false
    },
    "OPERATOR": {
      "description": "Your reddit username WITHOUT any prefixes EXAMPLE /u/FoxxMD => FoxxMD. Specified user will be recognized as an admin.",
      "value": "",
      "required": false
    },
    "WIKI_CONFIG": {
      "description": "Relative url to contextbot wiki page EX https://reddit.com/r/subreddit/wiki/<path>",
@@ -102,6 +102,7 @@ Find detailed descriptions of all the Rules, with examples, below:
* [History](/docs/examples/history)
* [Author](/docs/examples/author)
* [Regex](/docs/examples/regex)
* [Repost](/docs/examples/repost)

### Rule Set

@@ -119,6 +120,15 @@ It consists of:
* **rules** -- The **Rules** for the Rule Set.

Example

YAML
```yaml
condition: AND
# rules are an array
rules:
  - aRule
```
JSON
```json5
{
  "condition": "AND",
@@ -17,7 +17,28 @@ Examples of all of the above

<details>

```yaml
# count, last 100 activities
window: 100

# duration, last 10 days
window: 10 days

# duration object, last 2 months and 5 days
window:
  months: 2
  days: 5

# iso 8601 string, last 15 minutes
window: PT15M

# ActivityWindowCriteria, last 100 activities or 6 weeks of activities (whichever is found first)
window:
  count: 100
  duration: 6 weeks
```

```json5
// count, last 100 activities
{
  "window": 100
@@ -49,6 +70,7 @@ Examples of all of the above
  }
}
```

</details>

## Types of Ranges

@@ -95,6 +117,7 @@ If you need to specify multiple units of time for your duration you can instead

Example

JSON
```json
{
  "days": 4,
@@ -102,6 +125,13 @@ Example
  "minutes": 20
}
```
YAML
```yaml
window:
  days: 4
  hours: 6
  minutes: 20
```

##### An ISO 8601 duration string
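The body of this subsection falls outside the diff hunk; for reference, a `window` given as an ISO 8601 duration string looks like the `PT15M` form already shown in the example block above:

```yaml
# last 15 minutes, expressed as an ISO 8601 duration string
window: PT15M
```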
@@ -119,6 +149,7 @@ This is an object that lets you specify more granular conditions for your range.

The full object looks like this:

JSON
```json
{
  "count": 100,
@@ -130,6 +161,19 @@ The full object looks like this:
  }
}
```
YAML
```yaml
window:
  count: 100
  duration: 10 days
  satisfyOn: any
  subreddits:
    include:
      - mealtimevideos
      - pooptimevideos
    exclude:
      - videos
```

### Specifying Range

@@ -142,7 +186,9 @@ If both range properties are specified then the value `satisfyOn` determines how

If **any** then Activities will be retrieved until one of the range properties is met, **whichever occurs first.**

Example
Example

JSON
```json
{
  "count": 80,
@@ -150,6 +196,13 @@ Example
  "satisfyOn": "any"
}
```
YAML
```yaml
window:
  count: 80
  duration: 90 days
  satisfyOn: any
```
Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)

* If 90 days of activities returns only 40 activities => returns 40 activities
@@ -160,6 +213,8 @@ Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)

If **all** then both ranges must be satisfied. Effectively, whichever range produces the most Activities will be the one that is used.

Example

JSON
```json
{
  "count": 100,
@@ -167,6 +222,13 @@ Example
  "satisfyOn": "all"
}
```
YAML
```yaml
window:
  count: 100
  duration: 90 days
  satisfyOn: all
```
Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)

* If at 90 days of activities => 40 activities retrieved
@@ -187,6 +249,8 @@ You may filter retrieved Activities using an array of subreddits.

Use **include** to specify which subreddits should be included in results

Example where only activities from /r/mealtimevideos and /r/modsupport will be returned

JSON
```json
{
  "count": 100,
@@ -196,7 +260,17 @@ Example where only activities from /r/mealtimevideos and /r/modsupport will be r
    "include": ["mealtimevideos","modsupport"]
  }
}

```
YAML
```yaml
window:
  count: 100
  duration: 90 days
  satisfyOn: any
  subreddits:
    include:
      - mealtimevideos
      - modsupport
```

#### Exclude

@@ -204,6 +278,8 @@ Example where only activities from /r/mealtimevideos and /r/modsupport will be r

Use **exclude** to specify which subreddits should NOT be in the results

Example where activities from /r/mealtimevideos and /r/modsupport will not be returned in results

JSON
```json
{
  "count": 100,
@@ -214,4 +290,15 @@ Example where activities from /r/mealtimevideos and /r/modsupport will not be re
  }
}
```
YAML
```yaml
window:
  count: 100
  duration: 90 days
  satisfyOn: any
  subreddits:
    exclude:
      - mealtimevideos
      - modsupport
```
**Note:** `exclude` will be ignored if `include` is also present.
@@ -17,6 +17,7 @@ This directory contains examples of valid, ready-to-go configurations for Context
* [History](/docs/examples/history)
* [Author](/docs/examples/author)
* [Regex](/docs/examples/regex)
* [Repost](/docs/examples/repost)
* [Toolbox User Notes](/docs/examples/userNotes)
* [Advanced Concepts](/docs/examples/advancedConcepts)
  * [Rule Sets](/docs/examples/advancedConcepts/ruleSets.json5)

@@ -1,6 +1,6 @@

### Named Rules

See [ruleNameReuse.json5](/docs/examples/advancedConcepts/ruleNameReuse.json5)
See **Rule Name Reuse Examples [YAML](/docs/examples/advancedConcepts/ruleNameReuse.yaml) | [JSON](/docs/examples/advancedConcepts/ruleNameReuse.json5)**

### Check Order

@@ -23,7 +23,7 @@ The `rules` array on a `Checks` can contain both `Rule` objects and `RuleSet` ob

A **Rule Set** is a "nested" set of `Rule` objects with a passing condition specified. These allow you to create more complex trigger behavior by combining multiple rules.

See **[ruleSets.json5](/docs/examples/advancedConcepts/ruleSets.json5)** for a complete example as well as consulting the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRuleSetJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json).
See **ruleSets [YAML](/docs/examples/advancedConcepts/ruleSets.yaml) | [JSON](/docs/examples/advancedConcepts/ruleSets.json5)** for a complete example as well as consulting the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRuleSetJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json).

### Rule Order
docs/examples/advancedConcepts/ruleNameReuse.yaml (new file, 52 lines)

@@ -0,0 +1,52 @@
checks:
  - name: Auto Remove SP Karma
    description: >-
      Remove submission because author has self-promo >10% and posted in karma
      subs recently
    kind: submission
    rules:
      # named rules can be referenced at any point in the configuration (where they occur does not matter)
      # and can be used in any Check
      # Note: rules do not transfer between subreddit configurations
      - freekarmasub
      - name: attr10all
        kind: attribution
        criteria:
          - threshold: '> 10%'
            window: 90 days
          - threshold: '> 10%'
            window: 100
    actions:
      - kind: remove
      - kind: comment
        content: >-
          Your submission was removed because you are over reddit's threshold
          for self-promotion and recently posted this content in a karma sub
  - name: Free Karma On Submission Alert
    description: Check if author has posted this submission in 'freekarma' subreddits
    kind: submission
    rules:
      # rules can be re-used throughout a configuration by referencing them by name
      #
      # The rule name itself can only contain spaces, hyphens and underscores
      # The value used to reference it will have all of these removed, and lower-cased
      #
      # so to reference this rule use the value 'freekarmasub'
      - name: Free_Karma-SUB
        kind: recentActivity
        lookAt: submissions
        useSubmissionAsReference: true
        thresholds:
          - threshold: '>= 1'
            subreddits:
              - DeFreeKarma
              - FreeKarma4U
              - FreeKarma4You
              - upvote
        window: 7 days
    actions:
      - kind: report
        content: >-
          Submission posted {{rules.freekarmasub.totalCount}} times in karma
          {{rules.freekarmasub.subCount}} subs over
          {{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}
docs/examples/advancedConcepts/ruleSets.yaml (new file, 53 lines)

@@ -0,0 +1,53 @@
checks:
  - name: Self Promo All or low comment
    description: >-
      SP >10% of all activities or >10% of submissions with low comment
      engagement
    kind: submission
    rules:
      # this attribution rule is looking at all activities
      #
      # we want this one rule to trigger the check because >10% of all activity (submissions AND comments) is a good requirement
      - name: attr10all
        kind: attribution
        criteria:
          - threshold: '> 10%'
            window: 90 days
          - threshold: '> 10%'
            window: 100
      # this is a RULE SET
      #
      # it is made up of "nested" rules with a pass condition (AND/OR)
      # if the nested rules pass the condition then the Rule Set triggers the Check
      #
      # AND = all nested rules must be triggered to make the Rule Set trigger
      # OR = any one of the nested rules being triggered will trigger the Rule Set
      - condition: AND
        # in this check we use an Attribution >10% on ONLY submissions, which is a lower requirement than the above attribution rule
        # and combine it with a History rule looking for low comment engagement
        # to make a "higher" requirement Rule Set out of two low requirement Rules
        rules:
          - name: attr20sub
            kind: attribution
            criteria:
              - threshold: '> 10%'
                thresholdOn: submissions
                window: 90 days
              - threshold: '> 10%'
                thresholdOn: submissions
                window: 100
            lookAt: media
          - name: lowOrOpComm
            kind: history
            criteriaJoin: OR
            criteria:
              - window: 90 days
                comment: < 50%
              - window: 90 days
                comment: '> 40% OP'
    actions:
      - kind: remove
      - kind: comment
        content: >-
          Your submission was removed because you are over reddit's threshold
          for self-promotion or exhibit low comment engagement
@@ -10,5 +10,5 @@ Consult the [schema](https://json-schema.app/view/%23/%23%2Fdefinitions%2FCheckJ

### Examples

* [Self Promotion as percentage of all Activities](/docs/examples/attribution/redditSelfPromoAll.json5) - Check if Author is submitting much more than they comment.
* [Self Promotion as percentage of Submissions](/docs/examples/attribution/redditSelfPromoSubmissionsOnly.json5) - Check if any of Author's aggregated submission origins are >10% of their submissions
* Self Promotion as percentage of all Activities [YAML](/docs/examples/attribution/redditSelfPromoAll.yaml) | [JSON](/docs/examples/attribution/redditSelfPromoAll.json5) - Check if Author is submitting much more than they comment.
* Self Promotion as percentage of Submissions [YAML](/docs/examples/attribution/redditSelfPromoSubmissionsOnly.yaml) | [JSON](/docs/examples/attribution/redditSelfPromoSubmissionsOnly.json5) - Check if any of Author's aggregated submission origins are >10% of their submissions
docs/examples/attribution/redditSelfPromoAll.yaml (new file, 27 lines)

@@ -0,0 +1,27 @@
checks:
  - name: Self Promo Activities
    description: >-
      Check if any of Author's aggregated submission origins are >10% of entire
      history
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: attr10all
        kind: attribution
        # criteria defaults to OR -- so either of these criteria will trigger the rule
        criteria:
          - threshold: '> 10%' # threshold can be a percent or an absolute number
            # The default is "all" -- calculate percentage of entire history (submissions & comments)
            #thresholdOn: all
            #
            # look at last 90 days of Author's activities (comments and submissions)
            window: 90 days
          - threshold: '> 10%'
            # look at Author's last 100 activities (comments and submissions)
            window: 100
    actions:
      - kind: report
        content: >-
          {{rules.attr10all.largestPercent}}% of
          {{rules.attr10all.activityTotal}} items over
          {{rules.attr10all.window}}
docs/examples/attribution/redditSelfPromoSubmissionOnly.yaml (new file, 24 lines)

@@ -0,0 +1,24 @@
checks:
  - name: Self Promo Submissions
    description: >-
      Check if any of Author's aggregated submission origins are >10% of their
      submissions
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: attr10sub
        kind: attribution
        # criteria defaults to OR -- so either of these criteria will trigger the rule
        criteria:
          - threshold: '> 10%' # threshold can be a percent or an absolute number
            thresholdOn: submissions # calculate percentage of submissions, rather than entire history (submissions & comments)
            window: 90 days # look at last 90 days of Author's activities (comments and submissions)
          - threshold: '> 10%'
            thresholdOn: submissions
            window: 100 # look at Author's last 100 activities (comments and submissions)
    actions:
      - kind: report
        content: >-
          {{rules.attr10sub.largestPercent}}% of
          {{rules.attr10sub.activityTotal}} items over
          {{rules.attr10sub.window}}
@@ -18,10 +18,10 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorRule
### Examples

* Basic examples
  * [Flair new user Submission](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does not have the `vet` flair then flair the Submission with `New User`
  * [Flair vetted user Submission](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does have the `vet` flair then flair the Submission with `Vetted`
  * Flair new user Submission [YAML](/docs/examples/author/flairNewUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does not have the `vet` flair then flair the Submission with `New User`
  * Flair vetted user Submission [YAML](/docs/examples/author/flairVettedUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does have the `vet` flair then flair the Submission with `Vetted`
* Used with other Rules
  * [Ignore vetted user](/docs/examples/author/flairNewUserSubmission.json5) - Short-circuit the Check if the Author has the `vet` flair
  * Ignore vetted user [YAML](/docs/examples/author/ignoreVettedUser.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - Short-circuit the Check if the Author has the `vet` flair

## Filter

@@ -35,4 +35,4 @@ All **Rules** and **Checks** have an optional `authorIs` property that takes an

### Examples

* [Skip recent activity check based on author](/docs/examples/author/authorFilter.json5) - Skip a Recent Activity check for a set of subreddits if the Author of the Submission has any set of flairs.
* Skip recent activity check based on author [YAML](/docs/examples/author/authorFilter.yaml) | [JSON](/docs/examples/author/authorFilter.json5) - Skip a Recent Activity check for a set of subreddits if the Author of the Submission has any set of flairs.
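The `authorIs` sentence above is cut off by the hunk boundary; as a minimal sketch (using the same shape as the authorFilter.yaml example that follows), a filter that skips a Rule or Check when the author has the `vet` flair looks like:

```yaml
authorIs:
  exclude:
    # skip when the author has the "vet" flair text
    - flairText:
        - vet
```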
docs/examples/author/authorFilter.yaml (new file, 48 lines)

@@ -0,0 +1,48 @@
checks:
  - name: Karma/Meme Sub Activity
    description: Report on karma sub activity or meme sub activity if user isn't a memelord
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: freekarma
        kind: recentActivity
        lookAt: submissions
        thresholds:
          - threshold: '>= 1'
            subreddits:
              - DeFreeKarma
              - FreeKarma4U
        window: 7 days
      - name: noobmemer
        kind: recentActivity
        # authors filter will be checked before a rule is run. If anything passes then the Rule is skipped -- it is not failed or triggered.
        # if *all* Rules for a Check are skipped due to authors filter then the Check will fail
        authorIs:
          # each property (include/exclude) can contain multiple AuthorCriteria
          # if any AuthorCriteria passes its test the Rule is skipped
          #
          # for an AuthorCriteria to pass all properties present on it must pass
          #
          # if include is present it will always run and exclude will be skipped
          #include:
          exclude:
            # for this to pass the Author of the Submission must not have the flair "Supreme Memer" and have the name "user1" or "user2"
            - flairText:
                - Supreme Memer
              names:
                - user1
                - user2
            # for this to pass the Author of the Submission must not have the flair "Decent Memer"
            - flairText:
                - Decent Memer
        lookAt: submissions
        thresholds:
          - threshold: '>= 1'
            subreddits:
              - dankmemes
        window: 7 days
    actions:
      - kind: report
        content: >-
          Author has posted in free karma sub, or in /r/dankmemes and does not
          have meme flair in this subreddit
docs/examples/author/flairNewUserSubmission.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
checks:
  - name: Flair New User Sub
    description: Flair submission as sketchy if user does not have vet flair
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: newflair
        kind: author
        # rule will trigger if Author does not have "vet" flair text
        exclude:
          - flairText:
              - vet
    actions:
      - kind: flair
        text: New User
        css: orange
docs/examples/author/flairVettedUserSubmission.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
checks:
  - name: Flair Vetted User Submission
    description: Flair submission as Approved if user has vet flair
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: newflair
        kind: author
        # rule will trigger if Author has "vet" flair text
        include:
          - flairText:
              - vet
    actions:
      - kind: flair
        text: Vetted
        css: green
docs/examples/author/ignoreVettedUser.yaml (new file, 45 lines)

@@ -0,0 +1,45 @@
checks:
  - name: non-vetted karma/meme activity
    description: >-
      Report if Author has SP and has recent karma/meme sub activity and isn't
      vetted
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      # The Author Rule is best used in conjunction with other Rules --
      # instead of having to write an AuthorFilter for every Rule where you want to skip it based on Author criteria
      # you can write one Author Rule and make it fail on the required criteria
      # so that the check fails and Actions don't run
      - name: nonvet
        kind: author
        exclude:
          - flairText:
              - vet
      - name: attr10
        kind: attribution
        criteria:
          - threshold: '> 10%'
            window: 90 days
          - threshold: '> 10%'
            window: 100
      - name: freekarma
        kind: recentActivity
        lookAt: submissions
        thresholds:
          - threshold: '>= 1'
            subreddits:
              - DeFreeKarma
              - FreeKarma4U
        window: 7 days
      - name: memes
        kind: recentActivity
        lookAt: submissions
        thresholds:
          - threshold: '>= 3'
            subreddits:
              - dankmemes
        window: 7 days
    # will NOT run if the Author for this Submission has the flair "vet"
    actions:
      - kind: report
        content: Author has posted in free karma or meme subs recently
@@ -9,5 +9,5 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FHistoryJSO

### Examples

* [Low Comment Engagement](/docs/examples/history/lowEngagement.json5) - Check if Author is submitting much more than they comment.
* [OP Comment Engagement](/docs/examples/history/opOnlyEngagement.json5) - Check if Author is mostly engaging only in their own content
* Low Comment Engagement [YAML](/docs/examples/history/lowEngagement.yaml) | [JSON](/docs/examples/history/lowEngagement.json5) - Check if Author is submitting much more than they comment.
* OP Comment Engagement [YAML](/docs/examples/history/opOnlyEngagement.yaml) | [JSON](/docs/examples/history/opOnlyEngagement.json5) - Check if Author is mostly engaging only in their own content
docs/examples/history/lowEngagement.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
checks:
  - name: Low Comment Engagement
    description: Check if Author is submitting much more than they comment
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: lowComm
        kind: history
        criteria:
          - comment: '< 30%'
            window:
              # get author's last 90 days of activities or 100 activities, whichever is less
              duration: 90 days
              count: 100
            # trigger if less than 30% of their activities in this time period are comments

    actions:
      - kind: report
        content: >-
          Low engagement: comments were {{rules.lowcomm.commentPercent}} of
          {{rules.lowcomm.activityTotal}} over {{rules.lowcomm.window}}
docs/examples/history/opOnlyEngagement.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
checks:
  - name: Engaging Own Content Only
    description: Check if Author is mostly engaging in their own content only
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: opOnly
        kind: history
        criteria:
          # trigger if more than 60% of their activities in this time period are comments as OP
          - comment: '> 60% OP'
            window:
              # get author's last 90 days of activities or 100 activities, whichever is less
              duration: 90 days
              count: 100

    actions:
      - kind: report
        content: >-
          Selfish OP: {{rules.oponly.opPercent}} of
          {{rules.oponly.commentTotal}} comments over {{rules.oponly.window}}
          are as OP
@@ -6,5 +6,5 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRecentActi

### Examples

* [Free Karma Subreddits](/docs/examples/recentActivity/freeKarma.json5) - Check if the Author has recently posted in any "free karma" subreddits
* [Submission in Free Karma Subreddits](/docs/examples/recentActivity/freeKarmaOnSubmission.json5) - Check if the Author has posted the Submission this check is running on in any "free karma" subreddits recently
* Free Karma Subreddits [YAML](/docs/examples/recentActivity/freeKarma.yaml) | [JSON](/docs/examples/recentActivity/freeKarma.json5) - Check if the Author has recently posted in any "free karma" subreddits
* Submission in Free Karma Subreddits [YAML](/docs/examples/recentActivity/freeKarmaOnSubmission.yaml) | [JSON](/docs/examples/recentActivity/freeKarmaOnSubmission.json5) - Check if the Author has posted the Submission this check is running on in any "free karma" subreddits recently
docs/examples/recentActivity/freeKarma.yaml (new file, 27 lines)

@@ -0,0 +1,27 @@
checks:
  - name: Free Karma Alert
    description: Check if author has posted in 'freekarma' subreddits
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: freekarma
        kind: recentActivity
        # when lookAt is not present this rule will look for submissions and comments
        #lookAt: comments
        useSubmissionAsReference: false
        thresholds:
          # if the number of activities (sub/comment) found CUMULATIVELY in the subreddits listed is
          # equal to or greater than 1 then the rule is triggered
          - threshold: '>= 1'
            subreddits:
              - DeFreeKarma
              - FreeKarma4U
              - FreeKarma4You
              - upvote
        window: 7 days
    actions:
      - kind: report
        content: >-
          {{rules.freekarma.totalCount}} activities in karma
          {{rules.freekarma.subCount}} subs over {{rules.freekarma.window}}:
          {{rules.freekarma.subSummary}}
docs/examples/recentActivity/freeKarmaOnSubmission.yaml (new file, 26 lines)

@@ -0,0 +1,26 @@
checks:
  - name: Free Karma On Submission Alert
    description: Check if author has posted this submission in 'freekarma' subreddits
    kind: submission
    rules:
      - name: freekarmasub
        kind: recentActivity
        # rule will only look at Author's submissions in these subreddits
        lookAt: submissions
        # rule will only look at Author's submissions in these subreddits that have the same content (link) as the submission this event was made on
        # In simpler terms -- rule will only check to see if the same link the author just posted is also posted in these subreddits
        useSubmissionAsReference: true
        thresholds:
          - threshold: '>= 1'
            subreddits:
              - DeFreeKarma
              - FreeKarma4U
              - FreeKarma4You
              - upvote
        window: 7 days
    actions:
      - kind: report
        content: >-
          Submission posted {{rules.freekarmasub.totalCount}} times in karma
          {{rules.freekarmasub.subCount}} subs over
          {{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}
@@ -11,10 +11,12 @@ Which can then be used in conjunction with a [`window`](https://github.com/FoxxM

### Examples

* [Trigger if regex matches against the current activity](/docs/examples/regex/matchAnyCurrentActivity.json5)
* [Trigger if regex matches 5 times against the current activity](/docs/examples/regex/matchThresholdCurrentActivity.json5)
* [Trigger if regex matches against any part of a Submission](/docs/examples/regex/matchSubmissionParts.json5)
* [Trigger if regex matches any of Author's last 10 activities](/docs/examples/regex/matchHistoryActivity.json5)
* [Trigger if regex matches at least 3 of Author's last 10 activities](/docs/examples/regex/matchActivityThresholdHistory.json5)
* [Trigger if there are 5 regex matches in the Author's last 10 activities](/docs/examples/regex/matchTotalHistoryActivity.json5)
* [Trigger if there are 5 regex matches in the Author's last 10 comments](/docs/examples/regex/matchSubsetHistoryActivity.json5)
* Trigger if regex matches against the current activity - [YAML](/docs/examples/regex/matchAnyCurrentActivity.yaml) | [JSON](/docs/examples/regex/matchAnyCurrentActivity.json5)
* Trigger if regex matches 5 times against the current activity - [YAML](/docs/examples/regex/matchThresholdCurrentActivity.yaml) | [JSON](/docs/examples/regex/matchThresholdCurrentActivity.json5)
* Trigger if regex matches against any part of a Submission - [YAML](/docs/examples/regex/matchSubmissionParts.yaml) | [JSON](/docs/examples/regex/matchSubmissionParts.json5)
* Trigger if regex matches any of Author's last 10 activities - [YAML](/docs/examples/regex/matchHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchHistoryActivity.json5)
* Trigger if regex matches at least 3 of Author's last 10 activities - [YAML](/docs/examples/regex/matchActivityThresholdHistory.yaml) | [JSON](/docs/examples/regex/matchActivityThresholdHistory.json5)
* Trigger if there are 5 regex matches in the Author's last 10 activities - [YAML](/docs/examples/regex/matchTotalHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchTotalHistoryActivity.json5)
* Trigger if there are 5 regex matches in the Author's last 10 comments - [YAML](/docs/examples/regex/matchSubsetHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchSubsetHistoryActivity.json5)
* Remove comments that are spamming discord links - [YAML](/docs/examples/regex/removeDiscordSpam.yaml) | [JSON](/docs/examples/regex/removeDiscordSpam.json5)
  * Differs from just using automod because this config can allow one-off/organic links from users who DO NOT spam discord links but will still remove the comment if the user is spamming them
docs/examples/regex/matchActivityThresholdHistory.yaml (new file, 13 lines)

@@ -0,0 +1,13 @@
name: swear
kind: regex
criteria:
  # triggers if more than 3 activities in the last 10 match the regex
  - regex: '/fuck|shit|damn/'
    # this differs from "totalMatchThreshold"
    #
    # activityMatchThreshold => # of activities from window must match regex
    # totalMatchThreshold => # of matches across all activities from window must match regex
    activityMatchThreshold: '> 3'
    # if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
    # learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
    window: 10
docs/examples/regex/matchAnyCurrentActivity.yaml (new file, 6 lines)

@@ -0,0 +1,6 @@
name: swear
kind: regex
criteria:
  - regex: '/fuck|shit|damn/'
    # if "matchThreshold" is not specified it defaults to this -- default behavior is to trigger if there are any matches
    #matchThreshold: "> 0"
docs/examples/regex/matchHistoryActivity.yaml (new file, 8 lines)

@@ -0,0 +1,8 @@
name: swear
kind: regex
criteria:
  # triggers if any activity in the last 10 (including current activity) match the regex
  - regex: '/fuck|shit|damn/'
    # if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
    # learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
    window: 10
docs/examples/regex/matchSubmissionParts.yaml (new file, 11 lines)

@@ -0,0 +1,11 @@
name: swear
kind: regex
criteria:
  - regex: '/fuck|shit|damn/'
    # triggers if the current activity has more than 0 matches
    # if the activity is a submission then matches against title, body, and url
    # if "testOn" is not provided then `title, body` are the defaults
    testOn:
      - title
      - body
      - url
docs/examples/regex/matchSubsetHistoryActivity.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
name: swear
kind: regex
criteria:
  # triggers if there are more than 5 regex matches in the last 10 activities (comments only)
  - regex: '/fuck|shit|damn/'
    # this differs from "activityMatchThreshold"
    #
    # activityMatchThreshold => # of activities from window must match regex
    # totalMatchThreshold => # of matches across all activities from window must match regex
    totalMatchThreshold: '> 5'
    # if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
    # learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
    window: 10
    # determines which activities from window to consider
    # defaults to "all" (submissions and comments)
    lookAt: comments
docs/examples/regex/matchThresholdCurrentActivity.yaml (new file, 6 lines)

@@ -0,0 +1,6 @@
name: swear
kind: regex
criteria:
  - regex: '/fuck|shit|damn/'
    # triggers if current activity has greater than 5 matches
    matchThreshold: '> 5'
docs/examples/regex/matchTotalHistoryActivity.yaml (new file, 13 lines)

@@ -0,0 +1,13 @@
name: swear
kind: regex
criteria:
  # triggers if there are more than 5 regex matches in the last 10 activities (comments or submission)
  - regex: '/fuck|shit|damn/'
    # this differs from "activityMatchThreshold"
    #
    # activityMatchThreshold => # of activities from window must match regex
    # totalMatchThreshold => # of matches across all activities from window must match regex
    totalMatchThreshold: '> 5'
    # if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
    # learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
    window: 10
docs/examples/regex/removeDiscordSpam.json5 (new file, 73 lines)

@@ -0,0 +1,73 @@
{
  "checks": [
    {
      "name": "remove discord spam",
      "notifyOnTrigger": true,
      "description": "remove comments from users who are spamming discord links",
      "kind": "comment",
      "authorIs": {
        "exclude": [
          {
            "isMod": true
          }
        ]
      },
      "itemIs": [
        {
          "removed": false,
          "approved": false,
        }
      ],
      "condition": "OR",
      "rules": [
        {
          // set to false if you want to allow comments with a discord link ONLY IF
          // the author doesn't have a history of spamming discord links
          // -- basically allows one-off/organic discord links
          "enable": true,
          "name": "linkOnlySpam",
          "kind": "regex",
          "criteria": [
            {
              "name": "only link",
              "regex": "/^.*(discord\\.gg\\/[\\w\\d]+)$/i",
            }
          ]
        },
        {
          "condition": "AND",
          "rules": [
            {
              "name": "linkAnywhereSpam",
              "kind": "regex",
              "criteria": [
                {
                  "name": "contains link anywhere",
                  "regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
                }
              ]
            },
            {
              "name": "linkAnywhereHistoricalSpam",
              "kind": "regex",
              "criteria": [
                {
                  "name": "contains links anywhere historically",
                  "regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
                  "totalMatchThreshold": ">= 3",
                  "lookAt": "comments",
                  "window": 10
                }
              ]
            }
          ]
        }
      ],
      "actions": [
        {
          "kind": "remove"
        }
      ]
    }
  ]
}
docs/examples/regex/removeDiscordSpam.yaml (new file, 36 lines)

@@ -0,0 +1,36 @@
checks:
  - name: remove discord spam
    notifyOnTrigger: true
    description: remove comments from users who are spamming discord links
    kind: comment
    authorIs:
      exclude:
        - isMod: true
    itemIs:
      - removed: false
        approved: false
    condition: OR
    rules:
      - enable: true
        name: linkOnlySpam
        kind: regex
        criteria:
          - name: only link
            regex: '/^.*(discord\.gg\/[\w\d]+)$/i'
      - condition: AND
        rules:
          - name: linkAnywhereSpam
            kind: regex
            criteria:
              - name: contains link anywhere
                regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
          - name: linkAnywhereHistoricalSpam
            kind: regex
            criteria:
              - name: contains links anywhere historically
                regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
                totalMatchThreshold: '>= 3'
                lookAt: comments
                window: 10
    actions:
      - kind: remove
@@ -45,5 +45,5 @@ With only `gapAllowance: 2` this rule **would trigger** because the 1 and 2 non-repeat

## Examples

* [Crosspost Spamming](/docs/examples/repeatActivity/crosspostSpamming.json5) - Check if an Author is spamming their Submissions across multiple subreddits
* [Burst-posting](/docs/examples/repeatActivity/burstPosting.json5) - Check if Author is crossposting their Submissions in short bursts
* Crosspost Spamming [JSON](/docs/examples/repeatActivity/crosspostSpamming.json5) | [YAML](/docs/examples/repeatActivity/crosspostSpamming.yaml) - Check if an Author is spamming their Submissions across multiple subreddits
* Burst-posting [JSON](/docs/examples/repeatActivity/burstPosting.json5) | [YAML](/docs/examples/repeatActivity/burstPosting.yaml) - Check if Author is crossposting their Submissions in short bursts
docs/examples/repeatActivity/burstPosting.yaml (new file, 23 lines)

@@ -0,0 +1,23 @@
checks:
  - name: Burstpost Spam
    description: Check if Author is crossposting in short bursts
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: burstpost
        kind: repeatActivity
        # will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
        useSubmissionAsReference: true
        # the number of non-repeat activities (submissions or comments) to ignore between repeat submissions
        gapAllowance: 3
        # if the Author has posted this Submission 6 times, ignoring 3 non-repeat activities between each repeat, then this rule will trigger
        threshold: '>= 6'
        # look at all of the Author's submissions in the last 7 days or 100 submissions
        window:
          duration: 7 days
          count: 100
    actions:
      - kind: report
        content: >-
          Author has burst-posted this link {{rules.burstpost.largestRepeat}}
          times over {{rules.burstpost.window}}
docs/examples/repeatActivity/crosspostSpamming.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
checks:
  - name: Crosspost Spam
    description: Check if Author is spamming Submissions across subreddits
    # check will run on a new submission in your subreddit and look at the Author of that submission
    kind: submission
    rules:
      - name: xpostspam
        kind: repeatActivity
        # will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
        useSubmissionAsReference: true
        # if the Author has posted this Submission 5 times consecutively then this rule will trigger
        threshold: '>= 5'
        # look at all of the Author's submissions in the last 7 days
        window: 7 days
    actions:
      - kind: report
        content: >-
          Author has posted this link {{rules.xpostspam.largestRepeat}} times
          over {{rules.xpostspam.window}}
docs/examples/repost/README.md (new file, 927 lines)

@@ -0,0 +1,927 @@
The **Repost** rule is used to find reposts for both **Submissions** and **Comments**, depending on what type of **Check** it is used on.

Note: This rule is for searching **all of Reddit** for reposts, as opposed to just the Author of the Activity being checked. If you only want to check for reposts by the Author of the Activity being checked you should use the [Repeat Activity](/docs/examples/repeatActivity) rule.

# TLDR

Out of the box CM generates a repost rule with sensible default behavior without any configuration. You do not need to configure any of the below options (facets, modifiers, criteria) yourself in order to have a working repost rule. Default behavior is as follows...

* When looking for Submission reposts CM will find any Submissions with
  * a very similar title
  * or independent of title...
    * any crossposts/duplicates
    * any submissions with the exact URL
* When looking for Comment reposts CM will do the above AND THEN
  * compare the top 50 most-upvoted comments from the top 10 most-upvoted Submissions against the comment being checked
  * compare any items found from external sources (Youtube comments, etc...) against the comment being checked

# Configuration

## Search Facets

ContextMod has several ways to search for reposts -- all of which look at different elements of a Submission in order to find repost candidates. Define whichever of these **Search Facets** you want to use to search Reddit in the `searchOn` property of the Repost Rule's configuration.

### Usage

Facets are specified in the `searchOn` array property within the rule's configuration.

**String**

Specify one or more types of facets as a string to use their default configurations

<details>

YAML
```yaml
kind: repost
criteria:
  - searchOn:
      - title
      - url
      - crossposts
```

JSON
```json5
{
  "kind": "repost",
  "criteria": [
    {
      // ...
      "searchOn": ["title", "url", "crossposts"],
      // ....
    }
  ]
}
```

</details>

**Object**

**string** and object configurations can be mixed

<details>

```yaml
kind: repost
criteria:
  - searchOn:
      - title
      - kind: url
        matchScore: 90
      - external
```

```json5
{
  "kind": "repost",
  "criteria": [
    {
      // ...
      "searchOn": [
        "title",
        {
          "kind": "url",
          // could also specify multiple types to use the same config for all
          //"kind": ["url", "duplicates"]
          "matchScore": 90,
          //...
        },
        "external"
      ],
      // ....
    }
  ]
}
```

</details>

### Facet Types

* **title** -- search reddit for Submissions with a similar title
* **url** -- search reddit for Submissions with the same URL
* **duplicates** -- get all Submissions **reddit has identified** as duplicates that are **NOT** crossposts
  * these are found under *View discussions in other communities* (new reddit) or *other discussions* (old reddit) on the Submission
* **crossposts** -- get all Submissions where the current Submission is the source of an **official** crosspost
  * this differs from duplicates in that crossposts use reddit's built-in crosspost functionality, respect subreddit crosspost rules, and link back to the original Submission
* **external** -- get items from the Submission's link source that may be reposted (currently implemented for **Comment Checks** only)
  * When the Submission link is for...
    * **Youtube** -- get top comments on video by replies/like count
      * **NOTE:** An **API Key** for the [Youtube Data API](https://developers.google.com/youtube/v3) must be provided for this facet to work. This can be provided by the operator alongside [bot credentials](/docs/operatorConfiguration.md) or in the top-level `credentials` property for a [subreddit configuration.](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fmaster%2Fsrc%2FSchema%2FApp.json)

### Facet Modifiers

For all **Facets**, except for **external**, there are options that can be configured to determine whether a found Submission is a "valid" repost, IE filtering. These options can be configured **per facet**.

* **matchScore** -- The percentage, as a whole number, of a repost title that must match the title being checked in order to consider both a match
* **minWordCount** -- The minimum number of words a title must have
* **caseSensitive** -- If the match comparison should be case-sensitive (defaults to `false`)

Additionally, the current Activity's title and/or each repost's title can be transformed before matching:

* **transformations** -- An array of SearchAndReplace objects used to transform the repost's title
* **transformationsActivity** -- An array of SearchAndReplace objects used to transform the current Activity's title

#### Modifier Defaults

To make facets easier to use without configuration, sensible defaults are applied to each when no other configuration is defined...

* **title**
  * `matchScore: 85` -- The candidate repost's title must be at least 85% similar to the current Activity's title
  * `minWordCount: 2` -- The candidate repost's title must have at least 2 words

For `url`, `duplicates`, and `crossposts` the only default is `matchScore: 0` because the assumption is you want to treat any actual dups/x-posts or exact URLs as reposts, regardless of their title.
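Putting the modifier list and the defaults together, here is a sketch of overriding them per facet. The placement mirrors the `kind: url` object shown under Usage above; the specific values are illustrative only:

```yaml
kind: repost
criteria:
  - searchOn:
      - kind: title
        matchScore: 90      # require 90% title similarity instead of the default 85
        minWordCount: 3     # ignore candidates with very short titles
        caseSensitive: false
      - url                 # url keeps its default of matchScore: 0
```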
## Additional Criteria Properties
|
||||
|
||||
A **criteria** object may also specify some additional tests to run against the reposts found from searching.
|
||||
|
||||
### For Submissions and Comments
|
||||
|
||||
#### Occurrences
|
||||
|
||||
Define a set of criteria to test against the **number of reposts**, **time reposts were created**, or both.
|
||||
|
||||
##### Count
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
- url
|
||||
- crossposts
|
||||
occurrences:
|
||||
criteria:
|
||||
- count:
|
||||
condition: AND
|
||||
test:
|
||||
- '> 3'
|
||||
- <= 5
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// ...
|
||||
"searchOn": ["title", "url", "crossposts"],
|
||||
"occurrences": {
|
||||
"criteria": [
|
||||
{
|
||||
// passes if BOTH tests are true
|
||||
"count": {
|
||||
"condition": "AND", // default is AND
|
||||
"test": [
|
||||
"> 3", // TRUE if there are GREATER THAN 3 reposts found
|
||||
"<= 5" // TRUE if there are LESS THAN OR EQUAL TO 5 reposts found
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
##### Time
|
||||
|
||||
Define a test or array of tests to run against **when reposts were created**
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
kind: repost
criteria:
  - searchOn:
      - title
      - url
      - crossposts
    occurrences:
      criteria:
        - time:
            condition: AND
            test:
              - testOn: all
                condition: '> 3 months'
```

```json5
{
  "kind": "repost",
  "criteria": [
    {
      // ...
      "searchOn": [
        "title",
        "url",
        "crossposts"
      ],
      "occurrences": {
        "criteria": [
          {
            "time": {
              // how to test array of comparisons. AND => all must pass, OR => any must pass
              "condition": "AND",
              "test": [
                {
                  // which of the found reposts to test the time comparison on
                  //
                  // "all" => ALL reposts must pass time comparison
                  // "any" => ANY repost must pass time comparison
                  // "newest" => The newest (closest in time to now) repost must pass time comparison
                  // "oldest" => The oldest (furthest in time from now) repost must pass time comparison
                  //
                  "testOn": "all",
                  // Tested items must be OLDER THAN 3 months
                  "condition": "> 3 months"
                }
              ]
            }
          }
        ]
      },
    }
  ]
}
```

</details>

### For Comments

When the rule is run in a **Comment Check** you may specify text comparisons (like those found in Search Facets) to run on the contents of the repost comments *against* the contents of the comment being checked (see the sketch after the list below):

* **matchScore** -- The percentage, as a whole number, of a repost comment that must match the comment being checked in order to consider both a match (defaults to 85% IE `85`)
* **minWordCount** -- The minimum number of words a comment must have
* **caseSensitive** -- If the match comparison should be case-sensitive (defaults to `false`)
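
A minimal sketch of this placement: `matchScore` for comments sits directly on the **criteria** object rather than on an individual `searchOn` facet (the full comment examples further below show the same placement). This sketch assumes `minWordCount` and `caseSensitive` live alongside it at the same level.

```yaml
kind: repost
criteria:
  - searchOn:
      - title
      - url
    # comment text comparisons are configured at the criteria level
    matchScore: 90
    minWordCount: 3      # assumed to sit at the criteria level, like matchScore
    caseSensitive: false # assumed to sit at the criteria level, like matchScore
```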

# Examples

Examples of a *full* CM configuration, including the Repost Rule, in various scenarios. In each scenario the parts of the configuration that affect the rule are indicated.

## Submissions

These examples apply when the Repost Rule is run in a **Submission Check**, IE the Activity being checked is a Submission.

### Default Behavior (No configuration)

This is the same behavior described in the [TLDR](#TLDR) section above -- find any submissions with:

* a very similar title (85% or more the same)
* or ignoring title...
  * any crossposts/duplicates
  * any submissions with the exact URL

<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost"
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by Title Only
|
||||
|
||||
Find any submissions with:
|
||||
|
||||
* a very similar title (85% or more the same)
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only title to search on
|
||||
"searchOn": [
|
||||
"title" // uses default configuration since only string is specified
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by Title only and specify similarity percentage
|
||||
|
||||
* a very similar title (95% or more the same)
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- kind: title
|
||||
matchScore: '95'
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only title to search on
|
||||
"searchOn": [
|
||||
{
|
||||
"kind": "title",
|
||||
// titles must be 95% or more similar
|
||||
"matchScore": "95"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by Title, specify similarity percentage, AND any duplicates
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check if submission has been reposted
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- duplicates
|
||||
- kind: title
|
||||
matchScore: '95'
|
||||
actions:
|
||||
- kind: report
|
||||
content: This submission was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check if submission has been reposted",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
"searchOn": [
|
||||
// look for duplicates (NON crossposts) using default configuration
|
||||
"duplicates",
|
||||
// search by title
|
||||
{
|
||||
"kind": "title",
|
||||
// titles must be 95% or more similar
|
||||
"matchScore": "95"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This submission was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Approve Submission if not reposted in the last month, by title
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
- name: subRepost
|
||||
description: Check there are no reposts with same title in the last month
|
||||
kind: submission
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- title
|
||||
occurrences:
|
||||
condition: OR
|
||||
criteria:
|
||||
- count:
|
||||
test:
|
||||
- < 1
|
||||
- time:
|
||||
test:
|
||||
- testOn: newest
|
||||
condition: '> 1 month'
|
||||
actions:
|
||||
- kind: approve
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"unmoderated"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "subRepost",
|
||||
"description": "Check there are no reposts with same title in the last month",
|
||||
// kind specifies this check is for SUBMISSIONS
|
||||
"kind": "submission",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
"searchOn": [
|
||||
"title"
|
||||
],
|
||||
"occurrences": {
|
||||
// if EITHER criteria is TRUE then it "passes"
|
||||
"condition": "OR",
|
||||
"criteria": [
|
||||
// first criteria:
|
||||
// TRUE if there are LESS THAN 1 reposts (no reposts found)
|
||||
{
|
||||
"count": {
|
||||
"test": ["< 1"]
|
||||
}
|
||||
},
|
||||
// second criteria:
|
||||
// TRUE if the newest repost is older than one month
|
||||
{
|
||||
"time": {
|
||||
"test": [
|
||||
{
|
||||
"testOn": "newest",
|
||||
"condition": "> 1 month"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
// approve this post since we know it is not a repost of anything within the last month
|
||||
"kind": "approve",
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
## Comments

### Default Behavior (No configuration)

This is the same behavior described in the [TLDR](#TLDR) section above -- find any submissions with:

* a very similar title (85% or more the same)
* or ignoring title...
  * any crossposts/duplicates
  * any submissions with the exact URL
* if the comment being checked is on a Submission for a Youtube video then also get the top 50 comments on the Youtube video...

AND THEN

* sort submissions by votes
* take top 20 (upvoted) comments from top 10 (upvoted) submissions
* sort comments by votes, take top 50 + top 50 external items

FINALLY

* filter all gathered comments by the default `matchScore: 85` to find very similar matches
* the rule is triggered if any are found

<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
actions:
|
||||
- kind: report
|
||||
content: This comment was reposted
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted",
|
||||
// kind specifies this check is for COMMENTS
|
||||
"kind": "common",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost"
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by external (youtube) comments only
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted from youtube
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- external
|
||||
actions:
|
||||
- kind: report
|
||||
content: This comment was reposted from youtube
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted from youtube",
|
||||
// kind specifies this check is for COMMENTS
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only external (youtube) to search on
|
||||
"searchOn": [
|
||||
"external"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted from youtube"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by external (youtube) comments only, with higher comment match percentage
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted from youtube
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- external
|
||||
matchScore: 95
|
||||
actions:
|
||||
- kind: report
|
||||
content: This comment was reposted from youtube
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted from youtube",
|
||||
// kind specifies this check is for COMMENTS
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only external (youtube) to search on
|
||||
"searchOn": [
|
||||
"external"
|
||||
],
|
||||
"matchScore": 95 // matchScore for comments is on criteria instead of searchOn config...
|
||||
},
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted from youtube"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Search by external (youtube) comments and submission URL, with higher comment match percentage
|
||||
|
||||
<details>
|
||||
|
||||
```yaml
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: commRepost
|
||||
description: Check if comment has been reposted
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- kind: repost
|
||||
criteria:
|
||||
- searchOn:
|
||||
- external
|
||||
- url
|
||||
matchScore: 95
|
||||
actions:
|
||||
- kind: report
|
||||
content: >-
|
||||
This comment was reposted from youtube or from submission with the
|
||||
same URL
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"polling": [
|
||||
"newComm"
|
||||
],
|
||||
"checks": [
|
||||
{
|
||||
"name": "commRepost",
|
||||
"description": "Check if comment has been reposted",
|
||||
// kind specifies this check is for COMMENTS
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
// repost rule configuration is below
|
||||
//
|
||||
{
|
||||
"kind": "repost",
|
||||
"criteria": [
|
||||
{
|
||||
// specify only external (youtube) to search on
|
||||
"searchOn": [
|
||||
"external",
|
||||
// can specify any/all submission search facets to acquire comments from
|
||||
"url"
|
||||
],
|
||||
"matchScore": 95 // matchScore for comments is on criteria instead of searchOn config...
|
||||
},
|
||||
]
|
||||
},
|
||||
//
|
||||
// repost rule configuration is above
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "report",
|
||||
"content": "This comment was reposted from youtube or from submission with the same URL"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
@@ -11,21 +11,31 @@ All actions for these configurations are non-destructive in that:
|
||||
|
||||
**You will have to remove the `report` action and `dryRun` settings yourself.** This is to ensure that you understand the behavior the bot will be performing. If you are unsure of this you should leave them in place until you are certain the behavior the bot is performing is acceptable.
|
||||
|
||||
**YAML** is the same format used by **automoderator**
|
||||
|
||||
## Submission-based Behavior
|
||||
|
||||
### [Remove submissions from users who have used 'freekarma' subs to bypass karma checks](/docs/examples/subredditReady/freekarma.json5)
|
||||
### Remove submissions from users who have used 'freekarma' subs to bypass karma checks
|
||||
|
||||
[YAML](/docs/examples/subredditReady/freekarma.yaml) | [JSON](/docs/examples/subredditReady/freekarma.json5)
|
||||
|
||||
If the user has any activity (comment/submission) in known freekarma subreddits in the past (50 activities or 6 months) then remove the submission.
|
||||
|
||||
### [Remove submissions from users who have crossposted the same submission 4 or more times](/docs/examples/subredditReady/crosspostSpam.json5)
|
||||
### Remove submissions from users who have crossposted the same submission 4 or more times
|
||||
|
||||
[YAML](/docs/examples/subredditReady/crosspostSpam.yaml) | [JSON](/docs/examples/subredditReady/crosspostSpam.json5)
|
||||
|
||||
If the user has crossposted the same submission in the past (50 activities or 6 months) 4 or more times in a row then remove the submission.
|
||||
|
||||
### [Remove submissions from users who have crossposted or used 'freekarma' subs](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5)
|
||||
### Remove submissions from users who have crossposted or used 'freekarma' subs
|
||||
|
||||
[YAML](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.yaml) | [JSON](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5)
|
||||
|
||||
Will remove the submission if either of the above two behaviors is detected.
|
||||
|
||||
### [Remove link submissions where the user's history is comprised of 10% or more of the same link](/docs/examples/subredditReady/selfPromo.json5)
|
||||
### Remove link submissions where the user's history is comprised of 10% or more of the same link
|
||||
|
||||
[YAML](/docs/examples/subredditReady/selfPromo.yaml) | [JSON](/docs/examples/subredditReady/selfPromo.json5)
|
||||
|
||||
If the link origin (youtube author, twitter author, etc. or regular domain for non-media links)
|
||||
|
||||
@@ -36,6 +46,33 @@ then remove the submission
|
||||
|
||||
## Comment-based behavior
|
||||
|
||||
### [Remove comment if the user has posted the same comment 4 or more times in a row](/docs/examples/subredditReady/commentSpam.json5)
|
||||
### Remove comment if the user has posted the same comment 4 or more times in a row
|
||||
|
||||
[YAML](/docs/examples/subredditReady/commentSpam.yaml) | [JSON](/docs/examples/subredditReady/commentSpam.json5)
|
||||
|
||||
If the user made the same comment (with some fuzzy matching) 4 or more times in a row in the past (50 activities or 6 months) then remove the comment.
|
||||
|
||||
### Remove comment if it is discord invite link spam
|
||||
|
||||
[YAML](/docs/examples/subredditReady/discordSpam.yaml) | [JSON](/docs/examples/subredditReady/discordSpam.json5)
|
||||
|
||||
This example goes a step further than automod can by being more discerning about how it handles this type of spam.
|
||||
|
||||
* Remove the comment and **ban a user** if:
|
||||
* Comment being checked contains **only** a discord link (no other text) AND
|
||||
* Discord links appear **anywhere** in three or more of the last 10 comments the Author has made
|
||||
|
||||
otherwise...
|
||||
|
||||
* Remove the comment if:
|
||||
* Comment being checked contains **only** a discord link (no other text) OR
|
||||
* Comment contains a discord link **anywhere** AND
|
||||
* Discord links appear **anywhere** in three or more of the last 10 comments the Author has made
|
||||
|
||||
Using these checks ContextMod can more easily distinguish between these use cases for a user commenting with a discord link:
|
||||
|
||||
* actual spammers who only spam a discord link
|
||||
* users who may comment with a link but have context for it either in the current comment or in their history
|
||||
* users who may comment with a link but it's a one-off event (no other links historically)
|
||||
|
||||
Additionally, you could modify either or both of these checks so they do not remove one-off discord link comments but still remove the comment when the user has a historical trend of spamming links. A rough sketch of this modification is shown below.
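
Hypothetical sketch only, adapted from the `discordSpam.yaml` example further down: this check (a drop-in for the second check in that file) removes a comment only when the historical trend is present, leaving one-off link comments alone.

```yaml
- name: remove discord spam (historical trend only)
  description: only remove when the author shows a trend of discord link spam
  kind: comment
  condition: AND
  rules:
    - name: linkAnywhereSpam
      kind: regex
      criteria:
        - name: contains link anywhere
          # single quotes are required to escape special characters
          regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
    - name: linkAnywhereHistoricalSpam
      kind: regex
      criteria:
        - name: contains links anywhere historically
          regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
          totalMatchThreshold: '>= 3'
          lookAt: comments
          window: 10
  actions:
    - kind: remove
```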
|
||||
|
||||
25
docs/examples/subredditReady/commentSpam.yaml
Normal file
25
docs/examples/subredditReady/commentSpam.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
# Stop users who spam the same comment many times
|
||||
- name: low xp comment spam
|
||||
description: X-posted comment >=4x
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- name: xPostLow
|
||||
kind: repeatActivity
|
||||
# number of "non-repeat" comments allowed between "repeat comments"
|
||||
gapAllowance: 2
|
||||
# greater or more than 4 repeat comments triggers this rule
|
||||
threshold: '>= 4'
|
||||
# retrieve either last 50 comments or 6 months' of history, whichever is less
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: 'Remove => Posted same comment {{rules.xpostlow.largestRepeat}}x times'
|
||||
- kind: remove
|
||||
enable: true
|
||||
48
docs/examples/subredditReady/crosspostSpam.yaml
Normal file
48
docs/examples/subredditReady/crosspostSpam.yaml
Normal file
@@ -0,0 +1,48 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
# stop users who post low-effort, crossposted spam submissions
|
||||
#
|
||||
# Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
|
||||
# less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
- name: low xp spam and engagement
|
||||
description: X-posted 4x and low comment engagement
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: xPostLow
|
||||
kind: repeatActivity
|
||||
gapAllowance: 2
|
||||
threshold: '>= 4'
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
- name: lowOrOpComm
|
||||
kind: history
|
||||
criteriaJoin: OR
|
||||
criteria:
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: < 50%
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: '> 40% OP'
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: >-
|
||||
Remove=>{{rules.xpostlow.largestRepeat}} X-P =>
|
||||
{{rules.loworopcomm.thresholdSummary}}
|
||||
- kind: remove
|
||||
enable: true
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
Your submission has been removed because you cross-posted it
|
||||
{{rules.xpostlow.largestRepeat}} times and you have very low
|
||||
engagement outside of making submissions
|
||||
distinguish: true
|
||||
75
docs/examples/subredditReady/discordSpam.json5
Normal file
75
docs/examples/subredditReady/discordSpam.json5
Normal file
@@ -0,0 +1,75 @@
|
||||
{
|
||||
"polling": ["newComm"],
|
||||
"checks": [
|
||||
{
|
||||
"name": "ban discord only spammer",
|
||||
"description": "ban a user who spams only a discord link many times historically",
|
||||
"kind": "comment",
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
"linkOnlySpam",
|
||||
"linkAnywhereHistoricalSpam",
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "remove"
|
||||
},
|
||||
{
|
||||
"kind": "ban",
|
||||
"content": "spamming discord links"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "remove discord spam",
|
||||
"description": "remove comments from users who only link to discord or mention discord link many times historically",
|
||||
"kind": "comment",
|
||||
"condition": "OR",
|
||||
"rules": [
|
||||
{
|
||||
"name": "linkOnlySpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "only link",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+)$/i",
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"condition": "AND",
|
||||
"rules": [
|
||||
{
|
||||
"name": "linkAnywhereSpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "contains link anywhere",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "linkAnywhereHistoricalSpam",
|
||||
"kind": "regex",
|
||||
"criteria": [
|
||||
{
|
||||
"name": "contains links anywhere historically",
|
||||
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
|
||||
"totalMatchThreshold": ">= 3",
|
||||
"lookAt": "comments",
|
||||
"window": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"kind": "remove"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
46
docs/examples/subredditReady/discordSpam.yaml
Normal file
46
docs/examples/subredditReady/discordSpam.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
polling:
|
||||
- newComm
|
||||
checks:
|
||||
- name: ban discord only spammer
|
||||
description: ban a user who spams only a discord link many times historically
|
||||
kind: comment
|
||||
condition: AND
|
||||
rules:
|
||||
- linkOnlySpam
|
||||
- linkAnywhereHistoricalSpam
|
||||
actions:
|
||||
- kind: remove
|
||||
- kind: ban
|
||||
content: spamming discord links
|
||||
- name: remove discord spam
|
||||
description: >-
|
||||
remove comments from users who only link to discord or mention discord
|
||||
link many times historically
|
||||
kind: comment
|
||||
condition: OR
|
||||
rules:
|
||||
- name: linkOnlySpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: only link
|
||||
# single quotes are required to escape special characters
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+)$/i'
|
||||
- condition: AND
|
||||
rules:
|
||||
- name: linkAnywhereSpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: contains link anywhere
|
||||
# single quotes are required to escape special characters
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
|
||||
- name: linkAnywhereHistoricalSpam
|
||||
kind: regex
|
||||
criteria:
|
||||
- name: contains links anywhere historically
|
||||
# single quotes are required to escape special characters
|
||||
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
|
||||
totalMatchThreshold: '>= 3'
|
||||
lookAt: comments
|
||||
window: 10
|
||||
actions:
|
||||
- kind: remove
|
||||
84
docs/examples/subredditReady/freeKarmaOrCrosspostSpam.yaml
Normal file
84
docs/examples/subredditReady/freeKarmaOrCrosspostSpam.yaml
Normal file
@@ -0,0 +1,84 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
# stop users who post low-effort, crossposted spam submissions
|
||||
#
|
||||
# Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
|
||||
# less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
- name: remove on low xp spam and engagement
|
||||
description: X-posted 4x and low comment engagement
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: xPostLow
|
||||
kind: repeatActivity
|
||||
gapAllowance: 2
|
||||
threshold: '>= 4'
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
- name: lowOrOpComm
|
||||
kind: history
|
||||
criteriaJoin: OR
|
||||
criteria:
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: < 50%
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: '> 40% OP'
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: >-
|
||||
Remove=>{{rules.xpostlow.largestRepeat}} X-P =>
|
||||
{{rules.loworopcomm.thresholdSummary}}
|
||||
- kind: remove
|
||||
enable: false
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
Your submission has been removed because you cross-posted it
|
||||
{{rules.xpostlow.largestRepeat}} times and you have very low
|
||||
engagement outside of making submissions
|
||||
distinguish: true
|
||||
dryRun: true
|
||||
# Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
|
||||
- name: freekarma removal
|
||||
description: Remove submission if user has used freekarma sub recently
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: freekarma
|
||||
kind: recentActivity
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
useSubmissionAsReference: false
|
||||
thresholds:
|
||||
- subreddits:
|
||||
- FreeKarma4U
|
||||
- FreeKarma4You
|
||||
- KarmaStore
|
||||
- promote
|
||||
- shamelessplug
|
||||
- upvote
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: 'Remove=> {{rules.freekarma.totalCount}} activities in freekarma subs'
|
||||
- kind: remove
|
||||
enable: false
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
Your submission has been removed because you have recent activity in
|
||||
'freekarma' subs
|
||||
distinguish: true
|
||||
dryRun: true
|
||||
35
docs/examples/subredditReady/freekarma.yaml
Normal file
35
docs/examples/subredditReady/freekarma.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
# Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
|
||||
- name: freekarma removal
|
||||
description: Remove submission if user has used freekarma sub recently
|
||||
kind: submission
|
||||
itemIs:
|
||||
- removed: false
|
||||
condition: AND
|
||||
rules:
|
||||
- name: freekarma
|
||||
kind: recentActivity
|
||||
window:
|
||||
count: 50
|
||||
duration: 6 months
|
||||
useSubmissionAsReference: false
|
||||
thresholds:
|
||||
- subreddits:
|
||||
- FreeKarma4U
|
||||
- FreeKarma4You
|
||||
- KarmaStore
|
||||
- upvote
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: 'Remove=> {{rules.freekarma.totalCount}} activities in freekarma subs'
|
||||
- kind: remove
|
||||
enable: true
|
||||
- kind: comment
|
||||
enable: false
|
||||
content: >-
|
||||
Your submission has been removed because you have recent activity in
|
||||
'freekarma' subs
|
||||
distinguish: true
|
||||
71
docs/examples/subredditReady/selfPromo.yaml
Normal file
71
docs/examples/subredditReady/selfPromo.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
polling:
|
||||
- unmoderated
|
||||
checks:
|
||||
#
|
||||
# Stop users who make link submissions with a self-promotional agenda (with reddit's suggested 10% rule)
|
||||
# https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit
|
||||
#
|
||||
# Remove a SUBMISSION if the link comprises more than or equal to 10% of users history (100 activities or 6 months) OR
|
||||
#
|
||||
# if link comprises 10% of submission history (100 activities or 6 months)
|
||||
# AND less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
|
||||
#
|
||||
- name: Self-promo all AND low engagement
|
||||
description: Self-promo is >10% for all or just sub and low comment engagement
|
||||
kind: submission
|
||||
condition: OR
|
||||
rules:
|
||||
- name: attr
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '>= 10%'
|
||||
window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
domains:
|
||||
- 'AGG:SELF'
|
||||
- condition: AND
|
||||
rules:
|
||||
- name: attrsub
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '>= 10%'
|
||||
thresholdOn: submissions
|
||||
window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
domains:
|
||||
- 'AGG:SELF'
|
||||
- name: lowOrOpComm
|
||||
kind: history
|
||||
criteriaJoin: OR
|
||||
criteria:
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: < 50%
|
||||
- window:
|
||||
count: 100
|
||||
duration: 6 months
|
||||
comment: '> 40% OP'
|
||||
actions:
|
||||
- kind: report
|
||||
enable: true
|
||||
content: >-
|
||||
{{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}} of
|
||||
{{rules.attr.activityTotal}}{{rules.attrsub.activityTotal}} items
|
||||
({{rules.attr.window}}{{rules.attrsub.window}}){{#rules.loworopcomm.thresholdSummary}}
|
||||
=>
|
||||
{{rules.loworopcomm.thresholdSummary}}{{/rules.loworopcomm.thresholdSummary}}
|
||||
- kind: remove
|
||||
enable: false
|
||||
- kind: comment
|
||||
enable: true
|
||||
content: >-
|
||||
Your submission has been removed because it comprises 10% or more of your
|
||||
recent history
|
||||
({{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}}). This
|
||||
is against [reddit's self promotional
|
||||
guidelines.](https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit)
|
||||
distinguish: true
|
||||
dryRun: true
|
||||
@@ -14,7 +14,7 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FUserNoteCr
|
||||
|
||||
### Examples
|
||||
|
||||
* [Do not tag user with Good User note](/docs/examples/userNotes/usernoteFilter.json5)
|
||||
* Do not tag user with Good User note [JSON](/docs/examples/userNotes/usernoteFilter.json5) | [YAML](/docs/examples/userNotes/usernoteFilter.yaml)
|
||||
|
||||
## Action
|
||||
|
||||
@@ -23,4 +23,4 @@ A User Note can also be added to the Author of a Submission or Comment with the
|
||||
|
||||
### Examples
|
||||
|
||||
* [Add note on user doing self promotion](/docs/examples/userNotes/usernoteSP.json5)
|
||||
* Add note on user doing self promotion [JSON](/docs/examples/userNotes/usernoteSP.json5) | [YAML](/docs/examples/userNotes/usernoteSP.yaml)
|
||||
|
||||
27
docs/examples/userNotes/usernoteFilter.yaml
Normal file
27
docs/examples/userNotes/usernoteFilter.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
checks:
|
||||
- name: Self Promo Activities
|
||||
description: Tag SP only if user does not have good contributor user note
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
kind: submission
|
||||
rules:
|
||||
- name: attr10all
|
||||
kind: attribution
|
||||
author:
|
||||
exclude:
|
||||
# the key of the usernote type to look for https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
# rule will not run if current usernote on Author is of type 'gooduser'
|
||||
- type: gooduser
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
window: 100
|
||||
actions:
|
||||
- kind: usernote
|
||||
# the key of usernote type
|
||||
# https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
type: spamwarn
|
||||
# content is mustache templated
|
||||
content: >-
|
||||
Self Promotion: {{rules.attr10all.titlesDelim}}
|
||||
{{rules.attr10sub.largestPercent}}%
|
||||
23
docs/examples/userNotes/usernoteSP.yaml
Normal file
23
docs/examples/userNotes/usernoteSP.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
checks:
|
||||
- name: Self Promo Activities
|
||||
# check will run on a new submission in your subreddit and look at the Author of that submission
|
||||
description: >-
|
||||
Check if any of Author's aggregated submission origins are >10% of entire
|
||||
history
|
||||
kind: submission
|
||||
rules:
|
||||
- name: attr10all
|
||||
kind: attribution
|
||||
criteria:
|
||||
- threshold: '> 10%'
|
||||
window: 90 days
|
||||
- threshold: '> 10%'
|
||||
window: 100
|
||||
actions:
|
||||
- kind: usernote
|
||||
# the key of usernote type
|
||||
# https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
|
||||
type: spamwarn
|
||||
content: >-
|
||||
Self Promotion: {{rules.attr10all.titlesDelim}}
|
||||
{{rules.attr10sub.largestPercent}}%
|
||||
@@ -14,8 +14,8 @@ This getting started guide is for **reddit moderators** -- that is, someone who
|
||||
|
||||
Before continuing with this guide you should first make sure you understand how a ContextMod works. Please review this documentation:
|
||||
|
||||
* [How It Works](/docs#how-it-works)
|
||||
* [Core Concepts](/docs#concepts)
|
||||
* [How It Works](/docs/README.md#how-it-works)
|
||||
* [Core Concepts](/docs/README.md#concepts)
|
||||
|
||||
# Choose A Bot
|
||||
|
||||
@@ -36,15 +36,16 @@ If the Operator has communicated that **you should add a bot they control as a m
|
||||
|
||||
___
|
||||
|
||||
Ensure that you are in communication with the **operator** for this bot. The bot **will not automatically accept a moderator invitation,** it must be manually done by the bot operator. This is an intentional barrier to ensure moderators and the operator are familiar with their respective needs and have some form of trust.
|
||||
Ensure that you are in communication with the **operator** of this bot. The bot **will only accept a moderator invitation if your subreddit has been whitelisted by the operator.** This is an intentional barrier to ensure moderators and the operator are familiar with their respective needs and have some form of trust.
|
||||
|
||||
Now invite the bot to moderate your subreddit. The bot should have at least these permissions:
|
||||
|
||||
* Manage Users
|
||||
* Manage Posts and Comments
|
||||
* Manage Flair
|
||||
|
||||
Additionally, the bot must have the **Manage Wiki Pages** permission if you plan to use [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes). If you are not planning on using this feature and do not want the bot to have this permission then you **must** ensure the bot has visibility to the configuration wiki page (detailed below).
|
||||
* Manage Wiki Pages
|
||||
* Required to read the moderator-only visible wiki page used to configure the bot
|
||||
* Required to read/write to [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes)
|
||||
|
||||
## Bring Your Own Bot (BYOB)
|
||||
|
||||
@@ -60,7 +61,7 @@ If the operator has communicated that **they want to use a bot you control** thi
|
||||
|
||||
**Cons:**
|
||||
|
||||
* More setup required for both moderators and operators
|
||||
* You must have access to the credentials for the reddit account (bot)
|
||||
|
||||
___
|
||||
|
||||
@@ -72,15 +73,28 @@ Review the information shown on the invite link webpage and then follow the dire
|
||||
|
||||
# Configuring the Bot
|
||||
|
||||
The bot's behavior is defined using a configuration, like automoderator, that is stored in the **wiki** of each subreddit it moderates.
|
||||
|
||||
The default location for this page is at `https://old.reddit.com/r/YOURSUBREDDIT/wiki/botconfig/contextbot`
|
||||
|
||||
## Setup wiki page
|
||||
|
||||
The bot automatically tries to create its configuration wiki page. You can find the result of this in the log for your subreddit in the web interface.
|
||||
|
||||
If this fails for some reason you can create the wiki page through the web interface by navigating to your subreddit's tab, opening the [built-in editor (click **View**)](/docs/screenshots/configBox.png), and following the directions in **Create configuration for...** link found there.
|
||||
|
||||
If neither of the above approaches work, or you do not wish to use the web interface, expand the section below for directions on how to manually setup the wiki page:
|
||||
|
||||
<details>
|
||||
|
||||
* Visit the wiki page of the subreddit you want the bot to moderate
|
||||
* The default location the bot checks for a configuration is at `https://old.reddit.com/r/YOURSUBREDDIT/wiki/botconfig/contextbot`
|
||||
* If the page does not exist create it
|
||||
* Ensure the wiki page visibility is restricted
|
||||
* On the wiki page click **settings** (**Page settings** in new reddit)
|
||||
* Check the box for **Only mods may edit and view** and then **save**
|
||||
* Alternatively, if you did not give the bot the **Manage Wiki Pages** permission then add it to the **allow users to edit page** setting
|
||||
|
||||
</details>
|
||||
|
||||
## Procure a configuration
|
||||
|
||||
@@ -94,25 +108,46 @@ Visit the [Examples](https://github.com/FoxxMD/context-mod/tree/master/docs/exam
|
||||
|
||||
After you have found a configuration to use as a starting point:
|
||||
|
||||
* In a new tab open the github page for the configuration you want ([example](/docs/examples/repeatActivity/crosspostSpamming.json5))
|
||||
* Click the **Raw** button, then select all and copy all of the text to your clipboard.
|
||||
* Copy the URL for the configuration file EX `https://github.com/FoxxMD/context-mod/blob/master/docs/examples/subredditReady/freekarma.json5` and either:
|
||||
* (Easiest) **Load** it into your [subreddit's built-in editor](#using-the-built-in-editor) and **Save**
|
||||
* or on the file's page, click the **Raw** button, select all and copy to your clipboard, and [manually save to your wiki page](#manually-saving)
|
||||
|
||||
### Build Your Own Config
|
||||
|
||||
Additionally, you can use [this schema editor](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) to build your configuration. The editor features a ton of handy features:
|
||||
CM comes equipped with a [configuration explorer](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) to help you see all available options, with descriptions and examples, that can be used in your configuration.
|
||||
|
||||
* fully annotated configuration data/structure
|
||||
* generated examples in json/yaml
|
||||
* built-in editor that automatically validates your config
|
||||
To create or edit a configuration you should use **CM's built-in editor**, which features:
|
||||
* syntax validation and formatting
|
||||
* full configuration validation with error highlighting, hints, and fixes
|
||||
* hover over properties to see documentation and examples
|
||||
|
||||
PROTIP: Find an example config to use as a starting point and then build on it using the editor.
|
||||
To use the editor either:
|
||||
* [use your subreddit's built-in editor](#using-the-built-in-editor)
|
||||
* or use the public editor at https://cm.foxxmd.dev/config
|
||||
|
||||
PROTIP: Find an [example config](#using-an-example-config) to use as a starting point and then build on it using the editor.
|
||||
|
||||
## Saving Your Configuration
|
||||
|
||||
* Open the wiki page you created in the [previous step](#setup-wiki-page) and click **edit**
|
||||
### Using the built-in Editor
|
||||
|
||||
In the web interface each subreddit's tab has access to the built-in editor. Use this built-in editor to automatically create, load, or save the configuration for that subreddit's wiki.
|
||||
|
||||
* Visit the tab for the subreddit you want to edit the configuration of
|
||||
* Open the [built-in editor by clicking **View**](/docs/screenshots/configBox.png)
|
||||
* Edit your configuration
|
||||
* Follow the directions on the **Save to r/..** link found at the top of the editor to automatically save your configuration
|
||||
|
||||
### Manually Saving
|
||||
|
||||
<details>
|
||||
|
||||
* Open the wiki page you created in the [wiki setup step](#setup-wiki-page) and click **edit**
|
||||
* Copy-paste your configuration into the wiki text box
|
||||
* Save the edited wiki page
|
||||
|
||||
</details>
|
||||
|
||||
___
|
||||
|
||||
The bot automatically checks for new configurations on your wiki page every 5 minutes. If your operator has the web interface accessible you may login there and force the config to update on your subreddit.
|
||||
|
||||
@@ -50,6 +50,18 @@ tsc -p .
|
||||
### [Heroku Quick Deploy](https://heroku.com/about)
|
||||
[](https://dashboard.heroku.com/new?template=https://github.com/FoxxMD/context-mod)
|
||||
|
||||
This template provides a **web** and **worker** dyno for heroku.
|
||||
|
||||
* **Web** -- Will run the bot **and** the web interface for ContextMod.
|
||||
* **Worker** -- Will run **just** the bot.
|
||||
|
||||
Be aware that Heroku's [free dyno plan](https://devcenter.heroku.com/articles/free-dyno-hours#dyno-sleeping) enacts some limits:
|
||||
|
||||
* A **Web** dyno will go to sleep (pause) after 30 minutes without web activity -- so your bot will ALSO go to sleep at this time
|
||||
* The **Worker** dyno **will not** go to sleep but you will NOT be able to access the web interface. You can, however, still see how CM is running by reading the logs for the dyno.
|
||||
|
||||
If you want to use a free dyno it is recommended you perform first-time setup (bot authentication and configuration, testing, etc...) with the **Web** dyno, then SWITCH to a **Worker** dyno so it can run 24/7.
|
||||
|
||||
# Bot Authentication
|
||||
|
||||
Next you need to create a bot and authenticate it with Reddit. Follow the [bot authentication guide](/docs/botAuthentication.md) to complete this step.
|
||||
|
||||
@@ -7,6 +7,7 @@ ContextMod supports comparing image content, for the purpose of detecting duplic
|
||||
|
||||
To enable comparisons reference the example below (at the top-level of your rule) and configure as needed:
|
||||
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetection",
|
||||
@@ -31,9 +32,18 @@ To enable comparisons reference the example below (at the top-level of your rule
|
||||
},
|
||||
//
|
||||
// And above ^^^
|
||||
...
|
||||
//...
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
name: ruleWithImageDetection
|
||||
kind: recentActivity
|
||||
enable: true
|
||||
threshold: 5
|
||||
fetchBehavior: extension
|
||||
|
||||
```
|
||||
|
||||
**Perceptual Hashing** (`hash`) and **Pixel Comparisons** (`pixel`) may be used at the same time. Refer to the documentation below to see how they interact.
|
||||
|
||||
@@ -114,10 +124,12 @@ To further configure hashing refer to this code block:
|
||||
// the higher the bits the more accurate the comparison
|
||||
//
|
||||
// NOTE: Hashes of different sizes (bits) cannot be compared. If you are caching hashes make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.
|
||||
"bits": 32, // default is 32 if not defined
|
||||
"bits": 32,
|
||||
// default is 32 if not defined
|
||||
//
|
||||
// number of seconds to cache an image hash
|
||||
"ttl": 60, // default is 60 if not defined
|
||||
"ttl": 60,
|
||||
// default is 60 if not defined
|
||||
//
|
||||
// "High Confidence" Threshold
|
||||
// If the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur
|
||||
@@ -139,8 +151,22 @@ To further configure hashing refer to this code block:
|
||||
//
|
||||
// And above ^^^
|
||||
//"pixel": {...}
|
||||
},
|
||||
}
|
||||
//...
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
name: ruleWithImageDetectionAndConfiguredHashing
|
||||
kind: recentActivity
|
||||
imageDetection:
|
||||
enable: true
|
||||
hash:
|
||||
enable: true
|
||||
bits: 32
|
||||
ttl: 60
|
||||
hardThreshold: 5
|
||||
softThreshold: 0
|
||||
```
|
||||
|
||||
## Pixel Comparison
|
||||
@@ -184,18 +210,28 @@ To configure pixel comparisons refer to this code block:
|
||||
|
||||
```json5
|
||||
{
|
||||
"name": "ruleWithImageDetectionAndPixelEnabled",
|
||||
"kind": "recentActivity",
|
||||
"imageDetection": {
|
||||
//"hash": {...}
|
||||
"pixel": {
|
||||
// enable or disable pixel comparisons (disabled by default)
|
||||
"enable": true,
|
||||
// if the comparison difference percentage is equal to or less than this value the images are considered the same
|
||||
//
|
||||
// if not defined the value from imageDetection.threshold will be used
|
||||
"threshold": 5
|
||||
}
|
||||
},
|
||||
//...
|
||||
"name": "ruleWithImageDetectionAndPixelEnabled",
|
||||
"kind": "recentActivity",
|
||||
"imageDetection": {
|
||||
//"hash": {...}
|
||||
"pixel": {
|
||||
// enable or disable pixel comparisons (disabled by default)
|
||||
"enable": true,
|
||||
// if the comparison difference percentage is equal to or less than this value the images are considered the same
|
||||
//
|
||||
// if not defined the value from imageDetection.threshold will be used
|
||||
"threshold": 5
|
||||
}
|
||||
},
|
||||
//...
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
name: ruleWithImageDetectionAndPixelEnabled
|
||||
kind: recentActivity
|
||||
imageDetection:
|
||||
pixel:
|
||||
enable: true
|
||||
threshold: 5
|
||||
```
|
||||
|
||||
@@ -121,6 +121,16 @@ Below are examples of the minimum required config to run the application using a
|
||||
Using **FILE**
|
||||
<details>
|
||||
|
||||
YAML
|
||||
```yaml
|
||||
bots:
|
||||
- credentials:
|
||||
clientId: f4b4df1c7b2
|
||||
clientSecret: 34v5q1c56ub
|
||||
refreshToken: 34_f1w1v4
|
||||
accessToken: p75_1c467b2
|
||||
```
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"bots": [
|
||||
@@ -175,6 +185,11 @@ An example of using multiple configuration levels together IE all are provided t
|
||||
}
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
logging:
|
||||
level: debug
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
@@ -220,6 +235,30 @@ See the [Architecture Docs](/docs/serverClientArchitecture.md) for more informat
|
||||
|
||||
<details>
|
||||
|
||||
YAML
|
||||
```yaml
|
||||
bots:
|
||||
- credentials:
|
||||
clientId: f4b4df1c7b2
|
||||
clientSecret: 34v5q1c56ub
|
||||
refreshToken: 34_f1w1v4
|
||||
accessToken: p75_1c467b2
|
||||
web:
|
||||
credentials:
|
||||
clientId: f4b4df1c7b2
|
||||
clientSecret: 34v5q1c56ub
|
||||
redirectUri: 'http://localhost:8085/callback'
|
||||
clients:
|
||||
# server application running on this same CM instance
|
||||
- host: 'localhost:8095'
|
||||
secret: localSecret
|
||||
# a server application running somewhere else
|
||||
- host: 'mySecondContextMod.com:8095'
|
||||
secret: anotherSecret
|
||||
api:
|
||||
secret: localSecret
|
||||
```
|
||||
JSON
|
||||
```json5
|
||||
{
|
||||
"bots": [
|
||||
@@ -289,3 +328,14 @@ A caching object in the json configuration:
|
||||
}
|
||||
}
|
||||
```
|
||||
YAML
|
||||
```yaml
|
||||
provider:
|
||||
store: memory
|
||||
ttl: 60
|
||||
max: 500
|
||||
host: localhost
|
||||
port: 6379
|
||||
auth_pass: null
|
||||
db: 0
|
||||
```
|
||||
|
||||
BIN
docs/screenshots/configBox.png
Normal file
BIN
docs/screenshots/configBox.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 45 KiB |
29
heroku.Dockerfile
Normal file
29
heroku.Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
||||
FROM node:16-alpine3.14
|
||||
|
||||
ENV TZ=Etc/GMT
|
||||
|
||||
# vips required to run sharp library for image comparison
|
||||
RUN echo "http://dl-4.alpinelinux.org/alpine/v3.14/community" >> /etc/apk/repositories \
|
||||
&& apk --update add vips
|
||||
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
WORKDIR /usr/app
|
||||
|
||||
COPY package*.json ./
|
||||
COPY tsconfig.json .
|
||||
|
||||
RUN npm install
|
||||
|
||||
ADD . /usr/app
|
||||
|
||||
RUN npm run build
|
||||
|
||||
ENV NPM_CONFIG_LOGLEVEL debug
|
||||
|
||||
ARG log_dir=/home/node/logs
|
||||
RUN mkdir -p $log_dir
|
||||
VOLUME $log_dir
|
||||
ENV LOG_DIR=$log_dir
|
||||
|
||||
CMD [ "node", "src/index.js", "run", "all", "--port $PORT"]
|
||||
@@ -1,3 +1,4 @@
|
||||
build:
|
||||
docker:
|
||||
worker: Dockerfile
|
||||
web: heroku.Dockerfile
|
||||
worker: heroku.Dockerfile
|
||||
|
||||
3992
package-lock.json
generated
3992
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
10
package.json
10
package.json
@@ -7,7 +7,6 @@
|
||||
"test": "echo \"Error: no tests installed\" && exit 1",
|
||||
"build": "tsc",
|
||||
"start": "node src/index.js run",
|
||||
"guard": "ts-auto-guard src/JsonConfig.ts",
|
||||
"schema": "npm run -s schema-app & npm run -s schema-ruleset & npm run -s schema-rule & npm run -s schema-action & npm run -s schema-config",
|
||||
"schema-app": "typescript-json-schema tsconfig.json JSONConfig --out src/Schema/App.json --required --tsNodeRegister --refs",
|
||||
"schema-ruleset": "typescript-json-schema tsconfig.json RuleSetJson --out src/Schema/RuleSet.json --required --tsNodeRegister --refs",
|
||||
@@ -26,8 +25,10 @@
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@awaitjs/express": "^0.8.0",
|
||||
"@googleapis/youtube": "^2.0.0",
|
||||
"@stdlib/regexp-regexp": "^0.0.6",
|
||||
"ajv": "^7.2.4",
|
||||
"ansi-regex": ">=5.0.1",
|
||||
"async": "^3.2.0",
|
||||
"autolinker": "^3.14.3",
|
||||
"body-parser": "^1.19.0",
|
||||
@@ -57,7 +58,6 @@
|
||||
"leven": "^3.1.0",
|
||||
"lodash": "^4.17.21",
|
||||
"lru-cache": "^6.0.0",
|
||||
"monaco-editor": "^0.27.0",
|
||||
"mustache": "^4.2.0",
|
||||
"node-fetch": "^2.6.1",
|
||||
"normalize-url": "^6.1.0",
|
||||
@@ -70,15 +70,15 @@
|
||||
"pixelmatch": "^5.2.1",
|
||||
"pretty-print-json": "^1.0.3",
|
||||
"safe-stable-stringify": "^1.1.1",
|
||||
"set-random-interval": "^1.1.0",
|
||||
"snoostorm": "^1.5.2",
|
||||
"snoowrap": "^1.23.0",
|
||||
"socket.io": "^4.1.3",
|
||||
"string-similarity": "^4.0.4",
|
||||
"tcp-port-used": "^1.0.2",
|
||||
"triple-beam": "^1.3.0",
|
||||
"typescript": "^4.3.4",
|
||||
"webhook-discord": "^3.7.7",
|
||||
"winston": "FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
|
||||
"winston": "github:FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
|
||||
"winston-daily-rotate-file": "^4.5.5",
|
||||
"winston-duplex": "^0.1.1",
|
||||
"winston-transport": "^4.4.0",
|
||||
@@ -108,9 +108,9 @@
|
||||
"@types/passport-jwt": "^3.0.6",
|
||||
"@types/pixelmatch": "^5.2.4",
|
||||
"@types/sharp": "^0.29.2",
|
||||
"@types/string-similarity": "^4.0.0",
|
||||
"@types/tcp-port-used": "^1.0.0",
|
||||
"@types/triple-beam": "^1.3.2",
|
||||
"ts-auto-guard": "*",
|
||||
"ts-json-schema-generator": "^0.93.0",
|
||||
"typescript-json-schema": "^0.50.1"
|
||||
},
|
||||
|
||||
@@ -10,10 +10,11 @@ import ApproveAction, {ApproveActionConfig} from "./ApproveAction";
|
||||
import BanAction, {BanActionJson} from "./BanAction";
|
||||
import {MessageAction, MessageActionJson} from "./MessageAction";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {UserFlairAction, UserFlairActionJson} from './UserFlairAction';
|
||||
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
|
||||
|
||||
export function actionFactory
|
||||
(config: ActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Action {
|
||||
(config: ActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: ExtendedSnoowrap): Action {
|
||||
switch (config.kind) {
|
||||
case 'comment':
|
||||
return new CommentAction({...config as CommentActionJson, logger, subredditName, resources, client});
|
||||
@@ -25,6 +26,8 @@ export function actionFactory
|
||||
return new ReportAction({...config as ReportActionJson, logger, subredditName, resources, client});
|
||||
case 'flair':
|
||||
return new FlairAction({...config as FlairActionJson, logger, subredditName, resources, client});
|
||||
case 'userflair':
|
||||
return new UserFlairAction({...config as UserFlairActionJson, logger, subredditName, resources, client});
|
||||
case 'approve':
|
||||
return new ApproveAction({...config as ApproveActionConfig, logger, subredditName, resources, client});
|
||||
case 'usernote':
|
||||
|
||||
@@ -11,6 +11,7 @@ export class ApproveAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const touchedEntities = [];
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
if (item.approved) {
|
||||
@@ -23,11 +24,12 @@ export class ApproveAction extends Action {
|
||||
}
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.approve();
|
||||
touchedEntities.push(await item.approve());
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
touchedEntities
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,6 +39,7 @@ export class BanAction extends Action {
|
||||
const renderedBody = content === undefined ? undefined : await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
const renderedContent = renderedBody === undefined ? undefined : `${renderedBody}${await this.resources.generateFooter(item, this.footer)}`;
|
||||
|
||||
const touchedEntities = [];
|
||||
let banPieces = [];
|
||||
banPieces.push(`Message: ${renderedContent === undefined ? 'None' : `${renderedContent.length > 100 ? `\r\n${renderedContent}` : renderedContent}`}`);
|
||||
banPieces.push(`Reason: ${this.reason || 'None'}`);
|
||||
@@ -50,18 +51,20 @@ export class BanAction extends Action {
|
||||
// @ts-ignore
|
||||
const fetchedSub = await item.subreddit.fetch();
|
||||
const fetchedName = await item.author.name;
|
||||
await fetchedSub.banUser({
|
||||
const bannedUser = await fetchedSub.banUser({
|
||||
name: fetchedName,
|
||||
banMessage: renderedContent === undefined ? undefined : renderedContent,
|
||||
banReason: this.reason,
|
||||
banNote: this.note,
|
||||
duration: this.duration
|
||||
});
|
||||
touchedEntities.push(bannedUser);
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`
|
||||
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`,
|
||||
touchedEntities
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,16 +51,19 @@ export class CommentAction extends Action {
|
||||
result: 'Cannot comment because Item is archived'
|
||||
};
|
||||
}
|
||||
const touchedEntities = [];
|
||||
let reply: Comment;
|
||||
if(!dryRun) {
|
||||
// @ts-ignore
|
||||
reply = await item.reply(renderedContent);
|
||||
touchedEntities.push(reply);
|
||||
}
|
||||
if (this.lock) {
|
||||
if (!dryRun) {
|
||||
// snoopwrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
await item.lock();
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
}
|
||||
if (this.distinguish && !dryRun) {
|
||||
@@ -78,7 +81,8 @@ export class CommentAction extends Action {
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: `${modifierStr}${this.lock ? ' - Locked Author\'s Activity - ' : ''}${truncateStringToLength(100)(body)}`
|
||||
result: `${modifierStr}${this.lock ? ' - Locked Author\'s Activity - ' : ''}${truncateStringToLength(100)(body)}`,
|
||||
touchedEntities,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ export class LockAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const touchedEntities = [];
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
if (item.locked) {
|
||||
@@ -25,10 +26,12 @@ export class LockAction extends Action {
|
||||
//snoowrap typing issue, thinks comments can't be locked
|
||||
// @ts-ignore
|
||||
await item.lock();
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
return {
|
||||
dryRun,
|
||||
success: true
|
||||
success: true,
|
||||
touchedEntities
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,13 +58,13 @@ export class MessageAction extends Action {
|
||||
if(this.to !== undefined) {
|
||||
// parse to value
|
||||
try {
|
||||
const entityData = parseRedditEntity(this.to);
|
||||
const entityData = parseRedditEntity(this.to, 'user');
|
||||
if(entityData.type === 'user') {
|
||||
recipient = entityData.name;
|
||||
} else {
|
||||
recipient = `/r/${entityData.name}`;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error(`'to' field for message was not in a valid format. See ${REDDIT_ENTITY_REGEX_URL} for valid examples`);
|
||||
this.logger.error(err);
|
||||
err.logged = true;
|
||||
|
||||
@@ -12,6 +12,7 @@ export class RemoveAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
const touchedEntities = [];
|
||||
// issue with snoowrap typings, doesn't think prop exists on Submission
|
||||
// @ts-ignore
|
||||
if (activityIsRemoved(item)) {
|
||||
@@ -24,11 +25,13 @@ export class RemoveAction extends Action {
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.remove();
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
touchedEntities
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,15 +29,20 @@ export class ReportAction extends Action {
|
||||
const renderedContent = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
this.logger.verbose(`Contents:\r\n${renderedContent}`);
|
||||
const truncatedContent = reportTrunc(renderedContent);
|
||||
const touchedEntities = [];
|
||||
if(!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.report({reason: truncatedContent});
|
||||
// due to reddit not updating this in response (maybe)?? just increment stale activity
|
||||
item.num_reports++;
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: truncatedContent
|
||||
result: truncatedContent,
|
||||
touchedEntities
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,23 @@
|
||||
import {SubmissionActionConfig} from "./index";
|
||||
import Action, {ActionJson, ActionOptions} from "../index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {RuleResult} from "../../Rule";
|
||||
import {ActionProcessResult} from "../../Common/interfaces";
|
||||
import Submission from 'snoowrap/dist/objects/Submission';
|
||||
import Comment from 'snoowrap/dist/objects/Comment';
|
||||
|
||||
export class FlairAction extends Action {
|
||||
text: string;
|
||||
css: string;
|
||||
flair_template_id: string;
|
||||
|
||||
constructor(options: FlairActionOptions) {
|
||||
super(options);
|
||||
if (options.text === undefined && options.css === undefined) {
|
||||
throw new Error('Must define either text or css on FlairAction');
|
||||
if (options.text === undefined && options.css === undefined && options.flair_template_id === undefined) {
|
||||
throw new Error('Must define either text+css or flair_template_id on FlairAction');
|
||||
}
|
||||
this.text = options.text || '';
|
||||
this.css = options.css || '';
|
||||
this.flair_template_id = options.flair_template_id || '';
|
||||
}
|
||||
|
||||
getKind() {
|
||||
@@ -34,8 +37,12 @@ export class FlairAction extends Action {
|
||||
this.logger.verbose(flairSummary);
|
||||
if (item instanceof Submission) {
|
||||
if(!this.dryRun) {
|
||||
// @ts-ignore
|
||||
await item.assignFlair({text: this.text, cssClass: this.css})
|
||||
if (this.flair_template_id) {
|
||||
await item.selectFlair({flair_template_id: this.flair_template_id}).then(() => {});
|
||||
} else {
|
||||
await item.assignFlair({text: this.text, cssClass: this.css}).then(() => {});
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
this.logger.warn('Cannot flair Comment');
|
||||
@@ -60,12 +67,16 @@ export class FlairAction extends Action {
|
||||
export interface FlairActionConfig extends SubmissionActionConfig {
|
||||
/**
|
||||
* The text of the flair to apply
|
||||
* */
|
||||
* */
|
||||
text?: string,
|
||||
/**
|
||||
* The text of the css class of the flair to apply
|
||||
* */
|
||||
css?: string,
|
||||
/**
|
||||
* Flair template ID to assign
|
||||
* */
|
||||
flair_template_id?: string,
|
||||
}
|
||||
|
||||
export interface FlairActionOptions extends FlairActionConfig,ActionOptions {
|
||||
@@ -76,5 +87,5 @@ export interface FlairActionOptions extends FlairActionConfig,ActionOptions {
|
||||
* Flair the Submission
|
||||
* */
|
||||
export interface FlairActionJson extends FlairActionConfig, ActionJson {
|
||||
kind: 'flair'
|
||||
kind: 'flair'
|
||||
}
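As an illustrative aside, a minimal sketch (not from the repo's docs) of how the new `flair_template_id` option might appear in a flair action config; the template ID and import path are assumptions taken from elsewhere in this diff:

```typescript
// Sketch only -- import path as used in other files in this diff
import {FlairActionJson} from "../Action/SubmissionAction/FlairAction";

// When a template ID is given, text/css are ignored (the ID below is a made-up placeholder)
const flairByTemplate: FlairActionJson = {
    kind: 'flair',
    flair_template_id: 'abc123-example-template-id',
};

// The existing text + css form still works
const flairByTextCss: FlairActionJson = {
    kind: 'flair',
    text: 'Possible Repost',
    css: 'repost',
};
```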
|
||||
|
||||
109 src/Action/UserFlairAction.ts Normal file
@@ -0,0 +1,109 @@
|
||||
import Action, {ActionConfig, ActionJson, ActionOptions} from './index';
|
||||
import {Comment, RedditUser, Submission} from 'snoowrap';
|
||||
import {RuleResult} from '../Rule';
|
||||
import {ActionProcessResult} from '../Common/interfaces';
|
||||
|
||||
export class UserFlairAction extends Action {
|
||||
text?: string;
|
||||
css?: string;
|
||||
flair_template_id?: string;
|
||||
|
||||
constructor(options: UserFlairActionOptions) {
|
||||
super(options);
|
||||
|
||||
this.text = options.text === null || options.text === '' ? undefined : options.text;
|
||||
this.css = options.css === null || options.css === '' ? undefined : options.css;
|
||||
this.flair_template_id = options.flair_template_id === null || options.flair_template_id === '' ? undefined : options.flair_template_id;
|
||||
}
|
||||
|
||||
getKind() {
|
||||
return 'User Flair';
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
|
||||
const dryRun = runtimeDryrun || this.dryRun;
|
||||
let flairParts = [];
|
||||
|
||||
if (this.flair_template_id !== undefined) {
|
||||
flairParts.push(`Flair template ID: ${this.flair_template_id}`)
|
||||
if(this.text !== undefined || this.css !== undefined) {
|
||||
this.logger.warn('Text/CSS properties will be ignored since a flair template is specified');
|
||||
}
|
||||
} else {
|
||||
if (this.text !== undefined) {
|
||||
flairParts.push(`Text: ${this.text}`);
|
||||
}
|
||||
if (this.css !== undefined) {
|
||||
flairParts.push(`CSS: ${this.css}`);
|
||||
}
|
||||
}
|
||||
|
||||
const flairSummary = flairParts.length === 0 ? 'Unflair user' : flairParts.join(' | ');
|
||||
this.logger.verbose(flairSummary);
|
||||
|
||||
if (!this.dryRun) {
|
||||
if (this.flair_template_id !== undefined) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.assignUserFlairByTemplateId({
|
||||
subredditName: item.subreddit.display_name,
|
||||
flairTemplateId: this.flair_template_id,
|
||||
username: item.author.name,
|
||||
});
|
||||
} catch (err: any) {
|
||||
this.logger.error('Either the flair template ID is incorrect or you do not have permission to access it.');
|
||||
throw err;
|
||||
}
|
||||
} else if (this.text === undefined && this.css === undefined) {
|
||||
// @ts-ignore
|
||||
await item.subreddit.deleteUserFlair(item.author.name);
|
||||
} else {
|
||||
// @ts-ignore
|
||||
await item.author.assignFlair({
|
||||
subredditName: item.subreddit.display_name,
|
||||
cssClass: this.css,
|
||||
text: this.text,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: flairSummary,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Flair the Author of an Activity
|
||||
*
|
||||
* Leave all properties blank or null to remove a User's existing flair
|
||||
* */
|
||||
export interface UserFlairActionConfig extends ActionConfig {
|
||||
/**
|
||||
* The text of the flair to apply
|
||||
* */
|
||||
text?: string,
|
||||
/**
|
||||
* The text of the css class of the flair to apply
|
||||
* */
|
||||
css?: string,
|
||||
/**
|
||||
* Flair template to pick.
|
||||
*
|
||||
* **Note:** If this template is used text/css are ignored
|
||||
* */
|
||||
flair_template_id?: string;
|
||||
}
|
||||
|
||||
export interface UserFlairActionOptions extends UserFlairActionConfig, ActionOptions {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Flair the Submission
|
||||
* */
|
||||
export interface UserFlairActionJson extends UserFlairActionConfig, ActionJson {
|
||||
kind: 'userflair'
|
||||
}
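Similarly, a hedged sketch of the new `userflair` action's JSON shape, based only on the `UserFlairActionJson` interface above; the import path is taken from other files in this diff:

```typescript
// Sketch only
import {UserFlairActionJson} from "../Action/UserFlairAction";

// Assign flair to the Activity's author
const flairAuthor: UserFlairActionJson = {
    kind: 'userflair',
    text: 'Frequent Poster',
    css: 'frequent',
};

// Per the interface docs, leaving all flair properties unset removes the author's existing flair
const removeAuthorFlair: UserFlairActionJson = {
    kind: 'userflair',
};
```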
|
||||
@@ -1,4 +1,4 @@
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {Comment, Submission} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
@@ -6,12 +6,13 @@ import {ActionProcessResult, ActionResult, ChecksActivityState, TypedActivitySta
|
||||
import Author, {AuthorOptions} from "../Author/Author";
|
||||
import {mergeArr} from "../util";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
|
||||
|
||||
export abstract class Action {
|
||||
name?: string;
|
||||
logger: Logger;
|
||||
resources: SubredditResources;
|
||||
client: Snoowrap
|
||||
client: ExtendedSnoowrap;
|
||||
authorIs: AuthorOptions;
|
||||
itemIs: TypedActivityStates;
|
||||
dryRun: boolean;
|
||||
@@ -98,7 +99,7 @@ export abstract class Action {
|
||||
actRes.run = true;
|
||||
const results = await this.process(item, ruleResults, runtimeDryrun);
|
||||
return {...actRes, ...results};
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Encountered error while running`, err);
|
||||
}
|
||||
@@ -114,8 +115,8 @@ export abstract class Action {
|
||||
export interface ActionOptions extends ActionConfig {
|
||||
logger: Logger;
|
||||
subredditName: string;
|
||||
resources: SubredditResources
|
||||
client: Snoowrap
|
||||
resources: SubredditResources;
|
||||
client: ExtendedSnoowrap;
|
||||
}
|
||||
|
||||
export interface ActionConfig extends ChecksActivityState {
|
||||
@@ -162,7 +163,7 @@ export interface ActionJson extends ActionConfig {
|
||||
/**
|
||||
* The type of action that will be performed
|
||||
*/
|
||||
kind: 'comment' | 'lock' | 'remove' | 'report' | 'approve' | 'ban' | 'flair' | 'usernote' | 'message'
|
||||
kind: 'comment' | 'lock' | 'remove' | 'report' | 'approve' | 'ban' | 'flair' | 'usernote' | 'message' | 'userflair'
|
||||
}
|
||||
|
||||
export const isActionJson = (obj: object): obj is ActionJson => {
|
||||
|
||||
@@ -74,7 +74,7 @@ export class App {
|
||||
this.logger.error(err);
|
||||
}
|
||||
});
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (b.error === undefined) {
|
||||
b.error = err.message;
|
||||
}
|
||||
|
||||
186 src/Bot/index.ts
@@ -20,6 +20,8 @@ import {ModQueueStream, UnmoderatedStream} from "../Subreddit/Streams";
|
||||
import {BotResourcesManager} from "../Subreddit/SubredditResources";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
import pEvent from "p-event";
|
||||
import SimpleError from "../Utils/SimpleError";
|
||||
import {isRateLimitError, isStatusError} from "../Utils/Errors";
|
||||
|
||||
|
||||
class Bot {
|
||||
@@ -42,6 +44,7 @@ class Bot {
|
||||
nannyRunning: boolean = false;
|
||||
nextNannyCheck: Dayjs = dayjs().add(10, 'second');
|
||||
nannyRetryHandler: Function;
|
||||
managerRetryHandler: Function;
|
||||
nextExpiration: Dayjs = dayjs();
|
||||
botName?: string;
|
||||
botLink?: string;
|
||||
@@ -51,6 +54,8 @@ class Bot {
|
||||
sharedModqueue: boolean = false;
|
||||
streamListedOnce: string[] = [];
|
||||
|
||||
stagger: number;
|
||||
|
||||
apiSample: number[] = [];
|
||||
apiRollingAvg: number = 0;
|
||||
apiEstDepletion?: Duration;
|
||||
@@ -81,10 +86,12 @@ class Bot {
|
||||
heartbeatInterval,
|
||||
},
|
||||
credentials: {
|
||||
clientId,
|
||||
clientSecret,
|
||||
refreshToken,
|
||||
accessToken,
|
||||
reddit: {
|
||||
clientId,
|
||||
clientSecret,
|
||||
refreshToken,
|
||||
accessToken,
|
||||
},
|
||||
},
|
||||
snoowrap: {
|
||||
proxy,
|
||||
@@ -92,7 +99,7 @@ class Bot {
|
||||
},
|
||||
polling: {
|
||||
sharedMod,
|
||||
stagger,
|
||||
stagger = 2000,
|
||||
},
|
||||
queue: {
|
||||
maxWorkers,
|
||||
@@ -171,23 +178,26 @@ class Bot {
|
||||
this.client = proxy === undefined ? new ExtendedSnoowrap(creds) : new ProxiedSnoowrap({...creds, proxy});
|
||||
this.client.config({
|
||||
warnings: true,
|
||||
maxRetryAttempts: 5,
|
||||
maxRetryAttempts: 2,
|
||||
debug,
|
||||
logger: snooLogWrapper(this.logger.child({labels: ['Snoowrap']}, mergeArr)),
|
||||
continueAfterRatelimitError: true,
|
||||
continueAfterRatelimitError: false,
|
||||
});
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(this.error === undefined) {
|
||||
this.error = err.message;
|
||||
this.logger.error(err);
|
||||
}
|
||||
}
|
||||
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 1}, this.logger);
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 2}, this.logger);
|
||||
this.nannyRetryHandler = createRetryHandler({maxRequestRetry: 5, maxOtherRetry: 1}, this.logger);
|
||||
this.managerRetryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 8, waitOnRetry: false, clearRetryCountAfter: 2}, this.logger);
|
||||
|
||||
this.stagger = stagger ?? 2000;
|
||||
|
||||
const modStreamErrorListener = (name: string) => async (err: any) => {
|
||||
this.logger.error('Polling error occurred', err);
|
||||
this.logger.error(`Polling error occurred on stream ${name.toUpperCase()}`, err);
|
||||
const shouldRetry = await retryHandler(err);
|
||||
if(shouldRetry) {
|
||||
defaultUnmoderatedStream.startInterval();
|
||||
@@ -259,19 +269,23 @@ class Bot {
|
||||
}
|
||||
}
|
||||
|
||||
async testClient() {
|
||||
async testClient(initial = true) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.getMe();
|
||||
this.logger.info('Test API call successful');
|
||||
} catch (err) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
|
||||
if(err.name === 'StatusCodeError') {
|
||||
} catch (err: any) {
|
||||
if (initial) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
|
||||
}
|
||||
if (err.name === 'StatusCodeError') {
|
||||
const authHeader = err.response.headers['www-authenticate'];
|
||||
if (authHeader !== undefined && authHeader.includes('insufficient_scope')) {
|
||||
this.logger.error('Reddit responded with a 403 insufficient_scope. Please ensure you have chosen the correct scopes when authorizing your account.');
|
||||
} else if(err.statusCode === 401) {
|
||||
} else if (err.statusCode === 401) {
|
||||
this.logger.error('It is likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken');
|
||||
} else if(err.statusCode === 400) {
|
||||
this.logger.error('Credentials may have been invalidated due to prior behavior. The error message may contain more information.');
|
||||
}
|
||||
this.logger.error(`Error Message: ${err.message}`);
|
||||
} else {
|
||||
@@ -298,10 +312,12 @@ class Bot {
|
||||
}
|
||||
this.logger.info(`Bot Name${botNameFromConfig ? ' (from config)' : ''}: ${this.botName}`);
|
||||
|
||||
for (const sub of await this.client.getModeratedSubreddits()) {
|
||||
// TODO don't know a way to check permissions yet
|
||||
availSubs.push(sub);
|
||||
let subListing = await this.client.getModeratedSubreddits({count: 100});
|
||||
while(!subListing.isFinished) {
|
||||
subListing = await subListing.fetchMore({amount: 100});
|
||||
}
|
||||
availSubs = subListing;
|
||||
|
||||
this.logger.info(`u/${user.name} is a moderator of these subreddits: ${availSubs.map(x => x.display_name_prefixed).join(', ')}`);
|
||||
|
||||
let subsToRun: Subreddit[] = [];
|
||||
@@ -324,26 +340,45 @@ class Bot {
|
||||
const normalExcludes = this.excludeSubreddits.map(x => x.toLowerCase());
|
||||
subsToRun = availSubs.filter(x => !normalExcludes.includes(x.display_name.toLowerCase()));
|
||||
} else {
|
||||
this.logger.info('No user-defined subreddit constraints detected, will run on all moderated subreddits');
|
||||
subsToRun = availSubs;
|
||||
this.logger.info(`No user-defined subreddit constraints detected, will run on all moderated subreddits EXCEPT own profile (${this.botAccount})`);
|
||||
subsToRun = availSubs.filter(x => x.display_name_prefixed !== this.botAccount);
|
||||
}
|
||||
}
|
||||
|
||||
let subSchedule: Manager[] = [];
|
||||
// get configs for subs we want to run on and build/validate them
|
||||
for (const sub of subsToRun) {
|
||||
const manager = new Manager(sub, this.client, this.logger, this.cacheManager, {dryRun: this.dryRun, sharedModqueue: this.sharedModqueue, wikiLocation: this.wikiLocation, botName: this.botName, maxWorkers: this.maxWorkers});
|
||||
try {
|
||||
await manager.parseConfiguration('system', true, {suppressNotification: true});
|
||||
} catch (err) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Config was not valid:`, {subreddit: sub.display_name_prefixed});
|
||||
this.logger.error(err, {subreddit: sub.display_name_prefixed});
|
||||
}
|
||||
this.subManagers.push(await this.createManager(sub));
|
||||
} catch (err: any) {
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async createManager(sub: Subreddit): Promise<Manager> {
|
||||
const manager = new Manager(sub, this.client, this.logger, this.cacheManager, {dryRun: this.dryRun, sharedModqueue: this.sharedModqueue, wikiLocation: this.wikiLocation, botName: this.botName as string, maxWorkers: this.maxWorkers});
|
||||
try {
|
||||
await manager.parseConfiguration('system', true, {suppressNotification: true});
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(`Config was not valid:`, {subreddit: sub.display_name_prefixed});
|
||||
this.logger.error(err, {subreddit: sub.display_name_prefixed});
|
||||
err.logged = true;
|
||||
}
|
||||
}
|
||||
// all errors from managers will count towards bot-level retry count
|
||||
manager.on('error', async (err) => await this.panicOnRetries(err));
|
||||
return manager;
|
||||
}
|
||||
|
||||
// if the cumulative errors exceeds configured threshold then stop ALL managers as there is most likely something very bad happening
|
||||
async panicOnRetries(err: any) {
|
||||
if(!await this.managerRetryHandler(err)) {
|
||||
this.logger.warn('Bot detected too many errors from managers within a short time. Stopping all managers and will try to restart on next heartbeat.');
|
||||
for(const m of this.subManagers) {
|
||||
await m.stop('system',{reason: 'Bot detected too many errors from all managers. Stopping all managers as a failsafe.'});
|
||||
}
|
||||
subSchedule.push(manager);
|
||||
}
|
||||
this.subManagers = subSchedule;
|
||||
}
|
||||
|
||||
async destroy(causedBy: Invokee) {
|
||||
@@ -357,6 +392,40 @@ class Bot {
|
||||
this.logger.info('Bot is stopped.');
|
||||
}
|
||||
|
||||
async checkModInvites() {
|
||||
const subs: string[] = await this.cacheManager.getPendingSubredditInvites();
|
||||
for (const name of subs) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.getSubreddit(name).acceptModeratorInvite();
|
||||
this.logger.info(`Accepted moderator invite for r/${name}!`);
|
||||
await this.cacheManager.deletePendingSubredditInvite(name);
|
||||
// @ts-ignore
|
||||
const sub = await this.client.getSubreddit(name);
|
||||
this.logger.info(`Attempting to add manager for r/${name}`);
|
||||
try {
|
||||
const manager = await this.createManager(sub);
|
||||
this.logger.info(`Starting manager for r/${name}`);
|
||||
this.subManagers.push(manager);
|
||||
await manager.start('system', {reason: 'Caused by creation due to moderator invite'});
|
||||
await this.runModStreams();
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(err);
|
||||
}
|
||||
}
|
||||
} catch (err: any) {
|
||||
if (err.message.includes('NO_INVITE_FOUND')) {
|
||||
this.logger.warn(`No pending moderation invite for r/${name} was found`);
|
||||
} else if (isStatusError(err) && err.statusCode === 403) {
|
||||
this.logger.error(`Error occurred while checking r/${name} for a pending moderation invite. It is likely that this bot does not have the 'modself' oauth permission. Error: ${err.message}`);
|
||||
} else {
|
||||
this.logger.error(`Error occurred while checking r/${name} for a pending moderation invite. Error: ${err.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async runModStreams(notify = false) {
|
||||
for(const [k,v] of this.cacheManager.modStreams) {
|
||||
if(!v.running && this.subManagers.some(x => x.modStreamCallbacks.get(k) !== undefined)) {
|
||||
@@ -375,6 +444,8 @@ class Bot {
|
||||
}
|
||||
|
||||
async runManagers(causedBy: Invokee = 'system') {
|
||||
this.running = true;
|
||||
|
||||
if(this.subManagers.every(x => !x.validConfigLoaded)) {
|
||||
this.logger.warn('All managers have invalid configs!');
|
||||
this.error = 'All managers have invalid configs';
|
||||
@@ -382,15 +453,15 @@ class Bot {
|
||||
for (const manager of this.subManagers) {
|
||||
if (manager.validConfigLoaded && manager.botState.state !== RUNNING) {
|
||||
await manager.start(causedBy, {reason: 'Caused by application startup'});
|
||||
await sleep(2000);
|
||||
await sleep(this.stagger);
|
||||
}
|
||||
}
|
||||
|
||||
await this.runModStreams();
|
||||
|
||||
this.running = true;
|
||||
this.nextNannyCheck = dayjs().add(10, 'second');
|
||||
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
|
||||
await this.checkModInvites();
|
||||
await this.healthLoop();
|
||||
}
|
||||
|
||||
@@ -404,15 +475,16 @@ class Bot {
|
||||
try {
|
||||
await this.runApiNanny();
|
||||
this.nextNannyCheck = dayjs().add(10, 'second');
|
||||
} catch (err) {
|
||||
this.logger.info('Delaying next nanny check for 1 minute due to emitted error');
|
||||
this.nextNannyCheck = dayjs().add(120, 'second');
|
||||
} catch (err: any) {
|
||||
this.logger.info('Delaying next nanny check for 4 minutes due to emitted error');
|
||||
this.nextNannyCheck = dayjs().add(240, 'second');
|
||||
}
|
||||
}
|
||||
if(dayjs().isSameOrAfter(this.nextHeartbeat)) {
|
||||
try {
|
||||
await this.heartbeat();
|
||||
} catch (err) {
|
||||
await this.checkModInvites();
|
||||
} catch (err: any) {
|
||||
this.logger.error(`Error occurred during heartbeat check: ${err.message}`);
|
||||
}
|
||||
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
|
||||
@@ -424,20 +496,39 @@ class Bot {
|
||||
async heartbeat() {
|
||||
const heartbeat = `HEARTBEAT -- API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ~${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion === undefined ? 'N/A' : this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`
|
||||
this.logger.info(heartbeat);
|
||||
|
||||
// run sanity check to see if there is a service issue
|
||||
try {
|
||||
await this.testClient(false);
|
||||
} catch (err: any) {
|
||||
throw new SimpleError(`Something isn't right! This could be a Reddit API issue (service is down? buggy??) or an issue with the Bot account. Will not run heartbeat operations and will wait until next heartbeat (${dayjs.duration(this.nextHeartbeat.diff(dayjs())).humanize()}) to try again`);
|
||||
}
|
||||
let startedAny = false;
|
||||
|
||||
for (const s of this.subManagers) {
|
||||
if(s.botState.state === STOPPED && s.botState.causedBy === USER) {
|
||||
this.logger.debug('Skipping config check/restart on heartbeat due to previously being stopped by user', {subreddit: s.displayLabel});
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
// ensure calls to wiki page are also staggered so we aren't hitting api hard when bot has a ton of subreddits to check
|
||||
await sleep(this.stagger);
|
||||
const newConfig = await s.parseConfiguration();
|
||||
if(newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startQueue('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running queue'});
|
||||
}
|
||||
if(newConfig || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startEvents('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running events'});
|
||||
const willStart = newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM) || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM);
|
||||
if(willStart) {
|
||||
// stagger restart
|
||||
if (startedAny) {
|
||||
await sleep(this.stagger);
|
||||
}
|
||||
startedAny = true;
|
||||
if(newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startQueue('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running queue'});
|
||||
}
|
||||
if(newConfig || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM))
|
||||
{
|
||||
await s.startEvents('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running events'});
|
||||
}
|
||||
}
|
||||
if(s.botState.state !== RUNNING && s.eventsState.state === RUNNING && s.queueState.state === RUNNING) {
|
||||
s.botState = {
|
||||
@@ -445,7 +536,7 @@ class Bot {
|
||||
causedBy: 'system',
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.info('Stopping event polling to prevent activity processing queue from backing up. Will be restarted when config update succeeds.')
|
||||
await s.stopEvents('system', {reason: 'Invalid config will cause events to pile up in queue. Will be restarted when config update succeeds (next heartbeat).'});
|
||||
if(!(err instanceof LoggedError)) {
|
||||
@@ -472,7 +563,10 @@ class Bot {
|
||||
// @ts-ignore
|
||||
await this.client.getMe();
|
||||
shouldRetry = false;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(isRateLimitError(err)) {
|
||||
throw err;
|
||||
}
|
||||
shouldRetry = await this.nannyRetryHandler(err);
|
||||
if (!shouldRetry) {
|
||||
throw err;
|
||||
@@ -590,7 +684,7 @@ class Bot {
|
||||
this.nannyMode = undefined;
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error(`Error occurred during nanny loop: ${err.message}`);
|
||||
throw err;
|
||||
}
|
||||
|
||||
@@ -29,7 +29,8 @@ import * as RuleSetSchema from '../Schema/RuleSet.json';
|
||||
import * as ActionSchema from '../Schema/Action.json';
|
||||
import {ActionObjectJson, RuleJson, RuleObjectJson, ActionJson as ActionTypeJson} from "../Common/types";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {Author, AuthorCriteria, AuthorOptions} from "../Author/Author";
|
||||
import {Author, AuthorCriteria, AuthorOptions} from '..';
|
||||
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
|
||||
|
||||
const checkLogName = truncateStringToLength(25);
|
||||
|
||||
@@ -50,7 +51,7 @@ export abstract class Check implements ICheck {
|
||||
dryRun?: boolean;
|
||||
notifyOnTrigger: boolean;
|
||||
resources: SubredditResources;
|
||||
client: Snoowrap;
|
||||
client: ExtendedSnoowrap;
|
||||
|
||||
constructor(options: CheckOptions) {
|
||||
const {
|
||||
@@ -268,7 +269,7 @@ export abstract class Check implements ICheck {
|
||||
// otherwise AND and did not return already so all passed
|
||||
this.logger.info(`${PASS} => Rules: ${resultsSummary(allResults, this.condition)}`);
|
||||
return [true, allRuleResults];
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
e.logged = true;
|
||||
this.logger.warn(`Running rules failed due to uncaught exception`, e);
|
||||
throw e;
|
||||
@@ -345,13 +346,13 @@ export interface ICheck extends JoinCondition, ChecksActivityState {
|
||||
}
|
||||
|
||||
export interface CheckOptions extends ICheck {
|
||||
rules: Array<IRuleSet | IRule>
|
||||
actions: ActionConfig[]
|
||||
logger: Logger
|
||||
subredditName: string
|
||||
notifyOnTrigger?: boolean
|
||||
resources: SubredditResources
|
||||
client: Snoowrap
|
||||
rules: Array<IRuleSet | IRule>;
|
||||
actions: ActionConfig[];
|
||||
logger: Logger;
|
||||
subredditName: string;
|
||||
notifyOnTrigger?: boolean;
|
||||
resources: SubredditResources;
|
||||
client: ExtendedSnoowrap;
|
||||
cacheUserResult?: UserResultCacheOptions;
|
||||
}
|
||||
|
||||
|
||||
@@ -83,7 +83,7 @@ class ImageData {
|
||||
}
|
||||
|
||||
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(!(err instanceof SimpleError)) {
|
||||
throw new Error(`Error occurred while fetching response from URL: ${err.message}`);
|
||||
} else {
|
||||
|
||||
@@ -1,7 +1,7 @@
import {HistoricalStats} from "./interfaces";

export const cacheOptDefaults = {ttl: 60, max: 500, checkPeriod: 600};
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600};
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600, selfTTL: 60};
export const historicalDefaults: HistoricalStats = {
    eventsCheckedTotal: 0,
    eventsActionedTotal: 0,
|
||||
|
||||
@@ -5,6 +5,9 @@ import Poll from "snoostorm/out/util/Poll";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {RuleResult} from "../Rule";
|
||||
import {IncomingMessage} from "http";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import Comment from "snoowrap/dist/objects/Comment";
|
||||
import RedditUser from "snoowrap/dist/objects/RedditUser";
|
||||
|
||||
/**
|
||||
* An ISO 8601 Duration
|
||||
@@ -670,6 +673,24 @@ export interface TTLConfig {
|
||||
* @default 60
|
||||
* */
|
||||
filterCriteriaTTL?: number | boolean;
|
||||
|
||||
/**
|
||||
* Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling
|
||||
*
|
||||
* This is useful to prevent the bot from checking Activities it *just* worked on, or Activities produced by its checks. Examples:
|
||||
*
|
||||
* * Ignore comments created through an Action
|
||||
* * Ignore Activity polled from modqueue that the bot just reported
|
||||
*
|
||||
* This value should be at least as long as the longest polling interval for modqueue/newComm
|
||||
*
|
||||
* * If `0` or `true` will cache indefinitely (not recommended)
|
||||
* * If `false` will not cache
|
||||
*
|
||||
* @examples [50]
|
||||
* @default 50
|
||||
* */
|
||||
selfTTL?: number | boolean
|
||||
}
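A minimal sketch of a caching block using the new `selfTTL` value, mirroring the documented defaults above; the import path is an assumption based on how other files in this diff import from Common/interfaces:

```typescript
// Sketch only
import {TTLConfig} from "../Common/interfaces";

const caching: TTLConfig = {
    authorTTL: 60,
    wikiTTL: 300,
    // ignore Activities the bot just created or acted on for 50 seconds;
    // should be at least as long as the longest modqueue/newComm polling interval
    selfTTL: 50,
};
```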
|
||||
|
||||
export interface CacheConfig extends TTLConfig {
|
||||
@@ -832,6 +853,8 @@ export interface ManagerOptions {
|
||||
nickname?: string
|
||||
|
||||
notifications?: NotificationConfig
|
||||
|
||||
credentials?: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -907,6 +930,20 @@ export interface ActivityState {
|
||||
distinguished?: boolean
|
||||
approved?: boolean
|
||||
score?: CompareValue
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>`
|
||||
*
|
||||
* * EX `> 2` => greater than 2 total reports
|
||||
*
|
||||
* Defaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:
|
||||
*
|
||||
* * EX `> 3 mod` => greater than 3 mod reports
|
||||
* * EX `>= 1 user` => at least 1 user report
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* */
|
||||
reports?: CompareValue
|
||||
age?: DurationComparor
|
||||
}
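A hedged sketch of an `ActivityState` criteria using the new `reports` comparison syntax documented above (import path assumed from elsewhere in this diff):

```typescript
// Sketch only
import {ActivityState} from "../Common/interfaces";

// Matches Activities with more than 3 mod reports that have not been approved
const heavilyReported: ActivityState = {
    reports: '> 3 mod',
    approved: false,
};
```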
|
||||
@@ -948,6 +985,14 @@ export interface CommentState extends ActivityState {
|
||||
* A list of SubmissionState attributes to test the Submission this comment is in
|
||||
* */
|
||||
submissionState?: SubmissionState[]
|
||||
|
||||
/**
|
||||
* The (nested) level of a comment.
|
||||
*
|
||||
* 0 means the comment is top-level (a direct reply to the submission)
* a non-zero value of N means the comment has N parent comments
|
||||
* */
|
||||
depth?: DurationComparor
|
||||
}
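A short sketch of the new `depth` property, assuming it accepts the same `> <number>` comparison syntax used by other compared values:

```typescript
// Sketch only -- comparison syntax is an assumption
import {CommentState} from "../Common/interfaces";

// A comment with at least one parent comment, i.e. not a top-level reply
const nestedComment: CommentState = {
    depth: '> 0',
};
```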
|
||||
|
||||
/**
|
||||
@@ -979,6 +1024,8 @@ export interface SubredditState {
|
||||
* A friendly description of what this State is trying to parse
|
||||
* */
|
||||
stateDescription?: string
|
||||
|
||||
isUserProfile?: boolean
|
||||
}
|
||||
|
||||
export interface StrongSubredditState extends SubredditState {
|
||||
@@ -1006,6 +1053,28 @@ export const STOPPED = 'stopped';
|
||||
export const RUNNING = 'running';
|
||||
export const PAUSED = 'paused';
|
||||
|
||||
export interface SearchAndReplaceRegExp {
|
||||
/**
|
||||
* The search value to test for
|
||||
*
|
||||
* Can be a normal string (converted to a case-sensitive literal) or a valid regular expression
|
||||
*
|
||||
* EX `["find this string", "/some string*\/ig"]`
|
||||
*
|
||||
* @examples ["find this string", "/some string*\/ig"]
|
||||
* */
|
||||
search: string
|
||||
|
||||
/**
|
||||
* The replacement string/value to use when search is found
|
||||
*
|
||||
* This can be a literal string like `'replace with this'`, an empty string to remove the search value (`''`), or a special regex value
|
||||
*
|
||||
* See replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace
|
||||
* */
|
||||
replace: string
|
||||
}
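Two hedged examples following the `search`/`replace` documentation above (import path assumed from elsewhere in this diff):

```typescript
// Sketch only
import {SearchAndReplaceRegExp} from "../Common/interfaces";

// A regex search that strips URLs entirely (empty replace removes the matched value)
const stripUrls: SearchAndReplaceRegExp = {
    search: '/https?:\\/\\/\\S+/ig',
    replace: '',
};

// A plain string is treated as a case-sensitive literal
const literalSwap: SearchAndReplaceRegExp = {
    search: 'find this string',
    replace: 'replace with this',
};
```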
|
||||
|
||||
export interface NamedGroup {
|
||||
[name: string]: string
|
||||
}
|
||||
@@ -1038,6 +1107,7 @@ export type StrongCache = {
|
||||
submissionTTL: number | boolean,
|
||||
commentTTL: number | boolean,
|
||||
subredditTTL: number | boolean,
|
||||
selfTTL: number | boolean,
|
||||
filterCriteriaTTL: number | boolean,
|
||||
provider: CacheOptions
|
||||
actionedEventsMax?: number,
|
||||
@@ -1255,6 +1325,32 @@ export interface WebCredentials {
|
||||
redirectUri?: string,
|
||||
}
|
||||
|
||||
export interface SnoowrapOptions {
|
||||
/**
|
||||
* Proxy all requests to Reddit's API through this endpoint
|
||||
*
|
||||
* * ENV => `PROXY`
|
||||
* * ARG => `--proxy <proxyEndpoint>`
|
||||
*
|
||||
* @examples ["http://localhost:4443"]
|
||||
* */
|
||||
proxy?: string,
|
||||
/**
|
||||
* Manually set the debug status for snoowrap
|
||||
*
|
||||
* When snoowrap has `debug: true` it will log the http status response of reddit api requests at the `debug` level
|
||||
*
|
||||
* * Set to `true` to always output
|
||||
* * Set to `false` to never output
|
||||
*
|
||||
* If not present or `null` will be set based on `logLevel`
|
||||
*
|
||||
* * ENV => `SNOO_DEBUG`
|
||||
* * ARG => `--snooDebug`
|
||||
* */
|
||||
debug?: boolean,
|
||||
}
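A sketch of top-level snoowrap defaults using the new `SnoowrapOptions` interface; the proxy value is the interface's own example, and the import path is assumed:

```typescript
// Sketch only
import {SnoowrapOptions} from "../Common/interfaces";

// Top-level defaults applied to any bot that does not specify its own snoowrap block
const snoowrap: SnoowrapOptions = {
    proxy: 'http://localhost:4443', // the interface's own example value
    debug: true,                    // always log http status responses at the debug level
};
```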
|
||||
|
||||
/**
|
||||
* The configuration for an **individual reddit account** ContextMod will run as a bot.
|
||||
*
|
||||
@@ -1264,7 +1360,7 @@ export interface WebCredentials {
|
||||
*
|
||||
* */
|
||||
export interface BotInstanceJsonConfig {
|
||||
credentials?: RedditCredentials
|
||||
credentials?: BotCredentialsJsonConfig | RedditCredentials
|
||||
/*
|
||||
* The name to display for the bot. If not specified will use the name of the reddit account IE `u/TheBotName`
|
||||
* */
|
||||
@@ -1275,33 +1371,13 @@ export interface BotInstanceJsonConfig {
|
||||
notifications?: NotificationConfig
|
||||
|
||||
/**
|
||||
* Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior
|
||||
* Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior.
|
||||
*
|
||||
* Overrides any defaults provided at top-level operator config.
|
||||
*
|
||||
* Set to an empty object to "ignore" any top-level config
|
||||
* */
|
||||
snoowrap?: {
|
||||
/**
|
||||
* Proxy all requests to Reddit's API through this endpoint
|
||||
*
|
||||
* * ENV => `PROXY`
|
||||
* * ARG => `--proxy <proxyEndpoint>`
|
||||
*
|
||||
* @examples ["http://localhost:4443"]
|
||||
* */
|
||||
proxy?: string,
|
||||
/**
|
||||
* Manually set the debug status for snoowrap
|
||||
*
|
||||
* When snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level
|
||||
*
|
||||
* * Set to `true` to always output
|
||||
* * Set to `false` to never output
|
||||
*
|
||||
* If not present or `null` will be set based on `logLevel`
|
||||
*
|
||||
* * ENV => `SNOO_DEBUG`
|
||||
* * ARG => `--snooDebug`
|
||||
* */
|
||||
debug?: boolean,
|
||||
}
|
||||
snoowrap?: SnoowrapOptions
|
||||
|
||||
/**
|
||||
* Settings related to bot behavior for subreddits it is managing
|
||||
@@ -1524,6 +1600,11 @@ export interface OperatorJsonConfig {
|
||||
* */
|
||||
caching?: OperatorCacheConfig
|
||||
|
||||
/**
|
||||
* Set global snoowrap options as well as default snoowrap config for all bots that don't specify their own
|
||||
* */
|
||||
snoowrap?: SnoowrapOptions
|
||||
|
||||
bots?: BotInstanceJsonConfig[]
|
||||
|
||||
/**
|
||||
@@ -1644,6 +1725,8 @@ export interface OperatorJsonConfig {
|
||||
* */
|
||||
friendly?: string,
|
||||
}
|
||||
|
||||
credentials?: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
export interface RequiredOperatorRedditCredentials extends RedditCredentials {
|
||||
@@ -1657,8 +1740,23 @@ export interface RequiredWebRedditCredentials extends RedditCredentials {
|
||||
redirectUri: string
|
||||
}
|
||||
|
||||
export interface ThirdPartyCredentialsJsonConfig {
|
||||
youtube?: {
|
||||
apiKey: string
|
||||
}
|
||||
[key: string]: any
|
||||
}
|
||||
|
||||
export interface BotCredentialsJsonConfig extends ThirdPartyCredentialsJsonConfig {
|
||||
reddit: RedditCredentials
|
||||
}
|
||||
|
||||
export interface BotCredentialsConfig extends ThirdPartyCredentialsJsonConfig {
|
||||
reddit: RequiredOperatorRedditCredentials
|
||||
}
|
||||
|
||||
export interface BotInstanceConfig extends BotInstanceJsonConfig {
|
||||
credentials: RequiredOperatorRedditCredentials
|
||||
credentials: BotCredentialsJsonConfig
|
||||
snoowrap: {
|
||||
proxy?: string,
|
||||
debug?: boolean,
|
||||
@@ -1720,6 +1818,7 @@ export interface OperatorConfig extends OperatorJsonConfig {
|
||||
friendly?: string,
|
||||
}
|
||||
bots: BotInstanceConfig[]
|
||||
credentials: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
//export type OperatorConfig = Required<OperatorJsonConfig>;
|
||||
@@ -1749,20 +1848,18 @@ export interface LogInfo {
    bot?: string
}

export interface ActionResult {
export interface ActionResult extends ActionProcessResult {
    kind: string,
    name: string,
    run: boolean,
    runReason?: string,
    dryRun: boolean,
    success: boolean,
    result?: string,
}

export interface ActionProcessResult {
    success: boolean,
    dryRun: boolean,
    result?: string
    touchedEntities?: (Submission | Comment | RedditUser | string)[]
}
|
||||
|
||||
export interface ActionedEvent {
|
||||
@@ -1799,6 +1896,14 @@ export interface StatusCodeError extends Error {
|
||||
error: Error
|
||||
}
|
||||
|
||||
export interface RequestError extends Error {
|
||||
name: 'RequestError',
|
||||
statusCode: number,
|
||||
message: string,
|
||||
response: IncomingMessage,
|
||||
error: Error
|
||||
}
|
||||
|
||||
export interface HistoricalStatsDisplay extends HistoricalStats {
|
||||
checksRunTotal: number
|
||||
checksFromCacheTotal: number
|
||||
@@ -1878,3 +1983,27 @@ export interface HistoricalStatUpdateData {
|
||||
rulesCachedTotal: number
|
||||
rulesTriggered: string[] | string
|
||||
}
|
||||
|
||||
export type SearchFacetType = 'title' | 'url' | 'duplicates' | 'crossposts' | 'external';
|
||||
|
||||
export interface RepostItem {
|
||||
value: string
|
||||
createdOn?: number
|
||||
source: string
|
||||
sourceUrl?: string
|
||||
score?: number
|
||||
id: string
|
||||
itemType: string
|
||||
acquisitionType: SearchFacetType | 'comment'
|
||||
sourceObj?: any
|
||||
reqSameness?: number
|
||||
}
|
||||
|
||||
export interface RepostItemResult extends RepostItem {
|
||||
sameness: number
|
||||
}
|
||||
|
||||
export interface StringComparisonOptions {
|
||||
lengthWeight?: number,
|
||||
transforms?: ((str: string) => string)[]
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ import {RepeatActivityJSONConfig} from "../Rule/RepeatActivityRule";
|
||||
import {AuthorRuleJSONConfig} from "../Rule/AuthorRule";
|
||||
import {AttributionJSONConfig} from "../Rule/AttributionRule";
|
||||
import {FlairActionJson} from "../Action/SubmissionAction/FlairAction";
|
||||
import {UserFlairActionJson} from "../Action/UserFlairAction";
|
||||
import {CommentActionJson} from "../Action/CommentAction";
|
||||
import {ReportActionJson} from "../Action/ReportAction";
|
||||
import {LockActionJson} from "../Action/LockAction";
|
||||
@@ -13,9 +14,17 @@ import {ApproveActionJson} from "../Action/ApproveAction";
|
||||
import {BanActionJson} from "../Action/BanAction";
|
||||
import {RegexRuleJSONConfig} from "../Rule/RegexRule";
|
||||
import {MessageActionJson} from "../Action/MessageAction";
|
||||
import {RepostRuleJSONConfig} from "../Rule/RepostRule";
|
||||
|
||||
export type RuleJson = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | string;
|
||||
export type RuleJson = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | RepostRuleJSONConfig | string;
|
||||
export type RuleObjectJson = Exclude<RuleJson, string>
|
||||
|
||||
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | string;
|
||||
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | UserFlairActionJson | string;
|
||||
export type ActionObjectJson = Exclude<ActionJson, string>;
|
||||
|
||||
// borrowed from https://github.com/jabacchetta/set-random-interval/blob/master/src/index.ts
|
||||
export type SetRandomInterval = (
|
||||
intervalFunction: () => void,
|
||||
minDelay: number,
|
||||
maxDelay: number,
|
||||
) => { clear: () => void };
|
||||
|
||||
@@ -31,7 +31,7 @@ import {
|
||||
CacheOptions,
|
||||
BotInstanceJsonConfig,
|
||||
BotInstanceConfig,
|
||||
RequiredWebRedditCredentials
|
||||
RequiredWebRedditCredentials, RedditCredentials, BotCredentialsJsonConfig, BotCredentialsConfig
|
||||
} from "./Common/interfaces";
|
||||
import {isRuleSetJSON, RuleSetJson, RuleSetObjectJson} from "./Rule/RuleSet";
|
||||
import deepEqual from "fast-deep-equal";
|
||||
@@ -281,8 +281,6 @@ export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfi
|
||||
heartbeat,
|
||||
hardLimit,
|
||||
authorTTL,
|
||||
snooProxy,
|
||||
snooDebug,
|
||||
sharedMod,
|
||||
caching,
|
||||
} = args || {};
|
||||
@@ -294,10 +292,6 @@ export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfi
|
||||
accessToken,
|
||||
refreshToken,
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: snooProxy,
|
||||
debug: snooDebug,
|
||||
},
|
||||
subreddits: {
|
||||
names: subreddits,
|
||||
wikiConfig,
|
||||
@@ -330,6 +324,8 @@ export const parseOpConfigFromArgs = (args: any): OperatorJsonConfig => {
|
||||
mode,
|
||||
caching,
|
||||
authorTTL,
|
||||
snooProxy,
|
||||
snooDebug,
|
||||
} = args || {};
|
||||
|
||||
const data = {
|
||||
@@ -346,6 +342,10 @@ export const parseOpConfigFromArgs = (args: any): OperatorJsonConfig => {
|
||||
provider: caching,
|
||||
authorTTL
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: snooProxy,
|
||||
debug: snooDebug,
|
||||
},
|
||||
web: {
|
||||
enabled: web,
|
||||
port,
|
||||
@@ -387,10 +387,13 @@ const parseListFromEnv = (val: string | undefined) => {
|
||||
export const parseDefaultBotInstanceFromEnv = (): BotInstanceJsonConfig => {
|
||||
const data = {
|
||||
credentials: {
|
||||
clientId: process.env.CLIENT_ID,
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
accessToken: process.env.ACCESS_TOKEN,
|
||||
refreshToken: process.env.REFRESH_TOKEN,
|
||||
reddit: {
|
||||
clientId: process.env.CLIENT_ID,
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
accessToken: process.env.ACCESS_TOKEN,
|
||||
refreshToken: process.env.REFRESH_TOKEN,
|
||||
},
|
||||
youtube: process.env.YOUTUBE_API_KEY
|
||||
},
|
||||
subreddits: {
|
||||
names: parseListFromEnv(process.env.SUBREDDITS),
|
||||
@@ -398,10 +401,6 @@ export const parseDefaultBotInstanceFromEnv = (): BotInstanceJsonConfig => {
|
||||
dryRun: parseBool(process.env.DRYRUN, undefined),
|
||||
heartbeatInterval: process.env.HEARTBEAT !== undefined ? parseInt(process.env.HEARTBEAT) : undefined,
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: process.env.PROXY,
|
||||
debug: parseBool(process.env.SNOO_DEBUG, undefined),
|
||||
},
|
||||
polling: {
|
||||
sharedMod: parseBool(process.env.SHARE_MOD),
|
||||
},
|
||||
@@ -432,6 +431,10 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
|
||||
},
|
||||
authorTTL: process.env.AUTHOR_TTL !== undefined ? parseInt(process.env.AUTHOR_TTL) : undefined
|
||||
},
|
||||
snoowrap: {
|
||||
proxy: process.env.PROXY,
|
||||
debug: parseBool(process.env.SNOO_DEBUG, undefined),
|
||||
},
|
||||
web: {
|
||||
port: process.env.PORT !== undefined ? parseInt(process.env.PORT) : undefined,
|
||||
session: {
|
||||
@@ -443,6 +446,11 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
|
||||
clientSecret: process.env.CLIENT_SECRET,
|
||||
redirectUri: process.env.REDIRECT_URI,
|
||||
},
|
||||
},
|
||||
credentials: {
|
||||
youtube: {
|
||||
apiKey: process.env.YOUTUBE_API_KEY
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -476,7 +484,7 @@ export const parseOperatorConfigFromSources = async (args: any): Promise<Operato
|
||||
process.env[k] = v;
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
let msg = 'No .env file found at default location (./env)';
|
||||
if (envPath !== undefined) {
|
||||
msg = `${msg} or OPERATOR_ENV path (${envPath})`;
|
||||
@@ -492,14 +500,14 @@ export const parseOperatorConfigFromSources = async (args: any): Promise<Operato
|
||||
let rawConfig;
|
||||
try {
|
||||
rawConfig = await readConfigFile(operatorConfig, {log: initLogger}) as object;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
initLogger.error('Cannot continue app startup because operator config file was not parseable.');
|
||||
err.logged = true;
|
||||
throw err;
|
||||
}
|
||||
try {
|
||||
configFromFile = validateJson(rawConfig, operatorSchema, initLogger) as OperatorJsonConfig;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
initLogger.error('Cannot continue app startup because operator config file was not valid.');
|
||||
throw err;
|
||||
}
|
||||
@@ -560,11 +568,13 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
credentials: webCredentials,
|
||||
operators,
|
||||
} = {},
|
||||
snoowrap: snoowrapOp = {},
|
||||
api: {
|
||||
port: apiPort = 8095,
|
||||
secret: apiSecret = randomId(),
|
||||
friendly,
|
||||
} = {},
|
||||
credentials = {},
|
||||
bots = [],
|
||||
} = data;
|
||||
|
||||
@@ -631,12 +641,8 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
softLimit = 250,
|
||||
hardLimit = 50
|
||||
} = {},
|
||||
snoowrap = {},
|
||||
credentials: {
|
||||
clientId: ci,
|
||||
clientSecret: cs,
|
||||
...restCred
|
||||
} = {},
|
||||
snoowrap = snoowrapOp,
|
||||
credentials = {},
|
||||
subreddits: {
|
||||
names = [],
|
||||
exclude = [],
|
||||
@@ -646,7 +652,6 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
} = {},
|
||||
} = x;
|
||||
|
||||
|
||||
let botCache: StrongCache;
|
||||
let botActionedEventsDefault: number;
|
||||
|
||||
@@ -697,11 +702,42 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
}
|
||||
}
|
||||
|
||||
const botCreds = {
|
||||
clientId: (ci as string),
|
||||
clientSecret: (cs as string),
|
||||
...restCred,
|
||||
};
|
||||
let botCreds: BotCredentialsConfig;
|
||||
|
||||
if((credentials as any).clientId !== undefined) {
|
||||
const creds = credentials as RedditCredentials;
|
||||
const {
|
||||
clientId: ci,
|
||||
clientSecret: cs,
|
||||
...restCred
|
||||
} = creds;
|
||||
botCreds = {
|
||||
reddit: {
|
||||
clientId: (ci as string),
|
||||
clientSecret: (cs as string),
|
||||
...restCred,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
const creds = credentials as BotCredentialsJsonConfig;
|
||||
const {
|
||||
reddit: {
|
||||
clientId: ci,
|
||||
clientSecret: cs,
|
||||
...restRedditCreds
|
||||
},
|
||||
...rest
|
||||
} = creds;
|
||||
botCreds = {
|
||||
reddit: {
|
||||
clientId: (ci as string),
|
||||
clientSecret: (cs as string),
|
||||
...restRedditCreds,
|
||||
},
|
||||
...rest
|
||||
}
|
||||
}
|
||||
|
||||
if (botCache.provider.prefix === undefined || botCache.provider.prefix === defaultProvider.prefix) {
|
||||
// need to provide unique prefix to bot
|
||||
botCache.provider.prefix = buildCachePrefix([botCache.provider.prefix, 'bot', (botName || objectHash.sha1(botCreds))]);
|
||||
@@ -773,6 +809,7 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
|
||||
friendly
|
||||
},
|
||||
bots: hydratedBots,
|
||||
credentials,
|
||||
};
|
||||
|
||||
return config;
|
||||
|
||||
@@ -43,7 +43,7 @@ const parseLink = parseUsableLinkIdentifier();
|
||||
export class RecentActivityRule extends Rule {
|
||||
window: ActivityWindowType;
|
||||
thresholds: ActivityThreshold[];
|
||||
useSubmissionAsReference: boolean;
|
||||
useSubmissionAsReference: boolean | undefined;
|
||||
imageDetection: StrongImageDetection
|
||||
lookAt?: 'comments' | 'submissions';
|
||||
|
||||
@@ -51,7 +51,7 @@ export class RecentActivityRule extends Rule {
|
||||
super(options);
|
||||
const {
|
||||
window = 15,
|
||||
useSubmissionAsReference = true,
|
||||
useSubmissionAsReference,
|
||||
imageDetection,
|
||||
lookAt,
|
||||
} = options || {};
|
||||
@@ -128,7 +128,13 @@ export class RecentActivityRule extends Rule {
|
||||
}
|
||||
|
||||
let viableActivity = activities;
|
||||
if (this.useSubmissionAsReference) {
|
||||
// if config does not specify reference then we set the default based on whether the item is a submission or not
|
||||
// -- this is essentially the same as defaulting reference to true BUT eliminates noisy "can't use comment as reference" log statement when item is a comment
|
||||
let inferredSubmissionAsRef = this.useSubmissionAsReference;
|
||||
if(inferredSubmissionAsRef === undefined) {
|
||||
inferredSubmissionAsRef = isSubmission(item);
|
||||
}
|
||||
if (inferredSubmissionAsRef) {
|
||||
if (!asSubmission(item)) {
|
||||
this.logger.warn('Cannot use post as reference because triggered item is not a Submission');
|
||||
} else if (item.is_self) {
|
||||
@@ -165,7 +171,7 @@ export class RecentActivityRule extends Rule {
|
||||
// if (referenceImage.preferredResolution !== undefined) {
|
||||
// await (referenceImage.getSimilarResolutionVariant(...referenceImage.preferredResolution) as ImageData).sharp();
|
||||
// }
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.verbose(err.message);
|
||||
}
|
||||
}
|
||||
@@ -241,11 +247,11 @@ export class RecentActivityRule extends Rule {
|
||||
if (sameImage) {
|
||||
return x;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.warn(`Unexpected error encountered while pixel-comparing images, will skip comparison => ${err.message}`);
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(!err.message.includes('did not end with a valid image extension')) {
|
||||
this.logger.warn(`Will not compare image from Submission ${x.id} due to error while parsing image URL => ${err.message}`);
|
||||
}
|
||||
@@ -421,6 +427,7 @@ export class RecentActivityRule extends Rule {
|
||||
threshold,
|
||||
testValue,
|
||||
karmaThreshold,
|
||||
combinedKarma,
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -501,6 +508,16 @@ interface RecentActivityConfig extends ActivityWindow, ReferenceSubmission {
|
||||
thresholds: ActivityThreshold[],
|
||||
|
||||
imageDetection?: ImageDetection
|
||||
|
||||
/**
|
||||
* When the Activity is a Submission, should only activities that are other Submissions with the same content be included?
|
||||
*
|
||||
* * When the Activity is a submission this defaults to **true**
|
||||
* * When the Activity is a comment it is ignored (not relevant)
|
||||
*
|
||||
* @default true
|
||||
* */
|
||||
useSubmissionAsReference?: boolean
|
||||
}
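A hedged fragment illustrating the changed `useSubmissionAsReference` behavior described above; a plain object is used because the rest of `RecentActivityConfig` (e.g. `thresholds`) is omitted here:

```typescript
// Sketch only -- when omitted, useSubmissionAsReference is now inferred:
// true when the checked Activity is a Submission, ignored for Comments
const recentActivityFragment = {
    window: 15,                      // the constructor's default window
    useSubmissionAsReference: true,  // only count other submissions sharing the checked submission's content
};
```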
|
||||
|
||||
export interface RecentActivityRuleOptions extends RecentActivityConfig, RuleOptions {
|
||||
|
||||
@@ -296,6 +296,7 @@ export class RegexRule extends Rule {
|
||||
|
||||
const logSummary: string[] = [];
|
||||
let index = 0;
|
||||
let matchSample = undefined;
|
||||
for (const c of criteriaResults) {
|
||||
index++;
|
||||
let msg = `Criteria ${c.criteria.name || `#${index}`} ${triggeredIndicator(c.triggered)}`;
|
||||
@@ -309,8 +310,8 @@ export class RegexRule extends Rule {
|
||||
}
|
||||
msg = `${msg} (Window: ${c.criteria.window})`;
|
||||
if(c.matches.length > 0) {
|
||||
let matchSample = `-- Matched Values: ${c.matches.slice(0, 3).map(x => `"${x}"`).join(', ')}${c.matches.length > 3 ? `, and ${c.matches.length - 3} more...` : ''}`;
|
||||
logSummary.push(`${msg} ${matchSample}`);
|
||||
matchSample = `${c.matches.slice(0, 3).map(x => `"${x}"`).join(', ')}${c.matches.length > 3 ? `, and ${c.matches.length - 3} more...` : ''}`;
|
||||
logSummary.push(`${msg} -- Matched Values: ${matchSample}`);
|
||||
} else {
|
||||
logSummary.push(msg);
|
||||
}
|
||||
@@ -319,7 +320,7 @@ export class RegexRule extends Rule {
|
||||
const result = `${triggeredIndicator(criteriaMet)} ${logSummary.join(' || ')}`;
|
||||
this.logger.verbose(result);
|
||||
|
||||
return Promise.resolve([criteriaMet, this.getResult(criteriaMet, {result, data: criteriaResults})]);
|
||||
return Promise.resolve([criteriaMet, this.getResult(criteriaMet, {result, data: {results: criteriaResults, matchSample }})]);
|
||||
}
|
||||
|
||||
protected getMatchesFromActivity(a: (Submission | Comment), testOn: string[], reg: RegExp): string[] {
|
||||
|
||||
897
src/Rule/RepostRule.ts
Normal file
@@ -0,0 +1,897 @@
|
||||
import {Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
|
||||
import {Listing, SearchOptions} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import Comment from "snoowrap/dist/objects/Comment";
|
||||
import {
|
||||
compareDurationValue,
|
||||
comparisonTextOp,
|
||||
FAIL, formatNumber,
|
||||
isRepostItemResult, parseDurationComparison, parseGenericValueComparison,
|
||||
parseUsableLinkIdentifier,
|
||||
PASS, searchAndReplace, stringSameness, triggeredIndicator, windowToActivityWindowCriteria, wordCount
|
||||
} from "../util";
|
||||
import {
|
||||
ActivityWindow,
|
||||
ActivityWindowType,
|
||||
CompareValue, DurationComparor,
|
||||
JoinOperands,
|
||||
RepostItem,
|
||||
RepostItemResult,
|
||||
SearchAndReplaceRegExp,
|
||||
SearchFacetType,
|
||||
} from "../Common/interfaces";
|
||||
import objectHash from "object-hash";
|
||||
import {getActivities, getAttributionIdentifier} from "../Utils/SnoowrapUtils";
|
||||
import Fuse from "fuse.js";
|
||||
import leven from "leven";
|
||||
import {YoutubeClient, commentsAsRepostItems} from "../Utils/ThirdParty/YoutubeClient";
|
||||
import dayjs from "dayjs";
|
||||
import {rest} from "lodash";
|
||||
|
||||
const parseYtIdentifier = parseUsableLinkIdentifier();
|
||||
|
||||
export interface TextMatchOptions {
|
||||
/**
|
||||
* The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match
|
||||
*
|
||||
* Note: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere
|
||||
*
|
||||
* Defaults to `85` (85%)
|
||||
*
|
||||
* @default 85
|
||||
* @example [85]
|
||||
* */
|
||||
matchScore?: number
|
||||
|
||||
/**
|
||||
* The minimum number of words the activity being checked must contain in order for this rule to run
|
||||
*
|
||||
* If the word count is below the minimum the rule fails
|
||||
*
|
||||
* Defaults to 2
|
||||
*
|
||||
* @default 2
|
||||
* @example [2]
|
||||
* */
|
||||
minWordCount?: number
|
||||
|
||||
/**
|
||||
* Should text matching be case sensitive?
|
||||
*
|
||||
* Defaults to false
|
||||
*
|
||||
* @default false
|
||||
* @example [false]
|
||||
**/
|
||||
caseSensitive?: boolean
|
||||
}
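// A minimal sketch showing TextMatchOptions populated with its documented defaults.
const matchOptionsExample: TextMatchOptions = {
    matchScore: 85,      // require 85% weighted similarity between texts
    minWordCount: 2,     // skip the rule if the checked activity has fewer than 2 words
    caseSensitive: false // compare text case-insensitively
};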
|
||||
|
||||
export interface TextTransformOptions {
|
||||
/**
|
||||
* A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.
|
||||
*
|
||||
* * If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text
|
||||
* * If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text
|
||||
* */
|
||||
transformations?: SearchAndReplaceRegExp[]
|
||||
|
||||
/**
|
||||
* Specify a separate set of transformations for the activity text (submission title or comment)
|
||||
*
|
||||
* To perform no transformations when `transformations` is defined set this to an empty array (`[]`)
|
||||
* */
|
||||
transformationsActivity?: SearchAndReplaceRegExp[]
|
||||
}
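// A minimal sketch of TextTransformOptions: strip an assumed "[OC]" prefix from
// repost candidate text while leaving the triggering activity's text untouched.
const transformOptionsExample: TextTransformOptions = {
    transformations: [
        // search uses the same string-or-regex format as SearchAndReplaceRegExp
        { search: '/^\\[OC\\]\\s*/i', replace: '' }
    ],
    transformationsActivity: [] // explicitly perform no transformations on the activity text
};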
|
||||
|
||||
export interface SearchFacetJSONConfig extends TextMatchOptions, TextTransformOptions, ActivityWindow {
|
||||
kind: SearchFacetType | SearchFacetType[]
|
||||
}
|
||||
|
||||
export interface SearchFacet extends SearchFacetJSONConfig {
|
||||
kind: SearchFacetType
|
||||
}
|
||||
|
||||
export type TimeBasedSelector = "newest" | "oldest" | "any" | "all";
|
||||
|
||||
export interface OccurredAt {
|
||||
/**
|
||||
* Which repost to test on
|
||||
*
|
||||
* * `any` -- ANY repost passing `condition` will cause this criteria to be true
|
||||
* * `all` -- ALL reposts must pass `condition` for this criteria to be true
|
||||
* */
|
||||
"testOn": TimeBasedSelector,
|
||||
"condition": DurationComparor
|
||||
}
|
||||
|
||||
export interface OccurrenceTests {
|
||||
count?: {
|
||||
condition?: JoinOperands
|
||||
/**
|
||||
* An array of strings containing a comparison operator and the number of repost occurrences to compare against
|
||||
*
|
||||
* Examples:
|
||||
*
|
||||
* * `">= 7"` -- TRUE if 7 or more reposts were found
|
||||
* * `"< 1"` -- TRUE if less than 0 reposts were found
|
||||
* */
|
||||
test: CompareValue[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the time the reposts occurred at
|
||||
* */
|
||||
time?: {
|
||||
/**
|
||||
* How to test all the specified comparisons
|
||||
*
|
||||
* * AND -- All criteria must be true
|
||||
* * OR -- Any criteria must be true
|
||||
*
|
||||
* Defaults to AND
|
||||
*
|
||||
* @default AND
|
||||
* @example ["AND", "OR"]
|
||||
* */
|
||||
condition?: JoinOperands
|
||||
/**
|
||||
* An array of time-based conditions to test against found reposts (test when a repost was made)
|
||||
* */
|
||||
test: OccurredAt[]
|
||||
}
|
||||
}
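// A minimal sketch of an OccurrenceTests value: passes when at least one repost
// was found AND the newest repost is older than 30 days.
const occurrenceTestExample: OccurrenceTests = {
    count: {
        test: ['> 0'] // any repost at all
    },
    time: {
        condition: 'AND',
        test: [
            { testOn: 'newest', condition: '> 30 days' }
        ]
    }
};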
|
||||
|
||||
/**
|
||||
* A set of criteria used to find reposts
|
||||
*
|
||||
* Contains options and conditions used to define how candidate reposts are retrieved and if they are a match.
|
||||
*
|
||||
* */
|
||||
export interface RepostCriteria extends ActivityWindow, TextMatchOptions, TextTransformOptions {
|
||||
/**
|
||||
* Define how to find candidate reposts
|
||||
*
|
||||
* * **title** -- search reddit for submissions with the same title
|
||||
* * **url** -- search reddit for submissions with the same url
|
||||
* * **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)
|
||||
* */
|
||||
searchOn?: (SearchFacetType | SearchFacetJSONConfig)[]
|
||||
|
||||
/**
|
||||
* A set of comparisons to test against the number of reposts found
|
||||
*
|
||||
* If not specified the default is "AND [occurrences] > 0" IE any repost found makes this test pass
|
||||
* */
|
||||
occurrences?: {
|
||||
/**
|
||||
* How to test all the specified comparisons
|
||||
*
|
||||
* * AND -- All criteria must be true
|
||||
* * OR -- Any criteria must be true
|
||||
*
|
||||
* Defaults to AND
|
||||
*
|
||||
* @default AND
|
||||
* @example ["AND", "OR"]
|
||||
* */
|
||||
condition?: JoinOperands
|
||||
|
||||
criteria?: OccurrenceTests[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the time the reposts occurred at
|
||||
* */
|
||||
occurredAt?: {
|
||||
/**
|
||||
* How to test all the specified comparisons
|
||||
*
|
||||
* * AND -- All criteria must be true
|
||||
* * OR -- Any criteria must be true
|
||||
*
|
||||
* Defaults to AND
|
||||
*
|
||||
* @default AND
|
||||
* @example ["AND", "OR"]
|
||||
* */
|
||||
condition?: JoinOperands
|
||||
/**
|
||||
* An array of time-based conditions to test against found reposts (test when a repost was made)
|
||||
* */
|
||||
criteria: OccurredAt[]
|
||||
}
|
||||
|
||||
/**
|
||||
* The maximum number of comments/submissions to check
|
||||
*
|
||||
* In both cases this list is gathered by sorting all submissions, or all comments from all submissions, by number of votes and taking the "top" maximum specified
|
||||
*
|
||||
* For comment checks this is the number of comments cached
|
||||
*
|
||||
* @default 50
|
||||
* @example [50]
|
||||
* */
|
||||
maxRedditItems?: number
|
||||
|
||||
/**
|
||||
* The maximum number of external items (youtube comments) to check (and cache for comment checks)
|
||||
*
|
||||
* @default 50
|
||||
* @example [50]
|
||||
* */
|
||||
maxExternalItems?: number
|
||||
}
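// A minimal sketch of a RepostCriteria combining the options documented above:
// search by title/url/duplicates over the last 90 days and require more than 2 reposts.
const repostCriteriaExample: RepostCriteria = {
    searchOn: ['title', 'url', 'duplicates'],
    window: '90 days',
    matchScore: 85,
    maxRedditItems: 50,
    occurrences: {
        condition: 'AND',
        criteria: [
            { count: { test: ['> 2'] } }
        ]
    }
};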
|
||||
|
||||
export interface CriteriaResult {
|
||||
passed: boolean
|
||||
conditionsSummary: string
|
||||
items: RepostItemResult[]
|
||||
}
|
||||
|
||||
const parentSubmissionSearchFacetDefaults = {
|
||||
title: {
|
||||
matchScore: 85,
|
||||
minWordCount: 3
|
||||
},
|
||||
url: {
|
||||
matchScore: 0, // when looking for submissions to find repost comments on automatically include any with exact same url
|
||||
},
|
||||
duplicates: {
|
||||
matchScore: 0, // when looking for submissions to find repost comments on automatically include any that reddit thinks are duplicates
|
||||
},
|
||||
crossposts: {
|
||||
matchScore: 0, // when looking for submissions to find repost comments on automatically include any that reddit thinks are crossposts
|
||||
},
|
||||
external: {}
|
||||
}
|
||||
|
||||
const isSearchFacetType = (val: any): val is SearchFacetType => {
|
||||
if (typeof val === 'string') {
|
||||
return ['title', 'url', 'duplicates', 'crossposts', 'external'].includes(val);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const generateSearchFacet = (val: SearchFacetType | SearchFacetJSONConfig): SearchFacet[] => {
|
||||
let facets: SearchFacet[] = [];
|
||||
if (isSearchFacetType(val)) {
|
||||
facets.push({
|
||||
kind: val
|
||||
});
|
||||
} else if (Array.isArray(val.kind)) {
|
||||
facets = facets.concat(val.kind.map(x => ({...val, kind: x})));
|
||||
} else {
|
||||
facets.push(val as SearchFacet);
|
||||
}
|
||||
|
||||
return facets.map(x => {
|
||||
return {
|
||||
...parentSubmissionSearchFacetDefaults[x.kind],
|
||||
...x,
|
||||
}
|
||||
});
|
||||
}
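// Usage sketch for generateSearchFacet: a bare string picks up the per-kind
// defaults above, while an object with an array `kind` is intended to expand
// into one SearchFacet per kind, with explicit values overriding the defaults.
const fromString = generateSearchFacet('title');
// -> [{ kind: 'title', matchScore: 85, minWordCount: 3 }]
const fromObject = generateSearchFacet({ kind: ['title', 'url'], matchScore: 90 });
// -> a 'title' facet and a 'url' facet, each carrying matchScore: 90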
|
||||
|
||||
export class RepostRule extends Rule {
|
||||
criteria: RepostCriteria[]
|
||||
condition: JoinOperands;
|
||||
|
||||
submission?: Submission;
|
||||
|
||||
constructor(options: RepostRuleOptions) {
|
||||
super(options);
|
||||
const {
|
||||
criteria = [{}],
|
||||
condition = 'OR'
|
||||
} = options || {};
|
||||
if (criteria.length < 1) {
|
||||
throw new Error('Must provide at least one RepostCriteria');
|
||||
}
|
||||
this.criteria = criteria;
|
||||
this.condition = condition;
|
||||
}
|
||||
|
||||
getKind(): string {
|
||||
return 'Repost';
|
||||
}
|
||||
|
||||
protected getSpecificPremise(): object {
|
||||
return {
|
||||
criteria: this.criteria,
|
||||
condition: this.condition
|
||||
}
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
protected async getSubmission(item: Submission | Comment) {
|
||||
if (item instanceof Comment) {
|
||||
// @ts-ignore
|
||||
return await this.client.getSubmission(item.link_id).fetch();
|
||||
}
|
||||
return item;
|
||||
}
|
||||
|
||||
protected async process(item: Submission | Comment): Promise<[boolean, RuleResult]> {
|
||||
|
||||
let criteriaResults: CriteriaResult[] = [];
|
||||
let ytClient: YoutubeClient | undefined = undefined;
|
||||
let criteriaMatchedResults: RepostItemResult[] = [];
|
||||
let totalSubs = 0;
|
||||
let totalCommentSubs = 0;
|
||||
let totalComments = 0;
|
||||
let totalExternal = new Map<string,number>();
|
||||
let fromCache = false;
|
||||
let andFail = false;
|
||||
|
||||
for (const rCriteria of this.criteria) {
|
||||
criteriaMatchedResults = [];
|
||||
const {
|
||||
searchOn = (item instanceof Submission ? ['title', 'url', 'duplicates', 'crossposts'] : ['external', 'title', 'url', 'duplicates', 'crossposts']),
|
||||
//criteria = {},
|
||||
maxRedditItems = 50,
|
||||
maxExternalItems = 50,
|
||||
window = 20,
|
||||
...restCriteria
|
||||
} = rCriteria;
|
||||
|
||||
const searchFacets = searchOn.map(x => generateSearchFacet(x)).flat(1) as SearchFacet[];
|
||||
|
||||
const includeCrossposts = searchFacets.some(x => x.kind === 'crossposts');
|
||||
|
||||
// in getDuplicate() options add "crossposts_only=1" to get only crossposts https://www.reddit.com/r/redditdev/comments/b4t5g4/get_all_the_subreddits_that_a_post_has_been/
|
||||
// if a submission is a crosspost it has "crosspost_parent" attribute https://www.reddit.com/r/redditdev/comments/l46y2l/check_if_post_is_a_crosspost/
|
||||
|
||||
const strongWindow = windowToActivityWindowCriteria(window);
|
||||
|
||||
const candidateHash = `repostItems-${item instanceof Submission ? item.id : item.link_id}-${objectHash.sha1({
|
||||
window,
|
||||
searchOn
|
||||
})}`;
|
||||
let items: (RepostItem|RepostItemResult)[] = [];
|
||||
let cacheRes = undefined;
|
||||
if (item instanceof Comment) {
|
||||
cacheRes = await this.resources.cache.get(candidateHash) as ((RepostItem|RepostItemResult)[] | undefined | null);
|
||||
}
|
||||
|
||||
if (cacheRes === undefined || cacheRes === null) {
|
||||
|
||||
const sub = await this.getSubmission(item);
|
||||
let dups: (Submission[] | undefined) = undefined;
|
||||
|
||||
for (const sf of searchFacets) {
|
||||
|
||||
const {
|
||||
matchScore = 85,
|
||||
minWordCount = 3,
|
||||
transformations = [],
|
||||
} = sf;
|
||||
|
||||
if (sf.kind === 'external') {
|
||||
const attribution = getAttributionIdentifier(sub);
|
||||
switch (attribution.provider) {
|
||||
case 'YouTube':
|
||||
const ytCreds = this.resources.getThirdPartyCredentials('youtube')
|
||||
if (ytCreds === undefined) {
|
||||
throw new Error('Cannot extract comments from Youtube because a Youtube Data API key was not provided in configuration');
|
||||
}
|
||||
if (ytClient === undefined) {
|
||||
ytClient = new YoutubeClient(ytCreds.apiKey);
|
||||
}
|
||||
const ytComments = commentsAsRepostItems(await ytClient.getVideoTopComments(sub.url, maxExternalItems));
|
||||
items = items.concat(ytComments)
|
||||
totalExternal.set('Youtube comments', (totalExternal.get('Youtube comments') ?? 0) + ytComments.length);
|
||||
break;
|
||||
default:
|
||||
if (attribution.provider === undefined) {
|
||||
this.logger.debug('Unable to determine external provider');
|
||||
continue;
|
||||
} else {
|
||||
this.logger.debug(`External parsing of ${attribution.provider} is not supported yet.`);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let subs: Submission[];
|
||||
|
||||
if (['title', 'url'].includes(sf.kind)) {
|
||||
let query: string;
|
||||
let searchFunc: (limit: number) => Promise<Listing<Submission | Comment>>;
|
||||
if (sf.kind === 'title') {
|
||||
query = (await this.getSubmission(item)).title;
|
||||
searchFunc = (limit: number) => {
|
||||
let opts: SearchOptions = {
|
||||
query,
|
||||
limit,
|
||||
sort: 'relevance'
|
||||
};
|
||||
if (strongWindow.subreddits?.include !== undefined && strongWindow.subreddits?.include.length > 0) {
|
||||
opts.restrictSr = true;
|
||||
opts.subreddit = strongWindow.subreddits?.include.join('+');
|
||||
}
|
||||
return this.client.search(opts);
|
||||
}
|
||||
} else {
|
||||
const attr = getAttributionIdentifier(sub);
|
||||
if (attr.provider === 'YouTube') {
|
||||
const ytId = parseYtIdentifier(sub.url);
|
||||
query = `url:https://youtu.be/${ytId}`;
|
||||
} else {
|
||||
query = `url:${sub.url}`;
|
||||
}
|
||||
searchFunc = (limit: number) => {
|
||||
let opts: SearchOptions = {
|
||||
query,
|
||||
limit,
|
||||
sort: 'top'
|
||||
};
|
||||
if (strongWindow.subreddits?.include !== undefined && strongWindow.subreddits?.include.length > 0) {
|
||||
opts.restrictSr = true;
|
||||
opts.subreddit = strongWindow.subreddits?.include.join('+');
|
||||
}
|
||||
return this.client.search(opts);
|
||||
}
|
||||
}
|
||||
subs = await getActivities(searchFunc, {window: strongWindow}) as Submission[];
|
||||
} else {
|
||||
|
||||
if (dups === undefined) {
|
||||
let searchFunc: (limit: number) => Promise<Listing<Submission | Comment>> = (limit: number) => {
|
||||
// this does not work correctly
|
||||
// see https://github.com/not-an-aardvark/snoowrap/issues/320
|
||||
// searchFunc = (limit: number) => {
|
||||
// return sub.getDuplicates({crossposts_only: 0, limit});
|
||||
// };
|
||||
return this.client.oauthRequest({
|
||||
uri: `duplicates/${sub.id}`,
|
||||
qs: {
|
||||
limit,
|
||||
}
|
||||
}).then(x => {
|
||||
return Promise.resolve(x.comments) as Promise<Listing<Submission>>
|
||||
});
|
||||
};
|
||||
subs = await getActivities(searchFunc, {window: strongWindow}) as Submission[];
|
||||
dups = subs;
|
||||
} else {
|
||||
subs = dups;
|
||||
}
|
||||
|
||||
if (sf.kind === 'duplicates') {
|
||||
// @ts-ignore
|
||||
subs = subs.filter(x => x.crosspost_parent === undefined)
|
||||
} else {
|
||||
// @ts-ignore
|
||||
subs = subs.filter(x => x.crosspost_parent !== undefined && x.crosspost_parent === sub.id)
|
||||
}
|
||||
}
|
||||
|
||||
// filter by minimum word count
|
||||
subs = subs.filter(x => wordCount(x.title) > minWordCount);
|
||||
|
||||
items = items.concat(subs.map(x => ({
|
||||
value: searchAndReplace(x.title, transformations),
|
||||
createdOn: x.created,
|
||||
source: 'reddit',
|
||||
sourceUrl: x.permalink,
|
||||
id: x.id,
|
||||
score: x.score,
|
||||
itemType: 'submission',
|
||||
acquisitionType: sf.kind,
|
||||
sourceObj: x,
|
||||
reqSameness: matchScore,
|
||||
})));
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if (!includeCrossposts) {
|
||||
const sub = await this.getSubmission(item);
|
||||
// remove submissions if they are official crossposts of the submission being checked and searchOn did not include 'crossposts'
|
||||
items = items.filter(x => x.itemType !== 'submission' || !(x.sourceObj.crosspost_parent !== undefined && x.sourceObj.crosspost_parent === sub.id))
|
||||
}
|
||||
|
||||
let sourceTitle = searchAndReplace(sub.title, restCriteria.transformationsActivity ?? restCriteria.transformations ?? []);
|
||||
|
||||
// do submission scoring BEFORE pruning duplicates bc...
|
||||
// might end up in a situation where we get same submission for both title and url
|
||||
// -- url is always a repost but title is not guaranteed and we if remove the url item but not the title we could potentially filter the title submission out and miss this repost
|
||||
items = items.reduce((acc: (RepostItem|RepostItemResult)[], x) => {
|
||||
if(x.itemType === 'submission') {
|
||||
totalSubs++;
|
||||
const sf = searchFacets.find(y => y.kind === x.acquisitionType) as SearchFacet;
|
||||
|
||||
let cleanTitle = x.value;
|
||||
if (!(sf.caseSensitive ?? false)) {
|
||||
cleanTitle = cleanTitle.toLowerCase();
|
||||
}
|
||||
const strMatchResults = stringSameness(sourceTitle, cleanTitle);
|
||||
if(strMatchResults.highScoreWeighted >= (x.reqSameness as number)) {
|
||||
return acc.concat({
|
||||
...x,
|
||||
sameness: Math.min(strMatchResults.highScoreWeighted, 100),
|
||||
});
|
||||
}
|
||||
return acc;
|
||||
}
|
||||
return acc.concat(x);
|
||||
}, []);
|
||||
|
||||
// now remove duplicate submissions
|
||||
items = items.reduce((acc: RepostItem[], curr) => {
|
||||
if(curr.itemType !== 'submission') {
|
||||
return acc.concat(curr);
|
||||
}
|
||||
const subId = curr.sourceObj.id;
|
||||
if (sub.id !== subId && !acc.some(x => x.itemType === 'submission' && x.sourceObj.id === subId)) {
|
||||
return acc.concat(curr);
|
||||
}
|
||||
return acc;
|
||||
}, []);
|
||||
|
||||
|
||||
if (item instanceof Comment) {
|
||||
// we need to gather comments from submissions
|
||||
|
||||
// first cut down the number of submissions to retrieve because we don't care about having ALL submissions,
|
||||
// just most popular comments (which will be in the most popular submissions)
|
||||
let subs = items.filter(x => x.itemType === 'submission').map(x => x.sourceObj) as Submission[];
|
||||
totalCommentSubs += subs.length;
|
||||
|
||||
const nonSubItems = items.filter(x => x.itemType !== 'submission' && wordCount(x.value) > (restCriteria.minWordCount ?? 3));
|
||||
|
||||
subs.sort((a, b) => a.score - b.score).reverse();
|
||||
// take top 10 submissions
|
||||
subs = subs.slice(0, 10);
|
||||
|
||||
let comments: Comment[] = [];
|
||||
for (const sub of subs) {
|
||||
|
||||
const commFunc = (limit: number) => {
|
||||
return this.client.oauthRequest({
|
||||
uri: `${sub.subreddit_name_prefixed}/comments/${sub.id}`,
|
||||
// get ONLY top-level comments, sorted by Top
|
||||
qs: {
|
||||
sort: 'top',
|
||||
depth: 0,
|
||||
limit,
|
||||
}
|
||||
}).then(x => {
|
||||
return x.comments as Promise<Listing<Comment>>
|
||||
});
|
||||
}
|
||||
// and return the top 20 most popular
|
||||
const subComments = await getActivities(commFunc, {window: {count: 20}, skipReplies: true}) as Listing<Comment>;
|
||||
comments = comments.concat(subComments);
|
||||
}
|
||||
|
||||
// sort by highest scores
|
||||
comments.sort((a, b) => a.score - b.score).reverse();
|
||||
// filter out all comments with fewer words than required (prevent false negatives)
|
||||
comments = comments.filter(x => wordCount(x.body) > (restCriteria.minWordCount ?? 3));
|
||||
totalComments += Math.min(comments.length, maxRedditItems);
|
||||
|
||||
// and take the user-defined maximum number of items
|
||||
items = nonSubItems.concat(comments.slice(0, maxRedditItems).map(x => ({
|
||||
value: searchAndReplace(x.body, restCriteria.transformations ?? []),
|
||||
createdOn: x.created,
|
||||
source: 'reddit',
|
||||
id: x.id,
|
||||
sourceUrl: x.permalink,
|
||||
score: x.score,
|
||||
itemType: 'comment',
|
||||
acquisitionType: 'comment'
|
||||
})));
|
||||
}
|
||||
|
||||
// cache items for 20 minutes
|
||||
await this.resources.cache.set(candidateHash, items, {ttl: 1200});
|
||||
} else {
|
||||
items = cacheRes;
|
||||
totalExternal = items.reduce((acc, curr) => {
|
||||
if(curr.acquisitionType === 'external') {
|
||||
acc.set(`${curr.source} comments`, (acc.get(`${curr.source} comments`) ?? 0 ) + 1);
|
||||
return acc;
|
||||
}
|
||||
return acc;
|
||||
}, new Map<string, number>());
|
||||
//totalSubs = items.filter(x => x.itemType === 'submission').length;
|
||||
//totalCommentSubs = totalSubs;
|
||||
totalComments = items.filter(x => x.itemType === 'comment' && x.source === 'reddit').length;
|
||||
fromCache = true;
|
||||
}
|
||||
|
||||
const {
|
||||
matchScore = 85,
|
||||
caseSensitive = false,
|
||||
transformations = [],
|
||||
transformationsActivity = transformations,
|
||||
occurrences = {
|
||||
condition: 'AND',
|
||||
criteria: [
|
||||
{
|
||||
count: {
|
||||
test: ['> 0']
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
} = restCriteria;
|
||||
|
||||
if(item instanceof Submission) {
|
||||
// we've already done difference calculations in the searchFacet phase
|
||||
// and when the check is for a sub it means we are only checking if the submissions has been reposted which means either:
|
||||
// * very similar title (default sameness of 85% or more)
|
||||
// * duplicate/same URL -- which is a repost, duh
|
||||
// so just add all items to critMatches at this point
|
||||
criteriaMatchedResults = criteriaMatchedResults.concat(items.filter(x => "sameness" in x) as RepostItemResult[]);
|
||||
} else {
|
||||
let sourceContent = searchAndReplace(item.body, transformationsActivity);
|
||||
if (!caseSensitive) {
|
||||
sourceContent = sourceContent.toLowerCase();
|
||||
}
|
||||
|
||||
for (const i of items) {
|
||||
const itemContent = !caseSensitive ? i.value.toLowerCase() : i.value;
|
||||
const strMatchResults = stringSameness(sourceContent, itemContent);
|
||||
if(strMatchResults.highScoreWeighted >= matchScore) {
|
||||
criteriaMatchedResults.push({
|
||||
...i,
|
||||
// @ts-ignore
|
||||
reqSameness: matchScore,
|
||||
sameness: Math.min(strMatchResults.highScoreWeighted, 100)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// now do occurrence and time tests
|
||||
|
||||
const {
|
||||
condition: occCondition = 'AND',
|
||||
criteria: occCriteria = [
|
||||
{
|
||||
count: {
|
||||
test: ['> 0']
|
||||
}
|
||||
}
|
||||
]
|
||||
} = occurrences;
|
||||
|
||||
let orPass = false;
|
||||
let occurrenceReason = null;
|
||||
|
||||
for(const occurrenceTest of occCriteria) {
|
||||
|
||||
const {
|
||||
count:{
|
||||
condition: oCondition = 'AND',
|
||||
test: oCriteria = []
|
||||
} = {},
|
||||
time: {
|
||||
condition: tCondition = 'AND',
|
||||
test: tCriteria = [],
|
||||
} = {}
|
||||
} = occurrenceTest;
|
||||
|
||||
let conditionFailSummaries = [];
|
||||
|
||||
const passedConditions = [];
|
||||
const failedConditions = [];
|
||||
|
||||
for (const oc of oCriteria) {
|
||||
const ocCompare = parseGenericValueComparison(oc);
|
||||
const ocMatch = comparisonTextOp(criteriaMatchedResults.length, ocCompare.operator, ocCompare.value);
|
||||
if (ocMatch) {
|
||||
passedConditions.push(oc);
|
||||
} else {
|
||||
failedConditions.push(oc);
|
||||
if (oCondition === 'AND') {
|
||||
conditionFailSummaries.push(`(AND) ${oc} occurrences was not true`);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (passedConditions.length === 0 && oCriteria.length > 0) {
|
||||
conditionFailSummaries.push('(OR) No occurrence tests passed');
|
||||
}
|
||||
|
||||
const existingPassed = passedConditions.length;
|
||||
if (conditionFailSummaries.length === 0) {
|
||||
const timeAwareReposts = [...criteriaMatchedResults].filter(x => x.createdOn !== undefined).sort((a, b) => (a.createdOn as number) - (b.createdOn as number));
|
||||
for (const tc of tCriteria) {
|
||||
let toTest: RepostItemResult[] = [];
|
||||
const durationCompare = parseDurationComparison(tc.condition);
|
||||
switch (tc.testOn) {
|
||||
case 'newest':
|
||||
case 'oldest':
|
||||
if (tc.testOn === 'newest') {
|
||||
toTest = timeAwareReposts.slice(-1);
|
||||
} else {
|
||||
toTest = timeAwareReposts.slice(0, 1);
|
||||
}
|
||||
break;
|
||||
case 'any':
|
||||
case 'all':
|
||||
toTest = timeAwareReposts;
|
||||
break;
|
||||
}
|
||||
const timePass = tc.testOn === 'any' ? toTest.some(x => compareDurationValue(durationCompare, dayjs.unix(x.createdOn as number))) : toTest.every(x => compareDurationValue(durationCompare, dayjs.unix(x.createdOn as number)));
|
||||
if (timePass) {
|
||||
passedConditions.push(tc.condition);
|
||||
} else {
|
||||
failedConditions.push(tc.condition);
|
||||
if (tCondition === 'AND') {
|
||||
conditionFailSummaries.push(`(AND) ${tc.condition} was not true`);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (tCriteria.length > 0 && passedConditions.length === existingPassed) {
|
||||
conditionFailSummaries.push('(OR) No time-based tests passed');
|
||||
}
|
||||
}
|
||||
|
||||
if(conditionFailSummaries.length !== 0 && occCondition === 'AND') {
|
||||
// failed occurrence tests (high-level)
|
||||
occurrenceReason = conditionFailSummaries.join(' | ');
|
||||
break;
|
||||
}
|
||||
|
||||
if(passedConditions.length > 0 && occCondition === 'OR') {
|
||||
occurrenceReason = passedConditions.join(' | ');
|
||||
orPass = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let passed = occCriteria.length === 0;
|
||||
|
||||
if(occCriteria.length > 0) {
|
||||
if(occCondition === 'OR') {
|
||||
passed = orPass;
|
||||
occurrenceReason = occurrenceReason === null ? 'No occurrence test sets passed' : occurrenceReason;
|
||||
} else if(occCondition === 'AND') {
|
||||
passed = occurrenceReason === null;
|
||||
occurrenceReason = occurrenceReason === null ? 'All tests passed' : occurrenceReason;
|
||||
}
|
||||
//passed = (occCondition === 'OR' && orPass) || (occurrenceFailureReason === null && occCondition === 'AND')
|
||||
}
|
||||
|
||||
const results = {
|
||||
passed,
|
||||
conditionsSummary: occurrenceReason as string,
|
||||
items: criteriaMatchedResults
|
||||
};
|
||||
criteriaResults.push(results)
|
||||
|
||||
|
||||
if(!results.passed) {
|
||||
if(this.condition === 'AND') {
|
||||
andFail = true;
|
||||
break;
|
||||
}
|
||||
} else if(this.condition === 'OR') {
|
||||
break;
|
||||
}
|
||||
if (!results.passed && this.condition === 'AND') {
|
||||
andFail = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// get all repost items for stats and SCIENCE
|
||||
const repostItemResults = [...criteriaResults
|
||||
// only want reposts from criteria that passed
|
||||
.filter(x => x.passed).map(x => x.items)
|
||||
.flat()
|
||||
// make sure we are only accumulating unique reposts
|
||||
.reduce((acc, curr) => {
|
||||
const hash = `${curr.source}-${curr.itemType}-${curr.id}`;
|
||||
if (!acc.has(hash)) {
|
||||
acc.set(hash, curr);
|
||||
}
|
||||
return acc;
|
||||
}, new Map<string, RepostItemResult>()).values()];
|
||||
|
||||
repostItemResults.sort((a, b) => a.sameness - b.sameness).reverse();
|
||||
const foundRepost = criteriaResults.length > 0;
|
||||
|
||||
|
||||
let avgSameness = null;
|
||||
let closestSummary = null;
|
||||
let closestSameness = null;
|
||||
let searchCandidateSummary = '';
|
||||
|
||||
if(item instanceof Comment) {
|
||||
searchCandidateSummary = `Searched top ${totalComments} comments in top 10 ${fromCache ? '' : `of ${totalCommentSubs} `}most popular submissions`;
|
||||
if(totalExternal.size > 0) {
|
||||
searchCandidateSummary += ", ";
|
||||
const extSumm: string[] = [];
|
||||
totalExternal.forEach((v, k) => {
|
||||
extSumm.push(`${v} ${k}`);
|
||||
});
|
||||
searchCandidateSummary += extSumm.join(', ');
|
||||
}
|
||||
} else {
|
||||
searchCandidateSummary = `Searched ${totalSubs} submissions`
|
||||
}
|
||||
|
||||
let summary = `${searchCandidateSummary} and found ${repostItemResults.length} reposts.`;
|
||||
|
||||
if(repostItemResults.length > 0) {
|
||||
avgSameness = formatNumber(repostItemResults.reduce((acc, curr) => acc + curr.sameness, 0) / repostItemResults.length);
|
||||
const closest = repostItemResults[0];
|
||||
summary += ` --- Closest Match => >> ${closest.value} << from ${closest.source} (${closest.sourceUrl}) with ${formatNumber(closest.sameness)}% sameness.`
|
||||
closestSummary = `matched a ${closest.itemType} from ${closest.source}`;
|
||||
closestSameness = closest.sameness;
|
||||
if(criteriaResults.length > 1) {
|
||||
summary += ` Avg ${formatNumber(avgSameness)}%`;
|
||||
}
|
||||
}
|
||||
|
||||
let passed;
|
||||
|
||||
if(this.condition === 'AND') {
|
||||
const failedCrit = criteriaResults.find(x => !x.passed);
|
||||
if(failedCrit !== undefined) {
|
||||
summary += ` BUT a criteria failed >> ${failedCrit.conditionsSummary} << and rule has AND condition.`;
|
||||
passed = false;
|
||||
} else {
|
||||
passed = true;
|
||||
}
|
||||
} else {
|
||||
const passedCrit = criteriaResults.find(x => x.passed);
|
||||
if(passedCrit === undefined) {
|
||||
summary += ` BUT all criteria failed`
|
||||
passed = false;
|
||||
} else {
|
||||
passed = true;
|
||||
}
|
||||
}
|
||||
|
||||
const result = `${passed ? PASS : FAIL} ${summary}`;
|
||||
this.logger.verbose(result);
|
||||
|
||||
return [passed, this.getResult(passed, {
|
||||
result,
|
||||
data: {
|
||||
allResults: criteriaResults,
|
||||
closestSameness: passed ? formatNumber(closestSameness as number) : undefined,
|
||||
closestSummary: passed ? closestSummary : undefined,
|
||||
}
|
||||
})];
|
||||
}
|
||||
}
|
||||
|
||||
interface RepostConfig {
|
||||
/**
|
||||
* A list of criteria used to define how candidate reposts are found and tested
|
||||
* @minItems 1
|
||||
* @examples [{"regex": "/reddit/", "matchThreshold": "> 3"}]
|
||||
* */
|
||||
criteria?: RepostCriteria[]
|
||||
/**
|
||||
* * If `OR` then any set of Criteria that pass will trigger the Rule
|
||||
* * If `AND` then all Criteria sets must pass to trigger the Rule
|
||||
*
|
||||
* @default "OR"
|
||||
* */
|
||||
condition?: 'AND' | 'OR'
|
||||
}
|
||||
|
||||
export interface RepostRuleOptions extends RepostConfig, RuleOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* Search for reposts of a Submission or Comment
|
||||
*
|
||||
* For submissions the title or URL can be searched and matched against
|
||||
* * For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against
|
||||
*
|
||||
* */
|
||||
export interface RepostRuleJSONConfig extends RepostConfig, RuleJSONConfig {
|
||||
/**
|
||||
* @examples ["repost"]
|
||||
* */
|
||||
kind: 'repost'
|
||||
}
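// A minimal sketch of a complete repost rule as it might be written in a
// subreddit configuration; the surrounding check/action structure is omitted.
const repostRuleExample: RepostRuleJSONConfig = {
    name: 'commentRepostCheck',
    kind: 'repost',
    condition: 'OR',
    criteria: [
        {
            searchOn: ['external', 'title', 'url'],
            matchScore: 85,
            occurrences: {
                criteria: [
                    { count: { test: ['> 0'] } } // any repost found triggers the rule
                ]
            }
        }
    ]
};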
|
||||
@@ -8,6 +8,7 @@ import HistoryRule, {HistoryJSONConfig} from "./HistoryRule";
|
||||
import RegexRule, {RegexRuleJSONConfig} from "./RegexRule";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {RepostRule, RepostRuleJSONConfig} from "./RepostRule";
|
||||
|
||||
export function ruleFactory
|
||||
(config: RuleJSONConfig, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Rule {
|
||||
@@ -31,6 +32,9 @@ export function ruleFactory
|
||||
case 'regex':
|
||||
cfg = config as RegexRuleJSONConfig;
|
||||
return new RegexRule({...cfg, logger, subredditName, resources, client});
|
||||
case 'repost':
|
||||
cfg = config as RepostRuleJSONConfig;
|
||||
return new RepostRule({...cfg, logger, subredditName, resources, client});
|
||||
default:
|
||||
throw new Error('rule "kind" was not recognized.');
|
||||
}
|
||||
|
||||
@@ -117,13 +117,13 @@ export abstract class Rule implements IRule, Triggerable {
|
||||
this.logger.verbose('(Skipped) Exclusive author criteria not matched');
|
||||
return Promise.resolve([null, this.getResult(null, {result: 'Exclusive author criteria not matched'})]);
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error occurred during Rule pre-process checks');
|
||||
throw err;
|
||||
}
|
||||
try {
|
||||
return this.process(item);
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error occurred while processing rule');
|
||||
throw err;
|
||||
}
|
||||
@@ -240,6 +240,6 @@ export interface RuleJSONConfig extends IRule {
|
||||
* The kind of rule to run
|
||||
* @examples ["recentActivity", "repeatActivity", "author", "attribution", "history"]
|
||||
*/
|
||||
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex'
|
||||
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex' | 'repost'
|
||||
}
|
||||
|
||||
|
||||
@@ -167,6 +167,11 @@
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -184,7 +189,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -258,7 +263,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -386,6 +391,7 @@
|
||||
"message",
|
||||
"remove",
|
||||
"report",
|
||||
"userflair",
|
||||
"usernote"
|
||||
],
|
||||
"type": "string"
|
||||
|
||||
@@ -856,6 +856,17 @@
|
||||
],
|
||||
"description": "The cache provider and, optionally, a custom configuration for that provider\n\nIf not present or `null` provider will be `memory`.\n\nTo specify another `provider` but use its default configuration set this property to a string of one of the available providers: `memory`, `redis`, or `none`"
|
||||
},
|
||||
"selfTTL": {
|
||||
"default": 50,
|
||||
"description": "Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling\n\nThis is useful to prevent the bot from checking Activities it *just* worked on or a product of the checks. Examples:\n\n* Ignore comments created through an Action\n* Ignore Activity polled from modqueue that the bot just reported\n\nThis value should be at least as long as the longest polling interval for modqueue/newComm\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
"examples": [
|
||||
50
|
||||
],
|
||||
"type": [
|
||||
"number",
|
||||
"boolean"
|
||||
]
|
||||
},
|
||||
"submissionTTL": {
|
||||
"default": 60,
|
||||
"description": "Amount of time, in seconds, a submission should be cached\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
@@ -1118,6 +1129,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/FlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/UserFlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/CommentActionJson"
|
||||
},
|
||||
@@ -1256,6 +1270,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RuleSetJson"
|
||||
},
|
||||
@@ -1294,6 +1311,11 @@
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -1311,7 +1333,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1457,6 +1479,10 @@
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
"flair_template_id": {
|
||||
"description": "Flair template ID to assign",
|
||||
"type": "string"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -2004,6 +2030,76 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"OccurredAt": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"$ref": "#/definitions/TimeBasedSelector",
|
||||
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"condition",
|
||||
"testOn"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"OccurrenceTests": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"time": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"PollingOptions": {
|
||||
"description": "A configuration for where, how, and when to poll Reddit for Activities to process",
|
||||
"examples": [
|
||||
@@ -2137,7 +2233,7 @@
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"window": {
|
||||
@@ -2716,6 +2812,224 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RepostCriteria": {
|
||||
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxExternalItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxRedditItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"occurredAt": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"criteria"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"occurrences": {
|
||||
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurrenceTests"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"searchOn": {
|
||||
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SearchFacetJSONConfig"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RepostRuleJSONConfig": {
|
||||
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"condition": {
|
||||
"default": "OR",
|
||||
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
|
||||
"examples": [
|
||||
{
"searchOn": [
"title",
"url",
"duplicates"
]
}
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/RepostCriteria"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"repost"
|
||||
],
|
||||
"examples": [
|
||||
"repost"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RuleSetJson": {
|
||||
"description": "A RuleSet is a \"nested\" set of `Rule` objects that can be used to create more complex AND/OR behavior. Think of the outcome of a `RuleSet` as the result of all of its run `Rule` objects (based on `condition`)",
|
||||
"properties": {
|
||||
@@ -2753,6 +3067,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
@@ -2767,6 +3084,111 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchAndReplaceRegExp": {
|
||||
"properties": {
|
||||
"replace": {
|
||||
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
|
||||
"examples": [
|
||||
"find this string",
|
||||
"/some string*/ig"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"replace",
|
||||
"search"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchFacetJSONConfig": {
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
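The `SearchFacetJSONConfig` definition above bundles per-facet matching options (kind, case sensitivity, match score, word count, transformations, window). As a rough sketch, here is one such facet written as a TypeScript object literal; in a real subreddit config the equivalent JSON/YAML would sit under a repost rule's `searchOn` list, and the specific values chosen here are illustrative assumptions rather than recommendations.

```typescript
// Illustrative SearchFacetJSONConfig value (assumed values beyond the documented defaults).
const urlFacet = {
    kind: 'url',          // search reddit for submissions sharing the same URL
    caseSensitive: false, // documented default
    matchScore: 0,        // 0 => every candidate counts, i.e. "has this URL been posted anywhere"
    minWordCount: 2,      // documented default: skip very short titles/comments
    window: '90 days',    // documented example: range is NOW <---> 90 days ago
};
```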
||||
"SubmissionCheckJson": {
|
||||
"properties": {
|
||||
"actions": {
|
||||
@@ -2788,6 +3210,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/FlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/UserFlairActionJson"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/CommentActionJson"
|
||||
},
|
||||
@@ -2926,6 +3351,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RuleSetJson"
|
||||
},
|
||||
@@ -2993,7 +3421,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
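The `reports` description above defines a small comparison syntax, `(< OR > OR <= OR >=) <number>` with an optional report-type suffix, and the schema supplies a validation pattern. As a rough illustration, the hypothetical helper below (its name and return shape are mine, not the project's) shows how a value such as `>= 1 user` decomposes against that same pattern.

```typescript
// Hypothetical helper, not part of context-mod: decompose a `reports` comparison string
// using the validation pattern given in the schema above.
const REPORTS_PATTERN = /^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$/;

function parseReportsComparison(value: string) {
    const match = value.match(REPORTS_PATTERN);
    if (match === null) {
        throw new Error(`Invalid reports comparison: ${value}`);
    }
    const [, operator, count, percent, rest] = match;
    // an empty suffix means "total reports"; otherwise it names a report type such as 'mod' or 'user'
    return {
        operator,
        count: Number(count),
        isPercent: percent === '%',
        reportType: rest.trim() === '' ? 'total' : rest.trim(),
    };
}

// EX from the description: ">= 1 user" => operator '>=', count 1, reportType 'user'
parseReportsComparison('>= 1 user');
```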
||||
@@ -3026,6 +3454,9 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"isUserProfile": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -3056,6 +3487,122 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ThirdPartyCredentialsJsonConfig": {
|
||||
"additionalProperties": {
|
||||
},
|
||||
"properties": {
|
||||
"youtube": {
|
||||
"properties": {
|
||||
"apiKey": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiKey"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"TimeBasedSelector": {
|
||||
"enum": [
|
||||
"all",
|
||||
"any",
|
||||
"newest",
|
||||
"oldest"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"UserFlairActionJson": {
|
||||
"description": "Flair the Submission",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the Action. If criteria fails then the Action is not run.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"css": {
|
||||
"description": "The text of the css class of the flair to apply",
|
||||
"type": "string"
|
||||
},
|
||||
"dryRun": {
|
||||
"default": false,
|
||||
"description": "If `true` the Action will not make the API request to Reddit to perform its action.",
|
||||
"examples": [
|
||||
false,
|
||||
true
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
"enable": {
|
||||
"default": true,
|
||||
"description": "If set to `false` the Action will not be run",
|
||||
"examples": [
|
||||
true
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
"flair_template_id": {
|
||||
"description": "Flair template to pick.\n\n**Note:** If this template is used text/css are ignored",
|
||||
"type": "string"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Action.\n\nIf any set of criteria passes the Action will be run."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The type of action that will be performed",
|
||||
"enum": [
|
||||
"userflair"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this Action. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes",
|
||||
"examples": [
|
||||
"myDescriptiveAction"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
},
|
||||
"text": {
|
||||
"description": "The text of the flair to apply",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
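For reference, a minimal sketch of the `userflair` action described above, written as a TypeScript object literal standing in for the JSON/YAML that would live in the subreddit wiki config. The action name and the flair text/css values are placeholders of my own.

```typescript
// Illustrative UserFlairActionJson (placeholder flair values).
const flairAuthor = {
    kind: 'userflair',
    name: 'flairTrustedAuthor',
    text: 'Trusted',   // flair text to apply
    css: 'trusted',    // css class of the flair
    // flair_template_id: '...', // if set, text/css are ignored per the description above
    dryRun: true,      // skip the reddit API call while testing
};
```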
||||
"UserNoteActionJson": {
|
||||
"description": "Add a Toolbox User Note to the Author of this Activity",
|
||||
"properties": {
|
||||
@@ -3239,6 +3786,9 @@
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"credentials": {
|
||||
"$ref": "#/definitions/ThirdPartyCredentialsJsonConfig"
|
||||
},
|
||||
"dryRun": {
|
||||
"default": "undefined",
|
||||
"description": "Use this option to override the `dryRun` setting for all `Checks`",
|
||||
|
||||
@@ -19,6 +19,28 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"BotCredentialsJsonConfig": {
|
||||
"properties": {
|
||||
"reddit": {
|
||||
"$ref": "#/definitions/RedditCredentials"
|
||||
},
|
||||
"youtube": {
|
||||
"properties": {
|
||||
"apiKey": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiKey"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"reddit"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
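A sketch of a `BotCredentialsJsonConfig` object combining the required reddit credentials with an optional youtube API key. The reddit values reuse the example credentials shown in the neighboring `credentials` property diff; the youtube key is a placeholder, not a real key.

```typescript
// Illustrative BotCredentialsJsonConfig; reddit values are the schema's own examples.
const botCredentials = {
    reddit: {
        clientId: 'f4b4df1_9oiu',
        clientSecret: '34v5q1c564_yt7',
        refreshToken: '34_f1w1v4',
        accessToken: 'p75_1c467b2',
        redirectUri: 'http://localhost:8085/callback',
    },
    youtube: {
        apiKey: 'YOUR_YOUTUBE_API_KEY', // placeholder
    },
};
```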
||||
"BotInstanceJsonConfig": {
|
||||
"description": "The configuration for an **individual reddit account** ContextMod will run as a bot.\n\nMultiple bot configs may be specified (one per reddit account).\n\n**NOTE:** If `bots` is not specified in a `FILE` then a default `bot` is generated using `ENV/ARG` values IE `CLIENT_ID`, etc...but if `bots` IS specified the default is not generated.",
|
||||
"properties": {
|
||||
@@ -27,15 +49,12 @@
|
||||
"description": "Settings to configure the default caching behavior for this bot\n\nEvery setting not specified will default to what is specified by the global operator caching config"
|
||||
},
|
||||
"credentials": {
|
||||
"$ref": "#/definitions/RedditCredentials",
|
||||
"description": "Credentials required for the bot to interact with Reddit's API\n\nThese credentials will provided to both the API and Web interface unless otherwise specified with the `web.credentials` property\n\nRefer to the [required credentials table](https://github.com/FoxxMD/context-mod/blob/master/docs/operatorConfiguration.md#minimum-required-configuration) to see what is necessary to run the bot.",
|
||||
"examples": [
|
||||
"anyOf": [
|
||||
{
|
||||
"accessToken": "p75_1c467b2",
|
||||
"clientId": "f4b4df1_9oiu",
|
||||
"clientSecret": "34v5q1c564_yt7",
|
||||
"redirectUri": "http://localhost:8085/callback",
|
||||
"refreshToken": "34_f1w1v4"
|
||||
"$ref": "#/definitions/RedditCredentials"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/BotCredentialsJsonConfig"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -105,21 +124,8 @@
|
||||
"type": "object"
|
||||
},
|
||||
"snoowrap": {
|
||||
"description": "Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior",
|
||||
"properties": {
|
||||
"debug": {
|
||||
"description": "Manually set the debug status for snoowrap\n\nWhen snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level\n\n* Set to `true` to always output\n* Set to `false` to never output\n\nIf not present or `null` will be set based on `logLevel`\n\n* ENV => `SNOO_DEBUG`\n* ARG => `--snooDebug`",
|
||||
"type": "boolean"
|
||||
},
|
||||
"proxy": {
|
||||
"description": "Proxy all requests to Reddit's API through this endpoint\n\n* ENV => `PROXY`\n* ARG => `--proxy <proxyEndpoint>`",
|
||||
"examples": [
|
||||
"http://localhost:4443"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
"$ref": "#/definitions/SnoowrapOptions",
|
||||
"description": "Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior.\n\nOverrides any defaults provided at top-level operator config.\n\nSet to an empty object to \"ignore\" any top-level config"
|
||||
},
|
||||
"subreddits": {
|
||||
"description": "Settings related to bot behavior for subreddits it is managing",
|
||||
@@ -395,6 +401,17 @@
|
||||
],
|
||||
"description": "The cache provider and, optionally, a custom configuration for that provider\n\nIf not present or `null` provider will be `memory`.\n\nTo specify another `provider` but use its default configuration set this property to a string of one of the available providers: `memory`, `redis`, or `none`"
|
||||
},
|
||||
"selfTTL": {
|
||||
"default": 50,
|
||||
"description": "Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling\n\nThis is useful to prevent the bot from checking Activities it *just* worked on or a product of the checks. Examples:\n\n* Ignore comments created through an Action\n* Ignore Activity polled from modqueue that the bot just reported\n\nThis value should be at least as long as the longest polling interval for modqueue/newComm\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
"examples": [
|
||||
50
|
||||
],
|
||||
"type": [
|
||||
"number",
|
||||
"boolean"
|
||||
]
|
||||
},
|
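A minimal caching sketch combining the provider choice and the new `selfTTL` setting described above. The provider property name is assumed to be `provider` (its actual name falls outside the visible hunk); the values follow the documented options (`memory`, `redis`, `none`) and the documented `selfTTL` default.

```typescript
// Illustrative caching settings; `provider` is an assumed property name (not visible in this hunk).
const caching = {
    provider: 'redis', // or 'memory' (the documented default) or 'none'
    selfTTL: 50,       // seconds to ignore activities the bot itself just created or acted on
};
```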
||||
"submissionTTL": {
|
||||
"default": 60,
|
||||
"description": "Amount of time, in seconds, a submission should be cached\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
@@ -510,6 +527,40 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"SnoowrapOptions": {
|
||||
"properties": {
|
||||
"debug": {
|
||||
"description": "Manually set the debug status for snoowrap\n\nWhen snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level\n\n* Set to `true` to always output\n* Set to `false` to never output\n\nIf not present or `null` will be set based on `logLevel`\n\n* ENV => `SNOO_DEBUG`\n* ARG => `--snooDebug`",
|
||||
"type": "boolean"
|
||||
},
|
||||
"proxy": {
|
||||
"description": "Proxy all requests to Reddit's API through this endpoint\n\n* ENV => `PROXY`\n* ARG => `--proxy <proxyEndpoint>`",
|
||||
"examples": [
|
||||
"http://localhost:4443"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
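A small sketch of the extracted `SnoowrapOptions` shape; the proxy endpoint is the schema's own example and `debug: true` simply opts into the logging behavior described above.

```typescript
// Illustrative SnoowrapOptions.
const snoowrap = {
    debug: true,                    // log reddit API response statuses at the debug level
    proxy: 'http://localhost:4443', // schema example: route all reddit API requests through this endpoint
};
```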
||||
"ThirdPartyCredentialsJsonConfig": {
|
||||
"additionalProperties": {
|
||||
},
|
||||
"properties": {
|
||||
"youtube": {
|
||||
"properties": {
|
||||
"apiKey": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiKey"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"WebCredentials": {
|
||||
"description": "Separate credentials for the web interface can be provided when also running the api.\n\nAll properties not specified will default to values given in ENV/ARG credential properties\n\nRefer to the [required credentials table](https://github.com/FoxxMD/context-mod/blob/master/docs/operatorConfiguration.md#minimum-required-configuration) to see what is necessary for the web interface.",
|
||||
"examples": [
|
||||
@@ -579,6 +630,9 @@
|
||||
"$ref": "#/definitions/OperatorCacheConfig",
|
||||
"description": "Settings to configure the default caching behavior globally\n\nThese settings will be used by each bot, and subreddit, that does not specify their own"
|
||||
},
|
||||
"credentials": {
|
||||
"$ref": "#/definitions/ThirdPartyCredentialsJsonConfig"
|
||||
},
|
||||
"logging": {
|
||||
"description": "Settings to configure global logging defaults",
|
||||
"properties": {
|
||||
@@ -654,6 +708,10 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"snoowrap": {
|
||||
"$ref": "#/definitions/SnoowrapOptions",
|
||||
"description": "Set global snoowrap options as well as default snoowrap config for all bots that don't specify their own"
|
||||
},
|
||||
"web": {
|
||||
"description": "Settings for the web interface",
|
||||
"properties": {
|
||||
|
||||
@@ -19,6 +19,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
@@ -641,6 +644,11 @@
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -658,7 +666,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -987,6 +995,76 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"OccurredAt": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"$ref": "#/definitions/TimeBasedSelector",
|
||||
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"condition",
|
||||
"testOn"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
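An `OccurredAt` criteria pairs one duration comparison with a `testOn` selector. A sketch built from the documented example comparison; it is only true when every found repost is older than 100 days.

```typescript
// Illustrative OccurredAt criteria.
const onlyOldReposts = {
    condition: '> 100 days', // documented example: the repost was made before 100 days ago
    testOn: 'all',           // ALL reposts must pass the condition
};
```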
||||
"OccurrenceTests": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"time": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
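`OccurrenceTests` lets a repost criteria combine a count test with a time test. A sketch reusing the documented `">= 7"` and `<= 2 months` examples: it passes when at least seven reposts were found and at least one of them was made within roughly the last two months.

```typescript
// Illustrative OccurrenceTests combining a count test and a time test.
const heavilyReposted = {
    count: {
        condition: 'AND',
        test: ['>= 7'],  // documented example: true if 7 or more reposts were found
    },
    time: {
        condition: 'AND',
        test: [
            { condition: '<= 2 months', testOn: 'any' }, // any repost made within the last ~2 months
        ],
    },
};
```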
||||
"RecentActivityRuleJSONConfig": {
|
||||
"description": "Checks a user's history for any Activity (Submission/Comment) in the subreddits specified in thresholds\n\nAvailable data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):\n\n```\nsummary => comma-deliminated list of subreddits that hit the threshold and their count EX subredditA(1), subredditB(4),...\nsubCount => Total number of subreddits that hit the threshold\ntotalCount => Total number of all activity occurrences in subreddits\n```",
|
||||
"properties": {
|
||||
@@ -1070,7 +1148,7 @@
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"window": {
|
||||
@@ -1486,6 +1564,329 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RepostCriteria": {
|
||||
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxExternalItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxRedditItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"occurredAt": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"criteria"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"occurrences": {
|
||||
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurrenceTests"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"searchOn": {
|
||||
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SearchFacetJSONConfig"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RepostRuleJSONConfig": {
|
||||
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"condition": {
|
||||
"default": "OR",
|
||||
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
|
||||
"examples": [
|
||||
{
|
||||
"matchThreshold": "> 3",
|
||||
"regex": "/reddit/"
|
||||
}
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/RepostCriteria"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"repost"
|
||||
],
|
||||
"examples": [
|
||||
"repost"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
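Tying the definitions above together, a hedged sketch of a complete repost rule for submissions, written as a TypeScript object literal standing in for the JSON/YAML that would live in the subreddit wiki config. The rule name and the choice of facets/thresholds are illustrative assumptions.

```typescript
// Illustrative RepostRuleJSONConfig: look for other submissions with the same title or url
// over the last 90 days and trigger when any sufficiently similar candidate is found.
const repostRule = {
    kind: 'repost',
    name: 'recentTitleReposts',
    condition: 'OR',              // any passing criteria set triggers the rule
    criteria: [
        {
            searchOn: ['title', 'url'],
            matchScore: 85,       // documented default: candidate must be at least 85% similar
            window: '90 days',
            occurrences: {
                condition: 'AND',
                criteria: [
                    { count: { test: ['> 0'] } }, // the documented default behavior made explicit
                ],
            },
        },
    ],
};
```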
||||
"SearchAndReplaceRegExp": {
|
||||
"properties": {
|
||||
"replace": {
|
||||
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
|
||||
"examples": [
|
||||
"find this string",
|
||||
"/some string*/ig"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"replace",
|
||||
"search"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
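Transformations are ordered search-and-replace operations applied to text before matching; `search` may be a plain (case-sensitive) string or a regular expression, and `replace` may be empty to strip the match. A short sketch built from the schema's own example search values; the replacement strings are placeholders.

```typescript
// Illustrative transformations list, applied in the order defined.
const transformations = [
    { search: 'find this string', replace: '' },          // plain string search, match removed entirely
    { search: '/some string*/ig', replace: 'redacted' },  // regex search (schema example pattern)
];
```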
||||
"SearchFacetJSONConfig": {
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SubmissionState": {
|
||||
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
|
||||
"examples": [
|
||||
@@ -1535,7 +1936,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1568,6 +1969,9 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"isUserProfile": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -1598,6 +2002,15 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"TimeBasedSelector": {
|
||||
"enum": [
|
||||
"all",
|
||||
"any",
|
||||
"newest",
|
||||
"oldest"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"UserNoteCriteria": {
|
||||
"properties": {
|
||||
"count": {
|
||||
|
||||
@@ -618,6 +618,11 @@
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"depth": {
|
||||
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"distinguished": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -635,7 +640,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -964,6 +969,76 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"OccurredAt": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"$ref": "#/definitions/TimeBasedSelector",
|
||||
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"condition",
|
||||
"testOn"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"OccurrenceTests": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"properties": {
|
||||
"condition": {
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"time": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"test": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"test"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RecentActivityRuleJSONConfig": {
|
||||
"description": "Checks a user's history for any Activity (Submission/Comment) in the subreddits specified in thresholds\n\nAvailable data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):\n\n```\nsummary => comma-deliminated list of subreddits that hit the threshold and their count EX subredditA(1), subredditB(4),...\nsubCount => Total number of subreddits that hit the threshold\ntotalCount => Total number of all activity occurrences in subreddits\n```",
|
||||
"properties": {
|
||||
@@ -1047,7 +1122,7 @@
|
||||
},
|
||||
"useSubmissionAsReference": {
|
||||
"default": true,
|
||||
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
|
||||
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"window": {
|
||||
@@ -1463,6 +1538,329 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RepostCriteria": {
|
||||
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxExternalItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
|
||||
"type": "number"
|
||||
},
|
||||
"maxRedditItems": {
|
||||
"default": 50,
|
||||
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"occurredAt": {
|
||||
"description": "Test the time the reposts occurred at",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurredAt"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"criteria"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"occurrences": {
|
||||
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"default": "AND",
|
||||
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/OccurrenceTests"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"searchOn": {
|
||||
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SearchFacetJSONConfig"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RepostRuleJSONConfig": {
|
||||
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"$ref": "#/definitions/AuthorOptions",
|
||||
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
|
||||
"examples": [
|
||||
{
|
||||
"include": [
|
||||
{
|
||||
"flairText": [
|
||||
"Contributor",
|
||||
"Veteran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"isMod": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"condition": {
|
||||
"default": "OR",
|
||||
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
|
||||
"enum": [
|
||||
"AND",
|
||||
"OR"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"criteria": {
|
||||
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
|
||||
"examples": [
|
||||
{
|
||||
"matchThreshold": "> 3",
|
||||
"regex": "/reddit/"
|
||||
}
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/RepostCriteria"
|
||||
},
|
||||
"minItems": 1,
|
||||
"type": "array"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"repost"
|
||||
],
|
||||
"examples": [
|
||||
"repost"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchAndReplaceRegExp": {
|
||||
"properties": {
|
||||
"replace": {
|
||||
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
|
||||
"examples": [
|
||||
"find this string",
|
||||
"/some string*/ig"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"replace",
|
||||
"search"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SearchFacetJSONConfig": {
|
||||
"properties": {
|
||||
"caseSensitive": {
|
||||
"default": false,
|
||||
"description": "Should text matching be case sensitive?\n\nDefaults to false",
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"crossposts",
|
||||
"duplicates",
|
||||
"external",
|
||||
"title",
|
||||
"url"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"matchScore": {
|
||||
"default": 85,
|
||||
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
|
||||
"type": "number"
|
||||
},
|
||||
"minWordCount": {
|
||||
"default": 2,
|
||||
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
|
||||
"type": "number"
|
||||
},
|
||||
"transformations": {
|
||||
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"transformationsActivity": {
|
||||
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SearchAndReplaceRegExp"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ActivityWindowCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SubmissionState": {
|
||||
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
|
||||
"examples": [
|
||||
@@ -1512,7 +1910,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1545,6 +1943,9 @@
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"isUserProfile": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -1575,6 +1976,15 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"TimeBasedSelector": {
|
||||
"enum": [
|
||||
"all",
|
||||
"any",
|
||||
"newest",
|
||||
"oldest"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"UserNoteCriteria": {
|
||||
"properties": {
|
||||
"count": {
|
||||
@@ -1649,6 +2059,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RegexRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
|
||||
@@ -1,12 +1,22 @@
|
||||
import Snoowrap, {Comment, Subreddit} from "snoowrap";
|
||||
import Snoowrap, {Comment, Subreddit, WikiPage} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import {SubmissionCheck} from "../Check/SubmissionCheck";
|
||||
import {CommentCheck} from "../Check/CommentCheck";
|
||||
import {
|
||||
cacheStats, createHistoricalStatsDisplay,
|
||||
cacheStats,
|
||||
createHistoricalStatsDisplay,
|
||||
createRetryHandler,
|
||||
determineNewResults, findLastIndex, formatNumber,
|
||||
mergeArr, parseFromJsonOrYamlToObject, pollingInfo, resultsSummary, sleep, totalFromMapStats, triggeredIndicator,
|
||||
determineNewResults,
|
||||
findLastIndex,
|
||||
formatNumber, likelyJson5,
|
||||
mergeArr,
|
||||
parseFromJsonOrYamlToObject,
|
||||
parseRedditEntity,
|
||||
pollingInfo,
|
||||
resultsSummary,
|
||||
sleep,
|
||||
totalFromMapStats,
|
||||
triggeredIndicator,
|
||||
} from "../util";
|
||||
import {Poll} from "snoostorm";
|
||||
import pEvent from "p-event";
|
||||
@@ -41,6 +51,7 @@ import NotificationManager from "../Notification/NotificationManager";
|
||||
import action from "../Web/Server/routes/authenticated/user/action";
|
||||
import {createHistoricalDefaults, historicalDefaults} from "../Common/defaults";
|
||||
import {ExtendedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
import {isRateLimitError, isStatusError} from "../Utils/Errors";
|
||||
|
||||
export interface RunningState {
|
||||
state: RunState,
|
||||
@@ -52,6 +63,7 @@ export interface runCheckOptions {
|
||||
delayUntil?: number,
|
||||
dryRun?: boolean,
|
||||
refresh?: boolean,
|
||||
force?: boolean,
|
||||
}
|
||||
|
||||
export interface CheckTask {
|
||||
@@ -73,7 +85,7 @@ interface QueuedIdentifier {
|
||||
state: 'queued' | 'processing'
|
||||
}
|
||||
|
||||
export class Manager {
|
||||
export class Manager extends EventEmitter {
|
||||
subreddit: Subreddit;
|
||||
client: ExtendedSnoowrap;
|
||||
logger: Logger;
|
||||
@@ -85,6 +97,7 @@ export class Manager {
|
||||
wikiLocation: string;
|
||||
lastWikiRevision?: DayjsObj
|
||||
lastWikiCheck: DayjsObj = dayjs();
|
||||
wikiFormat: ('yaml' | 'json') = 'yaml';
|
||||
//wikiUpdateRunning: boolean = false;
|
||||
|
||||
streamListedOnce: string[] = [];
|
||||
@@ -94,7 +107,6 @@ export class Manager {
|
||||
sharedModqueue: boolean;
|
||||
cacheManager: BotResourcesManager;
|
||||
globalDryRun?: boolean;
|
||||
emitter: EventEmitter = new EventEmitter();
|
||||
queue: QueueObject<CheckTask>;
|
||||
// firehose is used to ensure all activities from different polling streams are unique
|
||||
// that is -- if the same activities is in both modqueue and unmoderated we don't want to process the activity twice or use stale data
|
||||
@@ -130,6 +142,8 @@ export class Manager {
|
||||
|
||||
notificationManager: NotificationManager;
|
||||
|
||||
modPermissions?: string[]
|
||||
|
||||
// use by api nanny to slow event consumption
|
||||
delayBy?: number;
|
||||
|
||||
@@ -182,6 +196,8 @@ export class Manager {
|
||||
}
|
||||
|
||||
constructor(sub: Subreddit, client: ExtendedSnoowrap, logger: Logger, cacheManager: BotResourcesManager, opts: RuntimeManagerOptions = {botName: 'ContextMod', maxWorkers: 1}) {
|
||||
super();
|
||||
|
||||
const {dryRun, sharedModqueue = false, wikiLocation = 'botconfig/contextbot', botName, maxWorkers} = opts;
|
||||
this.displayLabel = opts.nickname || `${sub.display_name_prefixed}`;
|
||||
const getLabels = this.getCurrentLabels;
|
||||
@@ -253,6 +269,18 @@ export class Manager {
|
||||
})(this), 10000);
|
||||
}
|
||||
|
||||
protected async getModPermissions(): Promise<string[]> {
|
||||
if(this.modPermissions !== undefined) {
|
||||
return this.modPermissions as string[];
|
||||
}
|
||||
this.logger.debug('Retrieving mod permissions for bot');
|
||||
const userInfo = parseRedditEntity(this.botName, 'user');
|
||||
const mods = await this.subreddit.getModerators({name: userInfo.name});
|
||||
// @ts-ignore
|
||||
this.modPermissions = mods[0].mod_permissions;
|
||||
return this.modPermissions as string[];
|
||||
}
|
||||
|
||||
protected getMaxWorkers(subMaxWorkers?: number) {
|
||||
let maxWorkers = this.globalMaxWorkers;
|
||||
|
||||
@@ -339,6 +367,7 @@ export class Manager {
|
||||
const {
|
||||
polling = [{pollOn: 'unmoderated', limit: DEFAULT_POLLING_LIMIT, interval: DEFAULT_POLLING_INTERVAL}],
|
||||
caching,
|
||||
credentials,
|
||||
dryRun,
|
||||
footer,
|
||||
nickname,
|
||||
@@ -379,6 +408,7 @@ export class Manager {
|
||||
logger: this.logger,
|
||||
subreddit: this.subreddit,
|
||||
caching,
|
||||
credentials,
|
||||
client: this.client,
|
||||
};
|
||||
this.resources = await this.cacheManager.set(this.subreddit.display_name, resourceConfig);
|
||||
@@ -390,6 +420,10 @@ export class Manager {
|
||||
const commentChecks: Array<CommentCheck> = [];
|
||||
const subChecks: Array<SubmissionCheck> = [];
|
||||
const structuredChecks = configBuilder.parseToStructured(validJson);
|
||||
|
||||
// TODO check that bot has permissions for subreddit for all specified actions
|
||||
// can find permissions in this.subreddit.mod_permissions
|
||||
|
||||
for (const jCheck of structuredChecks) {
|
||||
const checkConfig = {
|
||||
...jCheck,
|
||||
@@ -415,7 +449,7 @@ export class Manager {
|
||||
this.logger.info(checkSummary);
|
||||
}
|
||||
this.validConfigLoaded = true;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.validConfigLoaded = false;
|
||||
throw err;
|
||||
}
|
||||
@@ -428,9 +462,45 @@ export class Manager {
|
||||
|
||||
try {
|
||||
let sourceData: string;
|
||||
let wiki: WikiPage;
|
||||
try {
|
||||
// @ts-ignore
|
||||
const wiki = await this.subreddit.getWikiPage(this.wikiLocation).fetch();
|
||||
try {
|
||||
// @ts-ignore
|
||||
wiki = await this.subreddit.getWikiPage(this.wikiLocation).fetch();
|
||||
} catch (err: any) {
|
||||
if(isStatusError(err) && err.statusCode === 404) {
|
||||
// see if we can create the page
|
||||
if (!this.client.scope.includes('wikiedit')) {
|
||||
throw new Error(`Page does not exist and could not be created because Bot does not have oauth permission 'wikiedit'`);
|
||||
}
|
||||
const modPermissions = await this.getModPermissions();
|
||||
if (!modPermissions.includes('all') && !modPermissions.includes('wiki')) {
|
||||
throw new Error(`Page does not exist and could not be created because Bot does not have mod permissions for creating wiki pages. Must have 'all' or 'wiki'`);
|
||||
}
|
||||
if(!this.client.scope.includes('modwiki')) {
|
||||
throw new Error(`Bot COULD create wiki config page but WILL NOT because it does not have the oauth permissions 'modwiki' which is required to set page visibility and editing permissions. Safety first!`);
|
||||
}
|
||||
// @ts-ignore
|
||||
wiki = await this.subreddit.getWikiPage(this.wikiLocation).edit({
|
||||
text: '',
|
||||
reason: 'Empty configuration created for ContextMod'
|
||||
});
|
||||
this.logger.info(`Wiki page at ${this.wikiLocation} did not exist, but bot created it!`);
|
||||
|
||||
// 0 = use subreddit wiki permissions
|
||||
// 1 = only approved wiki contributors
|
||||
// 2 = only mods may edit and view
|
||||
// @ts-ignore
|
||||
await this.subreddit.getWikiPage(this.wikiLocation).editSettings({
|
||||
permissionLevel: 2,
|
||||
// don't list this page on r/[subreddit]/wiki/pages
|
||||
listed: false,
|
||||
});
|
||||
this.logger.info('Bot set wiki page visibility to MODS ONLY');
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
const revisionDate = dayjs.unix(wiki.revision_date);
|
||||
if (!force && this.validConfigLoaded && (this.lastWikiRevision !== undefined && this.lastWikiRevision.isSame(revisionDate))) {
|
||||
// nothing to do, we already have this revision
|
||||
@@ -456,23 +526,41 @@ export class Manager {
|
||||
|
||||
this.lastWikiRevision = revisionDate;
|
||||
sourceData = await wiki.content_md;
|
||||
} catch (err) {
|
||||
const msg = `Could not read wiki configuration. Please ensure the page https://reddit.com${this.subreddit.url}wiki/${this.wikiLocation} exists and is readable -- error: ${err.message}`;
|
||||
} catch (err: any) {
|
||||
let hint = '';
|
||||
if(isStatusError(err) && err.statusCode === 403) {
|
||||
hint = `\r\nHINT: Either the page is restricted to mods only and the bot's reddit account does not have the mod permission 'all' or 'wiki' OR the bot does not have the 'wikiread' oauth permission`;
|
||||
}
|
||||
const msg = `Could not read wiki configuration. Please ensure the page https://reddit.com${this.subreddit.url}wiki/${this.wikiLocation} exists and is readable${hint} -- error: ${err.message}`;
|
||||
this.logger.error(msg);
|
||||
throw new ConfigParseError(msg);
|
||||
}
|
||||
|
||||
if (sourceData === '') {
|
||||
if (sourceData.replace('\r\n', '').trim() === '') {
|
||||
this.logger.error(`Wiki page contents was empty`);
|
||||
throw new ConfigParseError('Wiki page contents was empty');
|
||||
}
|
||||
|
||||
const [configObj, jsonErr, yamlErr] = parseFromJsonOrYamlToObject(sourceData);
|
||||
if (jsonErr === undefined) {
|
||||
this.wikiFormat = 'json';
|
||||
} else if (yamlErr === undefined) {
|
||||
this.wikiFormat = 'yaml';
|
||||
} else {
|
||||
this.wikiFormat = likelyJson5(sourceData) ? 'json' : 'yaml';
|
||||
}
|
||||
|
||||
if (configObj === undefined) {
|
||||
this.logger.error(`Could not parse wiki page contents as JSON or YAML:`);
|
||||
this.logger.error(jsonErr);
|
||||
this.logger.error(yamlErr);
|
||||
this.logger.error(`Could not parse wiki page contents as JSON or YAML. Looks like it should be ${this.wikiFormat}?`);
|
||||
if (this.wikiFormat === 'json') {
|
||||
this.logger.error(jsonErr);
|
||||
this.logger.error('Check DEBUG output for yaml error');
|
||||
this.logger.debug(yamlErr);
|
||||
} else {
|
||||
this.logger.error(yamlErr);
|
||||
this.logger.error('Check DEBUG output for json error');
|
||||
this.logger.debug(jsonErr);
|
||||
}
|
||||
throw new ConfigParseError('Could not parse wiki page contents as JSON or YAML')
|
||||
}
|
||||
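The detection order above is: a clean JSON parse wins, then a clean YAML parse, and only when both fail does `likelyJson5` decide which parse error is surfaced at ERROR level. A minimal sketch of that decision, with the tuple shape of `parseFromJsonOrYamlToObject` inferred from how it is called here and `likelyJson5` treated as a given predicate:

```typescript
// sketch only -- the tuple shape [parsed, jsonError, yamlError] is inferred from the calling code above
type ParseResult = [object | undefined, Error | undefined, Error | undefined];

function detectWikiFormat(
    [_parsed, jsonErr, yamlErr]: ParseResult,
    raw: string,
    likelyJson5: (s: string) => boolean
): 'json' | 'yaml' {
    if (jsonErr === undefined) {
        return 'json';
    }
    if (yamlErr === undefined) {
        return 'yaml';
    }
    // neither parsed: guess from the raw text so the most relevant parse error is logged prominently
    return likelyJson5(raw) ? 'json' : 'yaml';
}
```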
|
||||
@@ -484,7 +572,7 @@ export class Manager {
|
||||
}
|
||||
|
||||
return true;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.validConfigLoaded = false;
|
||||
throw err;
|
||||
}
|
||||
@@ -494,6 +582,18 @@ export class Manager {
|
||||
const checks = checkType === 'Comment' ? this.commentChecks : this.submissionChecks;
|
||||
let item = activity;
|
||||
const itemId = await item.id;
|
||||
|
||||
if(await this.resources.hasRecentSelf(item)) {
|
||||
const {force = false} = options || {};
|
||||
let recentMsg = `Found in Activities recently (last ${this.resources.selfTTL} seconds) modified/created by this bot`;
|
||||
if(force) {
|
||||
this.logger.debug(`${recentMsg} but will run anyway because "force" option was true.`);
|
||||
} else {
|
||||
this.logger.debug(`${recentMsg} so will skip running.`);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let allRuleResults: RuleResult[] = [];
|
||||
const itemIdentifier = `${checkType === 'Submission' ? 'SUB' : 'COM'} ${itemId}`;
|
||||
this.currentLabels = [itemIdentifier];
|
||||
@@ -502,50 +602,10 @@ export class Manager {
|
||||
const [peek, _] = await itemContentPeek(item);
|
||||
ePeek = peek;
|
||||
this.logger.info(`<EVENT> ${peek}`);
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error(`Error occurred while generating item peek for ${checkType} Activity ${itemId}`, err);
|
||||
}
|
||||
|
||||
const {
|
||||
checkNames = [],
|
||||
delayUntil,
|
||||
dryRun,
|
||||
refresh = false,
|
||||
} = options || {};
|
||||
|
||||
let wasRefreshed = false;
|
||||
|
||||
if (delayUntil !== undefined) {
|
||||
const created = dayjs.unix(item.created_utc);
|
||||
const diff = dayjs().diff(created, 's');
|
||||
if (diff < delayUntil) {
|
||||
this.logger.verbose(`Delaying processing until Activity is ${delayUntil} seconds old (${delayUntil - diff}s)`);
|
||||
await sleep(delayUntil - diff);
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
wasRefreshed = true;
|
||||
}
|
||||
}
|
||||
// refresh signal from firehose if activity was ingested multiple times before processing or re-queued while processing
|
||||
// want to make sure we have the most recent data
|
||||
if(!wasRefreshed && refresh === true) {
|
||||
this.logger.verbose('Refreshed data (probably due to signal from firehose)');
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
}
|
||||
|
||||
const startingApiLimit = this.client.ratelimitRemaining;
|
||||
|
||||
if (item instanceof Submission) {
|
||||
if (await item.removed_by_category === 'deleted') {
|
||||
this.logger.warn('Submission was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
} else if (item.author.name === '[deleted]') {
|
||||
this.logger.warn('Comment was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
|
||||
let checksRun = 0;
|
||||
let actionsRun = 0;
|
||||
let totalRulesRun = 0;
|
||||
@@ -567,7 +627,48 @@ export class Manager {
|
||||
let triggeredCheckName;
|
||||
const checksRunNames = [];
|
||||
const cachedCheckNames = [];
|
||||
const startingApiLimit = this.client.ratelimitRemaining;
|
||||
|
||||
const {
|
||||
checkNames = [],
|
||||
delayUntil,
|
||||
dryRun,
|
||||
refresh = false,
|
||||
} = options || {};
|
||||
|
||||
let wasRefreshed = false;
|
||||
|
||||
try {
|
||||
|
||||
if (delayUntil !== undefined) {
|
||||
const created = dayjs.unix(item.created_utc);
|
||||
const diff = dayjs().diff(created, 's');
|
||||
if (diff < delayUntil) {
|
||||
this.logger.verbose(`Delaying processing until Activity is ${delayUntil} seconds old (${delayUntil - diff}s)`);
|
||||
await sleep(delayUntil - diff);
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
wasRefreshed = true;
|
||||
}
|
||||
}
|
||||
// refresh signal from firehose if activity was ingested multiple times before processing or re-queued while processing
|
||||
// want to make sure we have the most recent data
|
||||
if(!wasRefreshed && refresh === true) {
|
||||
this.logger.verbose('Refreshed data (probably due to signal from firehose)');
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
}
|
||||
|
||||
if (item instanceof Submission) {
|
||||
if (await item.removed_by_category === 'deleted') {
|
||||
this.logger.warn('Submission was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
} else if (item.author.name === '[deleted]') {
|
||||
this.logger.warn('Comment was deleted, cannot process.');
|
||||
return;
|
||||
}
|
||||
|
||||
for (const check of checks) {
|
||||
if (checkNames.length > 0 && !checkNames.map(x => x.toLowerCase()).some(x => x === check.name.toLowerCase())) {
|
||||
this.logger.warn(`Check ${check.name} not in array of requested checks to run, skipping...`);
|
||||
@@ -598,10 +699,11 @@ export class Manager {
|
||||
this.logger.info('Check was triggered but cache result options specified NOT to run actions...counting as check NOT triggered');
|
||||
triggered = false;
|
||||
}
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (e.logged !== true) {
|
||||
this.logger.warn(`Running rules for Check ${check.name} failed due to uncaught exception`, e);
|
||||
}
|
||||
this.emit('error', e);
|
||||
}
|
||||
|
||||
if (triggered) {
|
||||
@@ -614,6 +716,11 @@ export class Manager {
|
||||
actionedEvent.ruleSummary = resultsSummary(currentResults, check.condition);
|
||||
}
|
||||
runActions = await check.runActions(item, currentResults.filter(x => x.triggered), dryRun);
|
||||
// we only care about report and comment actions since those can produce items for newComm and modqueue
|
||||
const recentCandidates = runActions.filter(x => ['report','comment'].includes(x.kind.toLocaleLowerCase())).map(x => x.touchedEntities === undefined ? [] : x.touchedEntities).flat();
|
||||
for(const recent of recentCandidates) {
|
||||
await this.resources.setRecentSelf(recent as (Submission|Comment));
|
||||
}
|
||||
actionsRun = runActions.length;
|
||||
|
||||
if(check.notifyOnTrigger) {
|
||||
@@ -628,10 +735,11 @@ export class Manager {
|
||||
this.logger.info('No checks triggered');
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof LoggedError) && err.logged !== true) {
|
||||
this.logger.error('An unhandled error occurred while running checks', err);
|
||||
}
|
||||
this.emit('error', err);
|
||||
} finally {
|
||||
try {
|
||||
actionedEvent.actionResults = runActions;
|
||||
@@ -642,7 +750,7 @@ export class Manager {
|
||||
this.logger.verbose(`Run Stats: Checks ${checksRun} | Rules => Total: ${totalRulesRun} Unique: ${allRuleResults.length} Cached: ${totalRulesRun - allRuleResults.length} Rolling Avg: ~${formatNumber(this.rulesUniqueRollingAvg)}/s | Actions ${actionsRun}`);
|
||||
this.logger.verbose(`Reddit API Stats: Initial ${startingApiLimit} | Current ${this.client.ratelimitRemaining} | Used ~${startingApiLimit - this.client.ratelimitRemaining} | Events ~${formatNumber(this.eventsRollingAvg)}/s`);
|
||||
this.currentLabels = [];
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error occurred while cleaning up Activity check and generating stats', err);
|
||||
} finally {
|
||||
this.resources.updateHistoricalStats({
|
||||
@@ -664,7 +772,7 @@ export class Manager {
|
||||
// give current handle() time to stop
|
||||
//await sleep(1000);
|
||||
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 5, maxOtherRetry: 1}, this.logger);
|
||||
const retryHandler = createRetryHandler({maxRequestRetry: 3, maxOtherRetry: 1}, this.logger);
|
||||
|
||||
const subName = this.subreddit.display_name;
|
||||
|
||||
@@ -766,13 +874,19 @@ export class Manager {
|
||||
// @ts-ignore
|
||||
stream.on('error', async (err: any) => {
|
||||
|
||||
this.emit('error', err);
|
||||
|
||||
if(isRateLimitError(err)) {
|
||||
this.logger.error('Encountered rate limit while polling! Bot is all out of requests :( Stopping subreddit queue and polling.');
|
||||
await this.stop();
|
||||
}
|
||||
this.logger.error('Polling error occurred', err);
|
||||
const shouldRetry = await retryHandler(err);
|
||||
if (shouldRetry) {
|
||||
stream.startInterval();
|
||||
} else {
|
||||
this.logger.warn('Pausing event polling due to too many errors');
|
||||
await this.pauseEvents();
|
||||
this.logger.warn('Stopping subreddit processing/polling due to too many errors');
|
||||
await this.stop();
|
||||
}
|
||||
});
|
||||
this.streams.push(stream);
|
||||
@@ -856,10 +970,19 @@ export class Manager {
|
||||
} else {
|
||||
const pauseWaitStart = dayjs();
|
||||
this.logger.info(`Activity processing queue is stopping...waiting for ${this.queue.running()} activities to finish processing`);
|
||||
const fullStopTime = dayjs().add(5, 'seconds');
|
||||
let gracefulStop = true;
|
||||
while (this.queue.running() > 0) {
|
||||
gracefulStop = false;
|
||||
if(dayjs().isAfter(fullStopTime)) {
|
||||
break;
|
||||
}
|
||||
await sleep(1500);
|
||||
this.logger.verbose(`Activity processing queue is stopping...waiting for ${this.queue.running()} activities to finish processing`);
|
||||
}
|
||||
if(!gracefulStop) {
|
||||
this.logger.warn('Waited longer than 5 seconds to stop activities. Something isn\'t right so forcing stop :/ ');
|
||||
}
|
||||
this.logger.info(`Activity processing queue stopped by ${causedBy} and ${this.queue.length()} queued activities cleared (waited ${dayjs().diff(pauseWaitStart, 's')} seconds while activity processing finished)`);
|
||||
this.firehose.kill();
|
||||
this.queue.kill();
|
||||
|
||||
@@ -5,8 +5,7 @@ import {PollConfiguration} from "snoostorm/out/util/Poll";
|
||||
import {ClearProcessedOptions, DEFAULT_POLLING_INTERVAL} from "../Common/interfaces";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import { Duration } from "dayjs/plugin/duration";
|
||||
import {parseDuration, sleep} from "../util";
|
||||
import setRandomInterval from 'set-random-interval';
|
||||
import {parseDuration, random} from "../util";
|
||||
|
||||
type Awaitable<T> = Promise<T> | T;
|
||||
|
||||
@@ -51,31 +50,10 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
clearInterval(this.interval);
|
||||
}
|
||||
|
||||
startInterval = () => {
|
||||
this.running = true;
|
||||
this.randInterval = setRandomInterval((function (self) {
|
||||
createInterval = () => {
|
||||
this.interval = setTimeout((function (self) {
|
||||
return async () => {
|
||||
try {
|
||||
// clear the tracked, processed activity ids after a set period or number of activities have been processed
|
||||
// because when RCB is long-running and has streams from high-volume subreddits this list never gets smaller...
|
||||
|
||||
// so clear if after time period
|
||||
if ((self.clearProcessedAfter !== undefined && dayjs().isSameOrAfter(self.clearProcessedAfter))
|
||||
// or clear if processed list is larger than defined max allowable size (default setting, 2 * polling option limit)
|
||||
|| (self.clearProcessedSize !== undefined && self.processed.size >= self.clearProcessedSize)) {
|
||||
if (self.retainProcessed === 0) {
|
||||
self.processed = new Set();
|
||||
} else {
|
||||
// retain some processed so we have continuity between processed list resets -- this is default behavior and retains polling option limit # of activities
|
||||
// we can slice from the set here because ID order is guaranteed for Set object so list is oldest -> newest
|
||||
// -- retain last LIMIT number of activities (or all if retain # is larger than list due to user config error)
|
||||
self.processed = new Set(Array.from(self.processed).slice(Math.max(0, self.processed.size - self.retainProcessed)));
|
||||
}
|
||||
// reset time interval if there is one
|
||||
if (self.clearProcessedAfter !== undefined && self.clearProcessedDuration !== undefined) {
|
||||
self.clearProcessedAfter = dayjs().add(self.clearProcessedDuration.asSeconds(), 's');
|
||||
}
|
||||
}
|
||||
const batch = await self.getter();
|
||||
const newItems: T[] = [];
|
||||
for (const item of batch) {
|
||||
@@ -90,19 +68,23 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
|
||||
// Emit the new listing of all new items
|
||||
self.emit("listing", newItems);
|
||||
} catch (err) {
|
||||
|
||||
// if everything succeeded then create a new timeout
|
||||
self.createInterval();
|
||||
} catch (err: any) {
|
||||
self.emit('error', err);
|
||||
self.end();
|
||||
}
|
||||
}
|
||||
})(this), this.frequency - 1, this.frequency + 1);
|
||||
})(this), random(this.frequency - 1, this.frequency + 1));
|
||||
}
|
||||
|
||||
startInterval = () => {
|
||||
this.running = true;
|
||||
this.createInterval();
|
||||
}
|
||||
|
||||
end = () => {
|
||||
this.running = false;
|
||||
if(this.randInterval !== undefined) {
|
||||
this.randInterval.clear();
|
||||
}
|
||||
super.end();
|
||||
}
|
||||
}
|
||||
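The createInterval change above swaps setRandomInterval for a plain setTimeout that only re-arms itself after a successful poll, so an error ends polling instead of silently queueing another tick. A stripped-down sketch of that pattern, assuming a random(min, max) helper like the one imported above (names here are illustrative, not the actual SPoll members):

```typescript
// illustrative sketch of the self-rescheduling poll loop
const random = (min: number, max: number) => Math.random() * (max - min) + min;

function startPolling(
    getter: () => Promise<unknown[]>,
    frequencyMs: number,
    onListing: (items: unknown[]) => void,
    onError: (err: any) => void
): () => void {
    let timer: ReturnType<typeof setTimeout>;
    const tick = () => {
        // jitter each delay slightly so multiple streams don't fire in lockstep
        timer = setTimeout(async () => {
            try {
                onListing(await getter());
                tick(); // only re-arm after a successful fetch
            } catch (err: any) {
                onError(err); // no re-arm -- polling stays stopped until started again
            }
        }, random(frequencyMs - 1, frequencyMs + 1));
    };
    tick();
    return () => clearTimeout(timer);
}
```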
|
||||
@@ -17,7 +17,7 @@ import {
|
||||
cacheStats, compareDurationValue, comparisonTextOp, createCacheManager, createHistoricalStatsDisplay,
|
||||
formatNumber, getActivityAuthorName, getActivitySubredditName, isStrongSubredditState,
|
||||
mergeArr, parseDurationComparison,
|
||||
parseExternalUrl, parseGenericValueComparison,
|
||||
parseExternalUrl, parseGenericValueComparison, parseRedditEntity,
|
||||
parseWikiContext, shouldCacheSubredditStateCriteriaResult, subredditStateIsNameOnly, toStrongSubredditState
|
||||
} from "../util";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
@@ -40,7 +40,7 @@ import {
|
||||
HistoricalStats,
|
||||
HistoricalStatUpdateData,
|
||||
SubredditHistoricalStats,
|
||||
SubredditHistoricalStatsDisplay,
|
||||
SubredditHistoricalStatsDisplay, ThirdPartyCredentialsJsonConfig,
|
||||
} from "../Common/interfaces";
|
||||
import UserNotes from "./UserNotes";
|
||||
import Mustache from "mustache";
|
||||
@@ -62,6 +62,7 @@ export interface SubredditResourceConfig extends Footer {
|
||||
subreddit: Subreddit,
|
||||
logger: Logger;
|
||||
client: ExtendedSnoowrap
|
||||
credentials?: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
interface SubredditResourceOptions extends Footer {
|
||||
@@ -74,6 +75,7 @@ interface SubredditResourceOptions extends Footer {
|
||||
client: ExtendedSnoowrap;
|
||||
prefix?: string;
|
||||
actionedEventsMax: number;
|
||||
thirdPartyCredentials: ThirdPartyCredentialsJsonConfig
|
||||
}
|
||||
|
||||
export interface SubredditResourceSetOptions extends CacheConfig, Footer {
|
||||
@@ -88,6 +90,7 @@ export class SubredditResources {
|
||||
protected submissionTTL: number | false = cacheTTLDefaults.submissionTTL;
|
||||
protected commentTTL: number | false = cacheTTLDefaults.commentTTL;
|
||||
protected filterCriteriaTTL: number | false = cacheTTLDefaults.filterCriteriaTTL;
|
||||
public selfTTL: number | false = cacheTTLDefaults.selfTTL;
|
||||
name: string;
|
||||
protected logger: Logger;
|
||||
userNotes: UserNotes;
|
||||
@@ -101,6 +104,7 @@ export class SubredditResources {
|
||||
historicalSaveInterval?: any;
|
||||
prefix?: string
|
||||
actionedEventsMax: number;
|
||||
thirdPartyCredentials: ThirdPartyCredentialsJsonConfig;
|
||||
|
||||
stats: {
|
||||
cache: ResourceStats
|
||||
@@ -116,6 +120,7 @@ export class SubredditResources {
|
||||
authorTTL,
|
||||
wikiTTL,
|
||||
filterCriteriaTTL,
|
||||
selfTTL,
|
||||
submissionTTL,
|
||||
commentTTL,
|
||||
subredditTTL,
|
||||
@@ -126,6 +131,7 @@ export class SubredditResources {
|
||||
actionedEventsMax,
|
||||
cacheSettingsHash,
|
||||
client,
|
||||
thirdPartyCredentials,
|
||||
} = options || {};
|
||||
|
||||
this.cacheSettingsHash = cacheSettingsHash;
|
||||
@@ -140,7 +146,9 @@ export class SubredditResources {
|
||||
this.subredditTTL = subredditTTL === true ? 0 : subredditTTL;
|
||||
this.wikiTTL = wikiTTL === true ? 0 : wikiTTL;
|
||||
this.filterCriteriaTTL = filterCriteriaTTL === true ? 0 : filterCriteriaTTL;
|
||||
this.selfTTL = selfTTL === true ? 0 : selfTTL;
|
||||
this.subreddit = subreddit;
|
||||
this.thirdPartyCredentials = thirdPartyCredentials;
|
||||
this.name = name;
|
||||
if (logger === undefined) {
|
||||
const alogger = winston.loggers.get('app')
|
||||
@@ -380,12 +388,56 @@ export class SubredditResources {
|
||||
// @ts-ignore
|
||||
return await item.fetch();
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error while trying to fetch a cached activity', err);
|
||||
throw err.logged;
|
||||
}
|
||||
}
|
||||
|
||||
async hasActivity(item: Submission | Comment) {
|
||||
const hash = asSubmission(item) ? `sub-${item.name}` : `comm-${item.name}`;
|
||||
const res = await this.cache.get(hash);
|
||||
return res !== undefined && res !== null;
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
async getRecentSelf(item: Submission | Comment): Promise<(Submission | Comment | undefined)> {
|
||||
const hash = asSubmission(item) ? `sub-recentSelf-${item.name}` : `comm-recentSelf-${item.name}`;
|
||||
const res = await this.cache.get(hash);
|
||||
if(res === null) {
|
||||
return undefined;
|
||||
}
|
||||
return res as (Submission | Comment | undefined);
|
||||
}
|
||||
|
||||
async setRecentSelf(item: Submission | Comment) {
|
||||
if(this.selfTTL !== false) {
|
||||
const hash = asSubmission(item) ? `sub-recentSelf-${item.name}` : `comm-recentSelf-${item.name}`;
|
||||
// @ts-ignore
|
||||
await this.cache.set(hash, item, {ttl: this.selfTTL});
|
||||
}
|
||||
return;
|
||||
}
|
||||
/**
|
||||
* Returns true if the activity being checked was recently acted on/created by the bot and has not changed since that time
|
||||
* */
|
||||
async hasRecentSelf(item: Submission | Comment) {
|
||||
const recent = await this.getRecentSelf(item) as (Submission | Comment | undefined);
|
||||
if (recent !== undefined) {
|
||||
return item.num_reports === recent.num_reports;
|
||||
|
||||
// can't really use edited since it's only ever updated once with no timestamp
|
||||
// if(item.num_reports !== recent.num_reports) {
|
||||
// return false;
|
||||
// }
|
||||
// if(!asSubmission(item)) {
|
||||
// return item.edited === recent.edited;
|
||||
// }
|
||||
// return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
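setRecentSelf and hasRecentSelf together act as a short-lived guard so the bot does not immediately re-process activities it just created or touched: the Manager stores comment/report results after actions run, and handle() skips a matching incoming activity unless its report count changed or force is set. A rough sketch of that round trip against an interface shaped like the methods above (the wiring itself is simplified):

```typescript
import Submission from "snoowrap/dist/objects/Submission";
import Comment from "snoowrap/dist/objects/Comment";

// illustrative shape only -- the real implementation is SubredditResources above
interface RecentSelfCache {
    setRecentSelf(item: Submission | Comment): Promise<void>;
    hasRecentSelf(item: Submission | Comment): Promise<boolean>;
}

// after actions run: remember what the bot touched so polling does not echo it back
const rememberTouched = async (cache: RecentSelfCache, touched: (Submission | Comment)[]) => {
    for (const item of touched) {
        await cache.setRecentSelf(item);
    }
};

// on the next ingest: skip the activity unless force is set or it changed since the bot touched it
const shouldSkip = async (cache: RecentSelfCache, item: Submission | Comment, force = false) => {
    return !force && await cache.hasRecentSelf(item);
};
```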
|
||||
// @ts-ignore
|
||||
async getSubreddit(item: Submission | Comment) {
|
||||
try {
|
||||
@@ -415,7 +467,7 @@ export class SubredditResources {
|
||||
|
||||
return subreddit as Subreddit;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
this.logger.error('Error while trying to fetch a cached activity', err);
|
||||
throw err.logged;
|
||||
}
|
||||
@@ -527,7 +579,7 @@ export class SubredditResources {
|
||||
// @ts-ignore
|
||||
const wikiPage = sub.getWikiPage(wikiContext.wiki);
|
||||
wikiContent = await wikiPage.content_md;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
let msg = `Could not read wiki page for an unknown reason. Please ensure the page 'https://reddit.com${sub.display_name_prefixed}/wiki/${wikiContext.wiki}' exists and is readable`;
|
||||
if(err.statusCode !== undefined) {
|
||||
if(err.statusCode === 404) {
|
||||
@@ -543,7 +595,7 @@ export class SubredditResources {
|
||||
try {
|
||||
const response = await fetch(extUrl as string);
|
||||
wikiContent = await response.text();
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
const msg = `Error occurred while trying to fetch the url ${extUrl}`;
|
||||
this.logger.error(msg, err);
|
||||
throw new LoggedError(msg);
|
||||
@@ -648,7 +700,7 @@ export class SubredditResources {
|
||||
this.stats.cache.subredditCrit.miss++;
|
||||
await this.cache.set(hash, itemResult, {ttl: this.filterCriteriaTTL});
|
||||
return itemResult;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (err.logged !== true) {
|
||||
this.logger.error('Error occurred while testing subreddit criteria', err);
|
||||
}
|
||||
@@ -718,7 +770,7 @@ export class SubredditResources {
|
||||
this.stats.cache.itemCrit.miss++;
|
||||
await this.cache.set(hash, itemResult, {ttl: this.filterCriteriaTTL});
|
||||
return itemResult;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (err.logged !== true) {
|
||||
this.logger.error('Error occurred while testing item criteria', err);
|
||||
}
|
||||
@@ -751,6 +803,15 @@ export class SubredditResources {
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case 'isUserProfile':
|
||||
const entity = parseRedditEntity(subreddit.display_name);
|
||||
const entityIsUserProfile = entity.type === 'user';
|
||||
if(crit[k] !== entityIsUserProfile) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${entityIsUserProfile}`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
case 'over18':
|
||||
case 'over_18':
|
||||
// handling an edge case where user may have confused Comment/Submission state "over_18" with SubredditState "over18"
|
||||
@@ -822,13 +883,30 @@ export class SubredditResources {
|
||||
break;
|
||||
case 'reports':
|
||||
if (!item.can_mod_post) {
|
||||
log.debug(`Cannot test for reports on Activity in a subreddit bot account is not a moderato Activist. Skipping criteria...`);
|
||||
log.debug(`Cannot test for reports on Activity in a subreddit bot account is not a moderator of. Skipping criteria...`);
|
||||
break;
|
||||
}
|
||||
const reportCompare = parseGenericValueComparison(crit[k] as string);
|
||||
if(!comparisonTextOp(item.num_reports, reportCompare.operator, reportCompare.value)) {
|
||||
let reportType = 'total';
|
||||
if(reportCompare.extra !== undefined && reportCompare.extra.trim() !== '') {
|
||||
const requestedType = reportCompare.extra.toLocaleLowerCase().trim();
|
||||
if(requestedType.includes('mod')) {
|
||||
reportType = 'mod';
|
||||
} else if(requestedType.includes('user')) {
|
||||
reportType = 'user';
|
||||
} else {
|
||||
log.warn(`Did not recognize the report type "${requestedType}" -- can only use "mod" or "user". Will default to TOTAL reports`);
|
||||
}
|
||||
}
|
||||
let reportNum = item.num_reports;
|
||||
if(reportType === 'user') {
|
||||
reportNum = item.user_reports.length;
|
||||
} else {
|
||||
reportNum = item.mod_reports.length;
|
||||
}
|
||||
if(!comparisonTextOp(reportNum, reportCompare.operator, reportCompare.value)) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${item.num_reports}`)
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} ${reportType} reports | Found => ${k}:${reportNum} ${reportType} reports`)
|
||||
return false
|
||||
}
|
||||
break;
|
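With this change a reports criteria can optionally name the kind of report after the comparison, e.g. '> 2 user' or '> 0 mod'; any other (or missing) suffix falls back to total reports. A hedged sketch of just the type resolution, with the comparison object's shape inferred from how parseGenericValueComparison is used above:

```typescript
// assumed shape, based on the fields read from parseGenericValueComparison's result in the diff
interface GenericComparison {
    operator: string;
    value: number;
    extra?: string;
}

type ReportType = 'total' | 'mod' | 'user';

function resolveReportType(compare: GenericComparison): ReportType {
    const requested = (compare.extra ?? '').toLocaleLowerCase().trim();
    if (requested.includes('mod')) {
        return 'mod';
    }
    if (requested.includes('user')) {
        return 'user';
    }
    // unrecognized or absent suffix => compare against total reports
    return 'total';
}

// e.g. resolveReportType({operator: '>', value: 2, extra: ' user'}) === 'user'
```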
||||
@@ -880,7 +958,7 @@ export class SubredditResources {
|
||||
log.debug(`Failed to match title as regular expression: ${titleReg}`);
|
||||
return false;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
log.error(`An error occurred while attempting to match title against string as regular expression: ${titleReg}. Most likely the string does not make a valid regular expression.`, err);
|
||||
return false
|
||||
}
|
||||
@@ -910,6 +988,19 @@ export class SubredditResources {
|
||||
return false
|
||||
}
|
||||
break;
|
||||
case 'depth':
|
||||
if(item instanceof Submission) {
|
||||
log.warn(`Cannot test for 'depth' on a Submission`);
|
||||
break;
|
||||
}
|
||||
// @ts-ignore
|
||||
const depthCompare = parseGenericValueComparison(crit[k] as string);
|
||||
if(!comparisonTextOp(item.score, depthCompare.operator, depthCompare.value)) {
|
||||
// @ts-ignore
|
||||
log.debug(`Failed: Expected => ${k}:${crit[k]} | Found => ${k}:${item.score}`)
|
||||
return false
|
||||
}
|
||||
break;
|
||||
default:
|
||||
// @ts-ignore
|
||||
if (item[k] !== undefined) {
|
||||
@@ -1003,6 +1094,13 @@ export class SubredditResources {
|
||||
// }
|
||||
// return hash;
|
||||
}
|
||||
|
||||
getThirdPartyCredentials(name: string) {
|
||||
if(this.thirdPartyCredentials[name] !== undefined) {
|
||||
return this.thirdPartyCredentials[name];
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export class BotResourcesManager {
|
||||
@@ -1018,6 +1116,7 @@ export class BotResourcesManager {
|
||||
actionedEventsMaxDefault?: number;
|
||||
actionedEventsDefault: number;
|
||||
pruneInterval: any;
|
||||
defaultThirdPartyCredentials: ThirdPartyCredentialsJsonConfig;
|
||||
|
||||
constructor(config: BotInstanceConfig) {
|
||||
const {
|
||||
@@ -1029,19 +1128,24 @@ export class BotResourcesManager {
|
||||
submissionTTL,
|
||||
subredditTTL,
|
||||
filterCriteriaTTL,
|
||||
selfTTL,
|
||||
provider,
|
||||
actionedEventsMax,
|
||||
actionedEventsDefault,
|
||||
},
|
||||
name,
|
||||
credentials,
|
||||
credentials: {
|
||||
reddit,
|
||||
...thirdParty
|
||||
},
|
||||
caching,
|
||||
} = config;
|
||||
caching.provider.prefix = buildCachePrefix([caching.provider.prefix, 'SHARED']);
|
||||
const {actionedEventsMax: eMax, actionedEventsDefault: eDef, ...relevantCacheSettings} = caching;
|
||||
this.cacheHash = objectHash.sha1(relevantCacheSettings);
|
||||
this.defaultCacheConfig = caching;
|
||||
this.ttlDefaults = {authorTTL, userNotesTTL, wikiTTL, commentTTL, submissionTTL, filterCriteriaTTL, subredditTTL};
|
||||
this.defaultThirdPartyCredentials = thirdParty;
|
||||
this.ttlDefaults = {authorTTL, userNotesTTL, wikiTTL, commentTTL, submissionTTL, filterCriteriaTTL, subredditTTL, selfTTL};
|
||||
|
||||
const options = provider;
|
||||
this.cacheType = options.store;
|
||||
@@ -1073,13 +1177,14 @@ export class BotResourcesManager {
|
||||
|
||||
async set(subName: string, initOptions: SubredditResourceConfig): Promise<SubredditResources> {
|
||||
let hash = 'default';
|
||||
const { caching, ...init } = initOptions;
|
||||
const { caching, credentials, ...init } = initOptions;
|
||||
|
||||
let opts: SubredditResourceOptions = {
|
||||
cache: this.defaultCache,
|
||||
cacheType: this.cacheType,
|
||||
cacheSettingsHash: hash,
|
||||
ttl: this.ttlDefaults,
|
||||
thirdPartyCredentials: credentials ?? this.defaultThirdPartyCredentials,
|
||||
prefix: this.defaultCacheConfig.provider.prefix,
|
||||
actionedEventsMax: this.actionedEventsMaxDefault !== undefined ? Math.min(this.actionedEventsDefault, this.actionedEventsMaxDefault) : this.actionedEventsDefault,
|
||||
...init,
|
||||
@@ -1107,6 +1212,7 @@ export class BotResourcesManager {
|
||||
actionedEventsMax: eventsMax,
|
||||
cacheType: trueProvider.store,
|
||||
cacheSettingsHash: hash,
|
||||
thirdPartyCredentials: credentials ?? this.defaultThirdPartyCredentials,
|
||||
prefix: subPrefix,
|
||||
...init,
|
||||
...trueRest,
|
||||
@@ -1137,4 +1243,37 @@ export class BotResourcesManager {
|
||||
|
||||
return resource;
|
||||
}
|
||||
|
||||
async getPendingSubredditInvites(): Promise<(string[])> {
|
||||
const subredditNames = await this.defaultCache.get(`modInvites`);
|
||||
if (subredditNames !== undefined && subredditNames !== null) {
|
||||
return subredditNames as string[];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
async addPendingSubredditInvite(subreddit: string): Promise<void> {
|
||||
let subredditNames = await this.defaultCache.get(`modInvites`) as (string[] | undefined | null);
|
||||
if (subredditNames === undefined || subredditNames === null) {
|
||||
subredditNames = [];
|
||||
}
|
||||
subredditNames.push(subreddit);
|
||||
await this.defaultCache.set(`modInvites`, subredditNames, {ttl: 0});
|
||||
return;
|
||||
}
|
||||
|
||||
async deletePendingSubredditInvite(subreddit: string): Promise<void> {
|
||||
let subredditNames = await this.defaultCache.get(`modInvites`) as (string[] | undefined | null);
|
||||
if (subredditNames === undefined || subredditNames === null) {
|
||||
subredditNames = [];
|
||||
}
|
||||
subredditNames = subredditNames.filter(x => x !== subreddit);
|
||||
await this.defaultCache.set(`modInvites`, subredditNames, {ttl: 0});
|
||||
return;
|
||||
}
|
||||
|
||||
async clearPendingSubredditInvites(): Promise<void> {
|
||||
await this.defaultCache.del(`modInvites`);
|
||||
return;
|
||||
}
|
||||
}
|
||||
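The invite helpers above all read and write a single modInvites cache key with no TTL, so pending invites survive until explicitly removed. A usage sketch against that same method surface (the resourcesManager argument stands in for an existing BotResourcesManager):

```typescript
// illustrative only -- structural type matching the methods added above
interface PendingInviteStore {
    getPendingSubredditInvites(): Promise<string[]>;
    addPendingSubredditInvite(subreddit: string): Promise<void>;
    deletePendingSubredditInvite(subreddit: string): Promise<void>;
    clearPendingSubredditInvites(): Promise<void>;
}

async function demoInvites(resourcesManager: PendingInviteStore) {
    await resourcesManager.addPendingSubredditInvite('mySubreddit');
    console.log(await resourcesManager.getPendingSubredditInvites()); // ['mySubreddit']
    await resourcesManager.deletePendingSubredditInvite('mySubreddit');
    await resourcesManager.clearPendingSubredditInvites(); // drops the modInvites key entirely
}
```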
|
||||
@@ -4,7 +4,6 @@ import {
|
||||
COMMENT_URL_ID,
|
||||
deflateUserNotes, getActivityAuthorName,
|
||||
inflateUserNotes,
|
||||
isScopeError,
|
||||
parseLinkIdentifier,
|
||||
SUBMISSION_URL_ID
|
||||
} from "../util";
|
||||
@@ -14,6 +13,7 @@ import LoggedError from "../Utils/LoggedError";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {RichContent} from "../Common/interfaces";
|
||||
import {Cache} from 'cache-manager';
|
||||
import {isScopeError} from "../Utils/Errors";
|
||||
|
||||
interface RawUserNotesPayload {
|
||||
ver: number,
|
||||
@@ -185,7 +185,7 @@ export class UserNotes {
|
||||
}
|
||||
|
||||
return userNotes as RawUserNotesPayload;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
const msg = `Could not read usernotes. Make sure at least one moderator has used toolbox and usernotes before.`;
|
||||
this.logger.error(msg, err);
|
||||
throw new LoggedError(msg);
|
||||
@@ -235,7 +235,7 @@ export class UserNotes {
|
||||
}
|
||||
|
||||
return payload as RawUserNotesPayload;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
let msg = 'Could not edit usernotes.';
|
||||
// Make sure at least one moderator has used toolbox and usernotes before and that this account has editing permissions`;
|
||||
if(isScopeError(err)) {
|
||||
|
||||
src/Utils/Errors.ts (new file, +22 lines)
@@ -0,0 +1,22 @@
|
||||
import {StatusCodeError, RequestError} from "../Common/interfaces";
|
||||
|
||||
|
||||
export const isRateLimitError = (err: any) => {
|
||||
return typeof err === 'object' && err.name === 'RateLimitError';
|
||||
}
|
||||
|
||||
export const isScopeError = (err: any): boolean => {
|
||||
if(typeof err === 'object' && err.name === 'StatusCodeError' && err.response !== undefined) {
|
||||
const authHeader = err.response.headers['www-authenticate'];
|
||||
return authHeader !== undefined && authHeader.includes('insufficient_scope');
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
export const isStatusError = (err: any): err is StatusCodeError => {
|
||||
return typeof err === 'object' && err.name === 'StatusCodeError' && err.response !== undefined;
|
||||
}
|
||||
|
||||
export const isRequestError = (err: any): err is RequestError => {
|
||||
return typeof err === 'object' && err.name === 'RequestError' && err.response !== undefined;
|
||||
}
|
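The guards in the new src/Utils/Errors.ts are duck-typed checks keyed off err.name, which lets callers branch without importing snoowrap's error classes. A small sketch of the branching pattern seen elsewhere in this diff (the relative import path is an assumption):

```typescript
import {isRateLimitError, isStatusError} from "../Utils/Errors";

// sketch: the common branching pattern used by Manager and the activity utils above
async function fetchOrUndefined<T>(doFetch: () => Promise<T>): Promise<T | undefined> {
    try {
        return await doFetch();
    } catch (err: any) {
        if (isRateLimitError(err)) {
            // completely out of API requests -- callers generally stop polling/processing here
            throw err;
        }
        if (isStatusError(err) && err.statusCode === 404) {
            // resource simply does not exist (missing wiki page, shadowbanned user, etc.)
            return undefined;
        }
        throw err;
    }
}
```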
||||
@@ -26,13 +26,25 @@ export class ExtendedSnoowrap extends Snoowrap {
|
||||
}
|
||||
try {
|
||||
return parseSubredditName(x);
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
return x;
|
||||
}
|
||||
});
|
||||
|
||||
return await this.oauthRequest({uri: '/api/info', method: 'get', qs: { sr_name: names.join(',')}}) as Listing<Subreddit>;
|
||||
}
|
||||
|
||||
async assignUserFlairByTemplateId(options: { flairTemplateId: string, username: string, subredditName: string }): Promise<any> {
|
||||
return await this.oauthRequest({
|
||||
uri: `/r/${options.subredditName}/api/selectflair`,
|
||||
method: 'post',
|
||||
form: {
|
||||
api_type: 'json',
|
||||
name: options.username,
|
||||
flair_template_id: options.flairTemplateId,
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
export class RequestTrackingSnoowrap extends ExtendedSnoowrap {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import Snoowrap, {RedditUser} from "snoowrap";
|
||||
import Snoowrap, {Listing, RedditUser} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import Comment from "snoowrap/dist/objects/Comment";
|
||||
import {Duration, DurationUnitsObjectType} from "dayjs/plugin/duration";
|
||||
@@ -15,7 +15,7 @@ import {
|
||||
import {
|
||||
compareDurationValue,
|
||||
comparisonTextOp, escapeRegex, getActivityAuthorName,
|
||||
isActivityWindowCriteria, isStatusError,
|
||||
isActivityWindowCriteria,
|
||||
normalizeName,
|
||||
parseDuration,
|
||||
parseDurationComparison,
|
||||
@@ -23,7 +23,7 @@ import {
|
||||
parseGenericValueOrPercentComparison,
|
||||
parseRuleResultsToMarkdownSummary, parseStringToRegex,
|
||||
parseSubredditName,
|
||||
truncateStringToLength
|
||||
truncateStringToLength, windowToActivityWindowCriteria
|
||||
} from "../util";
|
||||
import UserNotes from "../Subreddit/UserNotes";
|
||||
import {Logger} from "winston";
|
||||
@@ -31,6 +31,7 @@ import InvalidRegexError from "./InvalidRegexError";
|
||||
import SimpleError from "./SimpleError";
|
||||
import {AuthorCriteria} from "../Author/Author";
|
||||
import {URL} from "url";
|
||||
import {isStatusError} from "./Errors";
|
||||
|
||||
export const BOT_LINK = 'https://www.reddit.com/r/ContextModBot/comments/otz396/introduction_to_contextmodbot';
|
||||
|
||||
@@ -43,14 +44,16 @@ export interface AuthorActivitiesOptions {
|
||||
chunkSize?: number,
|
||||
// TODO maybe move this into window
|
||||
keepRemoved?: boolean,
|
||||
[key: string]: any,
|
||||
}
|
||||
|
||||
export async function getAuthorActivities(user: RedditUser, options: AuthorTypedActivitiesOptions): Promise<Array<Submission | Comment>> {
|
||||
export async function getActivities(listingFunc: (limit: number) => Promise<Listing<Submission | Comment>>, options: AuthorActivitiesOptions): Promise<Array<Submission | Comment>> {
|
||||
|
||||
const {
|
||||
chunkSize: cs = 100,
|
||||
window: optWindow,
|
||||
keepRemoved = true,
|
||||
...restFetchOptions
|
||||
} = options;
|
||||
|
||||
let satisfiedCount: number | undefined,
|
||||
@@ -64,33 +67,56 @@ export async function getAuthorActivities(user: RedditUser, options: AuthorTyped
|
||||
let includes: string[] = [];
|
||||
let excludes: string[] = [];
|
||||
|
||||
if (isActivityWindowCriteria(optWindow)) {
|
||||
const {
|
||||
satisfyOn = 'any',
|
||||
count,
|
||||
duration,
|
||||
subreddits: {
|
||||
include = [],
|
||||
exclude = [],
|
||||
} = {},
|
||||
} = optWindow;
|
||||
const strongWindow = windowToActivityWindowCriteria(optWindow);
|
||||
|
||||
includes = include.map(x => parseSubredditName(x).toLowerCase());
|
||||
excludes = exclude.map(x => parseSubredditName(x).toLowerCase());
|
||||
const {
|
||||
satisfyOn = 'any',
|
||||
count,
|
||||
duration: oDuration,
|
||||
subreddits: {
|
||||
include = [],
|
||||
exclude = [],
|
||||
} = {},
|
||||
} = strongWindow;
|
||||
|
||||
if (includes.length > 0 && excludes.length > 0) {
|
||||
// TODO add logger so this can be logged...
|
||||
// this.logger.warn('include and exclude both specified, exclude will be ignored');
|
||||
}
|
||||
satisfiedCount = count;
|
||||
durVal = duration;
|
||||
satisfy = satisfyOn
|
||||
} else if (typeof optWindow === 'number') {
|
||||
satisfiedCount = optWindow;
|
||||
} else {
|
||||
durVal = optWindow as DurationVal;
|
||||
satisfy = satisfyOn;
|
||||
satisfiedCount = count;
|
||||
includes = include;
|
||||
excludes = exclude;
|
||||
durVal = oDuration;
|
||||
|
||||
if (includes.length > 0 && excludes.length > 0) {
|
||||
// TODO add logger so this can be logged...
|
||||
// this.logger.warn('include and exclude both specified, exclude will be ignored');
|
||||
}
|
||||
|
||||
// if (isActivityWindowCriteria(optWindow)) {
|
||||
// const {
|
||||
// satisfyOn = 'any',
|
||||
// count,
|
||||
// duration,
|
||||
// subreddits: {
|
||||
// include = [],
|
||||
// exclude = [],
|
||||
// } = {},
|
||||
// } = optWindow;
|
||||
//
|
||||
// includes = include.map(x => parseSubredditName(x).toLowerCase());
|
||||
// excludes = exclude.map(x => parseSubredditName(x).toLowerCase());
|
||||
//
|
||||
// if (includes.length > 0 && excludes.length > 0) {
|
||||
// // TODO add logger so this can be logged...
|
||||
// // this.logger.warn('include and exclude both specified, exclude will be ignored');
|
||||
// }
|
||||
// satisfiedCount = count;
|
||||
// durVal = duration;
|
||||
// satisfy = satisfyOn
|
||||
// } else if (typeof optWindow === 'number') {
|
||||
// satisfiedCount = optWindow;
|
||||
// } else {
|
||||
// durVal = optWindow as DurationVal;
|
||||
// }
|
||||
|
||||
// if count is less than max limit (100) go ahead and just get that many. may result in faster response time for low numbers
|
||||
if (satisfiedCount !== undefined) {
|
||||
chunkSize = Math.min(chunkSize, satisfiedCount);
|
||||
@@ -124,27 +150,8 @@ export async function getAuthorActivities(user: RedditUser, options: AuthorTyped
|
||||
}
|
||||
|
||||
let items: Array<Submission | Comment> = [];
|
||||
//let count = 1;
|
||||
let listing = [];
|
||||
try {
|
||||
switch (options.type) {
|
||||
case 'comment':
|
||||
listing = await user.getComments({limit: chunkSize});
|
||||
break;
|
||||
case 'submission':
|
||||
listing = await user.getSubmissions({limit: chunkSize});
|
||||
break;
|
||||
default:
|
||||
listing = await user.getOverview({limit: chunkSize});
|
||||
break;
|
||||
}
|
||||
} catch (err) {
|
||||
if(isStatusError(err) && err.statusCode === 404) {
|
||||
throw new SimpleError('Reddit returned a 404 for user history. Likely this user is shadowbanned.');
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
let listing = await listingFunc(chunkSize);
|
||||
let hitEnd = false;
|
||||
let offset = chunkSize;
|
||||
while (!hitEnd) {
|
||||
@@ -219,12 +226,35 @@ export async function getAuthorActivities(user: RedditUser, options: AuthorTyped
|
||||
|
||||
if (!hitEnd) {
|
||||
offset += chunkSize;
|
||||
listing = await listing.fetchMore({amount: chunkSize});
|
||||
listing = await listing.fetchMore({amount: chunkSize, ...restFetchOptions});
|
||||
}
|
||||
}
|
||||
return Promise.resolve(items);
|
||||
}
|
||||
|
||||
export async function getAuthorActivities(user: RedditUser, options: AuthorTypedActivitiesOptions): Promise<Array<Submission | Comment>> {
|
||||
|
||||
const listFunc = (chunkSize: number): Promise<Listing<Submission | Comment>> => {
|
||||
switch (options.type) {
|
||||
case 'comment':
|
||||
return user.getComments({limit: chunkSize});
|
||||
case 'submission':
|
||||
return user.getSubmissions({limit: chunkSize});
|
||||
default:
|
||||
return user.getOverview({limit: chunkSize});
|
||||
}
|
||||
};
|
||||
try {
|
||||
return await getActivities(listFunc, options);
|
||||
} catch (err: any) {
|
||||
if(isStatusError(err) && err.statusCode === 404) {
|
||||
throw new SimpleError('Reddit returned a 404 for user history. Likely this user is shadowbanned.');
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const getAuthorComments = async (user: RedditUser, options: AuthorActivitiesOptions): Promise<Comment[]> => {
|
||||
return await getAuthorActivities(user, {...options, type: 'comment'}) as unknown as Promise<Comment[]>;
|
||||
}
|
||||
@@ -336,7 +366,7 @@ export const testAuthorCriteria = async (item: (Comment | Submission), authorOpt
|
||||
if(shadowBanned) {
|
||||
return false;
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(isStatusError(err) && err.statusCode === 404) {
|
||||
// user is shadowbanned
|
||||
// if criteria specifies they should not be shadowbanned then return false now
|
||||
@@ -545,7 +575,7 @@ export const testAuthorCriteria = async (item: (Comment | Submission), authorOpt
|
||||
}
|
||||
}
|
||||
return true;
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if(isStatusError(err) && err.statusCode === 404) {
|
||||
throw new SimpleError('Reddit returned a 404 while trying to retrieve User profile. It is likely this user is shadowbanned.');
|
||||
} else {
|
||||
@@ -571,7 +601,7 @@ export const itemContentPeek = async (item: (Comment | Submission), peekLength =
|
||||
submissionTitle = item.title;
|
||||
peek = `${truncatePeek(item.title)} by ${author} https://reddit.com${item.permalink}`;
|
||||
|
||||
} else if (item instanceof Comment) {
|
||||
} else {
|
||||
// replace newlines with spaces to make peek more compact
|
||||
content = truncatePeek(item.body.replaceAll('\n', ' '));
|
||||
peek = `${truncatePeek(content)} by ${author} in https://reddit.com${item.permalink}`;
|
||||
|
||||
src/Utils/StringMatching/CosineSimilarity.ts (new file, +70 lines)
@@ -0,0 +1,70 @@
|
||||
// reproduced from https://github.com/sumn2u/string-comparison/blob/master/jscosine.js
|
||||
// https://sumn2u.medium.com/string-similarity-comparision-in-js-with-examples-4bae35f13968
|
||||
|
||||
interface StrMap {
|
||||
[key: string]: number
|
||||
}
|
||||
|
||||
interface BoolMap {
|
||||
[key: string]: boolean
|
||||
}
|
||||
|
||||
|
||||
function termFreqMap(str: string) {
|
||||
var words = str.split(' ');
|
||||
var termFreq: StrMap = {};
|
||||
words.forEach(function(w) {
|
||||
termFreq[w] = (termFreq[w] || 0) + 1;
|
||||
});
|
||||
return termFreq;
|
||||
}
|
||||
|
||||
function addKeysToDict(map: StrMap, dict: BoolMap) {
|
||||
for (var key in map) {
|
||||
dict[key] = true;
|
||||
}
|
||||
}
|
||||
|
||||
function termFreqMapToVector(map: StrMap, dict: StrMap): number[] {
|
||||
var termFreqVector = [];
|
||||
for (var term in dict) {
|
||||
termFreqVector.push(map[term] || 0);
|
||||
}
|
||||
return termFreqVector;
|
||||
}
|
||||
|
||||
function vecDotProduct(vecA: number[], vecB: number[]) {
|
||||
var product = 0;
|
||||
for (var i = 0; i < vecA.length; i++) {
|
||||
product += vecA[i] * vecB[i];
|
||||
}
|
||||
return product;
|
||||
}
|
||||
|
||||
function vecMagnitude(vec: number[]) {
|
||||
var sum = 0;
|
||||
for (var i = 0; i < vec.length; i++) {
|
||||
sum += vec[i] * vec[i];
|
||||
}
|
||||
return Math.sqrt(sum);
|
||||
}
|
||||
|
||||
function cosineSimilarity(vecA: number[], vecB: number[]) {
|
||||
return vecDotProduct(vecA, vecB) / (vecMagnitude(vecA) * vecMagnitude(vecB));
|
||||
}
|
||||
|
||||
const calculateCosineSimilarity = function textCosineSimilarity(strA: string, strB: string) {
|
||||
var termFreqA = termFreqMap(strA);
|
||||
var termFreqB = termFreqMap(strB);
|
||||
|
||||
var dict = {};
|
||||
addKeysToDict(termFreqA, dict);
|
||||
addKeysToDict(termFreqB, dict);
|
||||
|
||||
var termFreqVecA = termFreqMapToVector(termFreqA, dict);
|
||||
var termFreqVecB = termFreqMapToVector(termFreqB, dict);
|
||||
|
||||
return cosineSimilarity(termFreqVecA, termFreqVecB);
|
||||
}
|
||||
|
||||
export default calculateCosineSimilarity;
|
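Because the comparison is built from term-frequency vectors, word order is ignored and the score lands in [0, 1]. A quick usage sketch (the relative import path is assumed):

```typescript
import calculateCosineSimilarity from "./CosineSimilarity";

const a = 'check out my new video';
const b = 'my new video check out';
const c = 'completely different sentence';

console.log(calculateCosineSimilarity(a, b)); // 1 -- identical terms, order ignored
console.log(calculateCosineSimilarity(a, c)); // 0 -- no shared terms
```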
||||
src/Utils/StringMatching/levenSimilarity.ts (new file, +19 lines)
@@ -0,0 +1,19 @@
|
||||
import leven from "leven";
|
||||
|
||||
const levenSimilarity = (valA: string, valB: string) => {
|
||||
let longer: string;
|
||||
let shorter: string;
|
||||
if (valA.length > valB.length) {
|
||||
longer = valA;
|
||||
shorter = valB;
|
||||
} else {
|
||||
longer = valB;
|
||||
shorter = valA;
|
||||
}
|
||||
|
||||
const distance = leven(longer, shorter);
|
||||
const diff = (distance / longer.length) * 100;
|
||||
return [distance, 100 - diff];
|
||||
}
|
||||
|
||||
export default levenSimilarity;
|
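levenSimilarity returns both the raw edit distance and a percentage similarity normalized by the longer string's length. For example (import path assumed):

```typescript
import levenSimilarity from "./levenSimilarity";

const [distance, percent] = levenSimilarity('kitten', 'sitting');
// leven distance is 3 and the longer string has 7 characters,
// so percent = 100 - (3 / 7) * 100 ≈ 57.14
console.log(distance, percent.toFixed(2)); // 3 '57.14'
```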
||||
src/Utils/ThirdParty/YoutubeClient.ts (new file, vendored, +59 lines)
@@ -0,0 +1,59 @@
|
||||
import {URLSearchParams} from "url";
|
||||
import fetch from "node-fetch";
|
||||
import {parseUsableLinkIdentifier} from "../../util";
|
||||
import dayjs from "dayjs";
|
||||
import {youtube, youtube_v3 } from '@googleapis/youtube';
|
||||
import Schema$CommentThread = youtube_v3.Schema$CommentThread;
|
||||
import {RepostItem} from "../../Common/interfaces";
|
||||
|
||||
const parseYtIdentifier = parseUsableLinkIdentifier();
|
||||
|
||||
export class YoutubeClient {
|
||||
apiKey: string;
|
||||
client: youtube_v3.Youtube
|
||||
|
||||
constructor(key: string) {
|
||||
this.apiKey = key;
|
||||
this.client = youtube({version: 'v3', auth: key});
|
||||
}
|
||||
|
||||
getVideoTopComments = async (url: string, maxResults: number = 50): Promise<Schema$CommentThread[]> => {
|
||||
|
||||
const videoId = parseYtIdentifier(url);
|
||||
|
||||
const res = await this.client.commentThreads.list({
|
||||
part: ['snippet'],
|
||||
videoId,
|
||||
maxResults: maxResults,
|
||||
textFormat: 'plainText',
|
||||
order: 'relevance',
|
||||
});
|
||||
|
||||
const items = res.data.items as Schema$CommentThread[];
|
||||
items.sort((a, b) => (a.snippet?.topLevelComment?.snippet?.likeCount as number) - (b.snippet?.topLevelComment?.snippet?.likeCount as number)).reverse();
|
||||
|
||||
return items;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export const commentsAsRepostItems = (comments: Schema$CommentThread[]): RepostItem[] => {
|
||||
return comments.map((x) => {
|
||||
const textDisplay = x.snippet?.topLevelComment?.snippet?.textDisplay;
|
||||
const publishedAt = x.snippet?.topLevelComment?.snippet?.publishedAt;
|
||||
const id = x.snippet?.topLevelComment?.id;
|
||||
const videoId = x.snippet?.topLevelComment?.snippet?.videoId;
|
||||
|
||||
return {
|
||||
value: textDisplay as string,
|
||||
createdOn: dayjs(publishedAt as string).unix(),
|
||||
source: 'Youtube',
|
||||
id: x.snippet?.topLevelComment?.id as string,
|
||||
sourceUrl: `https://youtube.com/watch?v=${videoId}&lc=${id}`,
|
||||
score: x.snippet?.topLevelComment?.snippet?.likeCount as number,
|
||||
acquisitionType: 'external',
|
||||
itemType: 'comment'
|
||||
};
|
||||
})
|
||||
}
|
||||
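A possible wiring of the client added above, using only the methods defined in this file; the API key variable and how it gets configured are placeholders:

```typescript
import {YoutubeClient, commentsAsRepostItems} from "./YoutubeClient";

async function topCommentsAsReposts(videoUrl: string) {
    // hypothetical: the key would come from wherever third-party credentials are configured
    const yt = new YoutubeClient(process.env.YOUTUBE_API_KEY as string);
    const threads = await yt.getVideoTopComments(videoUrl, 10);
    // normalize into RepostItem objects (value, score, createdOn, sourceUrl, ...)
    return commentsAsRepostItems(threads);
}
```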
|
||||
@@ -15,7 +15,6 @@ export const getLogger = (options: any, name = 'app'): Logger => {
|
||||
|
||||
const consoleTransport = new transports.Console({
|
||||
handleExceptions: true,
|
||||
// @ts-expect-error
|
||||
handleRejections: true,
|
||||
});
|
||||
|
||||
@@ -31,7 +30,6 @@ export const getLogger = (options: any, name = 'app'): Logger => {
|
||||
name: 'duplex',
|
||||
dump: false,
|
||||
handleExceptions: true,
|
||||
// @ts-expect-error
|
||||
handleRejections: true,
|
||||
}),
|
||||
...additionalTransports,
|
||||
|
||||
@@ -39,16 +39,28 @@ import {prettyPrintJson} from "pretty-print-json";
|
||||
import DelimiterStream from 'delimiter-stream';
|
||||
import {pipeline} from 'stream/promises';
|
||||
import {defaultBotStatus} from "../Common/defaults";
|
||||
import {booleanMiddle} from "../Common/middleware";
|
||||
import {arrayMiddle, booleanMiddle} from "../Common/middleware";
|
||||
import {BotInstance, CMInstance} from "../interfaces";
|
||||
import { URL } from "url";
|
||||
import {MESSAGE} from "triple-beam";
|
||||
import Autolinker from "autolinker";
|
||||
import path from "path";
|
||||
import {ExtendedSnoowrap} from "../../Utils/SnoowrapClients";
|
||||
|
||||
const emitter = new EventEmitter();
|
||||
|
||||
const app = addAsync(express());
|
||||
app.use(bodyParser.json());
|
||||
const jsonParser = bodyParser.json();
|
||||
|
||||
// do not modify body if we are proxying it to server
|
||||
app.use((req, res, next) => {
|
||||
if(req.url.indexOf('/api') !== 0) {
|
||||
jsonParser(req, res, next);
|
||||
} else {
|
||||
next();
|
||||
}
|
||||
});
|
||||
|
||||
app.use(bodyParser.urlencoded({extended: false}));
|
||||
//app.use(cookieParser());
|
||||
app.set('views', `${__dirname}/../assets/views`);
|
||||
@@ -57,6 +69,8 @@ app.use('/public', express.static(`${__dirname}/../assets/public`));
|
||||
app.use('/monaco', express.static(`${__dirname}/../../../node_modules/monaco-editor/`));
|
||||
app.use('/schemas', express.static(`${__dirname}/../../Schema/`));
|
||||
|
||||
const userAgent = `web:contextBot:web`;
|
||||
|
||||
const proxy = httpProxy.createProxyServer({
|
||||
ws: true,
|
||||
//hostRewrite: true,
|
||||
@@ -69,6 +83,7 @@ declare module 'express-session' {
|
||||
sort?: string,
|
||||
level?: string,
|
||||
state?: string,
|
||||
scope?: string[],
|
||||
botId?: string,
|
||||
authBotId?: string,
|
||||
}
|
||||
@@ -182,9 +197,9 @@ const webClient = async (options: OperatorConfig) => {
|
||||
* */
|
||||
|
||||
passport.serializeUser(async function (data: any, done) {
|
||||
const {user, subreddits} = data;
|
||||
const {user, subreddits, scope, token} = data;
|
||||
//await webCache.set(`userSession-${user}`, { subreddits: subreddits.map((x: Subreddit) => x.display_name), isOperator: webOps.includes(user.toLowerCase()) }, {ttl: provider.ttl as number});
|
||||
done(null, { subreddits: subreddits.map((x: Subreddit) => x.display_name), isOperator: webOps.includes(user.toLowerCase()), name: user });
|
||||
done(null, { subreddits: subreddits.map((x: Subreddit) => x.display_name), isOperator: webOps.includes(user.toLowerCase()), name: user, scope, token, tokenExpiresAt: dayjs().unix() + (60 * 60) });
|
||||
});
|
||||
|
||||
passport.deserializeUser(async function (obj, done) {
|
||||
@@ -213,8 +228,8 @@ const webClient = async (options: OperatorConfig) => {
|
||||
} else if (req.session.state !== state) {
|
||||
return done('Unexpected <b>state</b> value returned');
|
||||
}
|
||||
const client = await Snoowrap.fromAuthCode({
|
||||
userAgent: `web:contextBot:web`,
|
||||
const client = await ExtendedSnoowrap.fromAuthCode({
|
||||
userAgent,
|
||||
clientId,
|
||||
clientSecret,
|
||||
redirectUri: redirectUri as string,
|
||||
@@ -222,7 +237,8 @@ const webClient = async (options: OperatorConfig) => {
|
||||
});
|
||||
const user = await client.getMe().name as string;
|
||||
const subs = await client.getModeratedSubreddits();
|
||||
return done(null, {user, subreddits: subs});
|
||||
io.to(req.session.id).emit('authStatus', {canSaveWiki: req.session.scope?.includes('wikiedit')});
|
||||
return done(null, {user, subreddits: subs, scope: req.session.scope, token: client.accessToken});
|
||||
}
|
||||
));
|
||||
|
||||
@@ -255,14 +271,24 @@ const webClient = async (options: OperatorConfig) => {
|
||||
}
|
||||
}
|
||||
|
||||
app.getAsync('/login', async (req, res, next) => {
|
||||
const scopeMiddle = arrayMiddle(['scope']);
|
||||
const successMiddle = booleanMiddle([{name: 'closeOnSuccess', defaultVal: undefined, required: false}]);
|
||||
app.getAsync('/login', scopeMiddle, successMiddle, async (req, res, next) => {
|
||||
if (redirectUri === undefined) {
|
||||
return res.render('error', {error: `No <b>redirectUri</b> was specified through environmental variables or program argument. This must be provided in order to use the web interface.`});
|
||||
}
|
||||
const {query: { scope: reqScopes = [], closeOnSuccess } } = req;
|
||||
const scope = [...new Set(['identity', 'mysubreddits', ...(reqScopes as string[])])];
|
||||
req.session.state = randomId();
|
||||
req.session.scope = scope;
|
||||
// @ts-ignore
|
||||
if(closeOnSuccess === true) {
|
||||
// @ts-ignore
|
||||
req.session.closeOnSuccess = closeOnSuccess;
|
||||
}
|
||||
const authUrl = Snoowrap.getAuthUrl({
|
||||
clientId,
|
||||
scope: ['identity', 'mysubreddits'],
|
||||
scope: scope,
|
||||
redirectUri: redirectUri as string,
|
||||
permanent: false,
|
||||
state: req.session.state,
|
||||
@@ -292,7 +318,7 @@ const webClient = async (options: OperatorConfig) => {
|
||||
// @ts-ignore
|
||||
const invite = await webCache.get(`invite:${req.session.inviteId}`) as InviteData;
|
||||
const client = await Snoowrap.fromAuthCode({
|
||||
userAgent: `web:contextBot:web`,
|
||||
userAgent,
|
||||
clientId: invite.clientId,
|
||||
clientSecret: invite.clientSecret,
|
||||
redirectUri: invite.redirectUri,
|
||||
@@ -344,7 +370,15 @@ const webClient = async (options: OperatorConfig) => {
if(e !== undefined) {
return res.render('error', {error: err});
}
return res.redirect('/');
// @ts-ignore
const useCloseRedir: boolean = req.session.closeOnSuccess as any
// @ts-ignore
delete req.session.closeOnSuccess;
if(useCloseRedir === true) {
return res.render('close');
} else {
return res.redirect('/');
}
});
})(req, res, next);
});
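When `closeOnSuccess` was set, the callback renders a `close` view instead of redirecting. Presumably that view just closes the popup window that initiated the login; a minimal sketch of such a page script, which is an assumption and not the project's actual template:

```typescript
// Hypothetical browser-side script for the 'close' view: notify the opener
// window (if any) that auth finished, then close this popup.
if (window.opener !== null) {
    window.opener.postMessage({ type: 'authComplete' }, window.location.origin);
}
window.close();
```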
@@ -572,7 +606,7 @@ const webClient = async (options: OperatorConfig) => {
try {
server = await app.listen(port);
io = new SocketServer(server);
} catch (err) {
} catch (err: any) {
logger.error('Error occurred while initializing web or socket.io server', err);
err.logged = true;
throw err;
@@ -630,7 +664,7 @@ const webClient = async (options: OperatorConfig) => {
const user = req.user as Express.User;

const isOperator = instance.operators.includes(user.name);
const canAccessBot = isOperator || intersect(user.subreddits, botInstance.subreddits).length > 0;
const canAccessBot = isOperator || intersect(user.subreddits, botInstance.subreddits.map(x => x.replace(/\\*r\/*/,''))).length > 0;
if (!user.isOperator && !canAccessBot) {
return res.status(404).render('error', {error: msg});
}
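The access check now strips any leading `r/`-style prefix from the bot's subreddit list before intersecting it with the user's moderated subreddits, so both sides compare on bare names. An illustrative, standalone version of that normalization; the helper name and regex are a sketch, not the exact expression used in the diff:

```typescript
// Illustrative helper (name is hypothetical): normalize 'r/mySub' or '/r/mySub'
// to 'mySub' so intersection with user.subreddits compares bare names.
const stripRedditPrefix = (name: string): string => name.replace(/^\/?r\//i, '');

const botSubs = ['r/mySubreddit', '/r/AnotherSub', 'PlainName'];
console.log(botSubs.map(stripRedditPrefix)); // ['mySubreddit', 'AnotherSub', 'PlainName']
```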
@@ -761,7 +795,7 @@ const webClient = async (options: OperatorConfig) => {
},
}).json() as any;

} catch(err) {
} catch(err: any) {
logger.error(`Error occurred while retrieving bot information. Will update heartbeat -- ${err.message}`, {instance: instance.friendly});
refreshClient(clients.find(x => normalizeUrl(x.host) === instance.normalUrl) as BotConnection);
return res.render('offline', {
@@ -811,25 +845,60 @@ const webClient = async (options: OperatorConfig) => {
});
});

app.getAsync('/config', async (req: express.Request, res: express.Response) => {
res.render('config', {
title: `Configuration Editor`
app.getAsync('/bot/invites', defaultSession, async (req: express.Request, res: express.Response) => {
res.render('modInvites', {
title: `Pending Moderation Invites`,
});
});

app.getAsync('/config/content', [ensureAuthenticatedApi, defaultSession, instanceWithPermissions, botWithPermissions, createUserToken], async (req: express.Request, res: express.Response) => {
const {subreddit} = req.query as any;
const resp = await got.get(`${(req.instance as CMInstance).normalUrl}/config`, {
headers: {
'Authorization': `Bearer ${req.token}`,
},
searchParams: {
subreddit,
bot: req.bot?.botName
}
}).text();
app.getAsync('/config', defaultSession, async (req: express.Request, res: express.Response) => {
const {format = 'json'} = req.query as any;
res.render('config', {
title: `Configuration Editor`,
format,
canSave: req.user?.scope?.includes('wikiedit') && req.user?.tokenExpiresAt !== undefined && dayjs.unix(req.user?.tokenExpiresAt).isAfter(dayjs())
});
});

return res.send(resp);
app.postAsync('/config', [ensureAuthenticatedApi, defaultSession, instanceWithPermissions, botWithPermissions], async (req: express.Request, res: express.Response) => {
const {subreddit} = req.query as any;
const {location, data, create = false} = req.body as any;

const client = new ExtendedSnoowrap({
userAgent,
clientId,
clientSecret,
accessToken: req.user?.token
});

try {
// @ts-ignore
const wiki = await client.getSubreddit(subreddit).getWikiPage(location);
await wiki.edit({
text: data,
reason: create ? 'Created Config through CM Web' : 'Updated through CM Web'
});
} catch (err: any) {
res.status(500);
return res.send(err.message);
}

if(create) {
try {
// @ts-ignore
await client.getSubreddit(subreddit).getWikiPage(location).editSettings({
permissionLevel: 2,
// don't list this page on r/[subreddit]/wiki/pages
listed: false,
});
} catch (err: any) {
res.status(500);
return res.send(`Successfully created wiki page for configuration but encountered error while setting visibility. You should manually set the wiki page visibility on reddit. \r\n Error: ${err.message}`);
}
}

res.status(200);
return res.send();
});

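To round out the new save flow: the config editor is expected to POST the raw config text plus the target wiki page location back to this route, which then writes it using the user's own access token. A sketch of what such a request might look like from the browser; the route shape (`subreddit` in the query, `location`/`data`/`create` in the body) is taken from the handler above, while the subreddit and page names are purely illustrative:

```typescript
// Illustrative request from the config editor (subreddit and wiki page are examples).
async function saveConfig(configText: string): Promise<void> {
    const res = await fetch('/config?subreddit=mySubreddit', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            location: 'botconfig/contextbot', // wiki page to write
            data: configText,                 // raw config text from the editor
            create: false                     // true = also set wiki visibility after creating
        })
    });
    if (!res.ok) {
        throw new Error(await res.text());
    }
}
```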
app.getAsync('/events', [ensureAuthenticatedApi, defaultSession, instanceWithPermissions, botWithPermissions, createUserToken], async (req: express.Request, res: express.Response) => {
@@ -989,7 +1058,7 @@ const webClient = async (options: OperatorConfig) => {
}
}).json() as object;
io.to(session.id).emit('opStats', resp);
} catch (err) {
} catch (err: any) {
logger.error(`Could not retrieve stats ${err.message}`, {instance: bot.friendly});
clearInterval(interval);
}
@@ -1027,7 +1096,7 @@ const webClient = async (options: OperatorConfig) => {
}
}).json() as object;
return {success: true, ...resp};
} catch (err) {
} catch (err: any) {
return {success: false, error: err.message};
}
}
@@ -1096,7 +1165,7 @@ const webClient = async (options: OperatorConfig) => {
// botStat.indicator = 'red';
// }
logger.verbose(`Heartbeat detected`, {instance: botStat.friendly});
} catch (err) {
} catch (err: any) {
botStat.error = err.message;
logger.error(`Heartbeat response from ${botStat.friendly} was not ok: ${err.message}`, {instance: botStat.friendly});
} finally {
Some files were not shown because too many files have changed in this diff.