mirror of
https://github.com/FoxxMD/context-mod.git
synced 2026-01-14 07:57:57 -05:00
Compare commits
146 Commits
0.11.0
...
imageCompa
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2028843714 | ||
|
|
873f9d3c91 | ||
|
|
783ef5db53 | ||
|
|
f0cb5c1315 | ||
|
|
8e1b916ea4 | ||
|
|
4acf87eacd | ||
|
|
5dd5a32c51 | ||
|
|
207907881f | ||
|
|
44da276d41 | ||
|
|
6c98b6f995 | ||
|
|
cc0c3dfe61 | ||
|
|
b48d75fda3 | ||
|
|
2adf2d258d | ||
|
|
c55a1c6502 | ||
|
|
0011ff8853 | ||
|
|
4bbf871051 | ||
|
|
54755dc480 | ||
|
|
01f95a37e7 | ||
|
|
5ddad418b0 | ||
|
|
b5b2e88c1f | ||
|
|
194ded7be6 | ||
|
|
7ba375d702 | ||
|
|
9a4c38151f | ||
|
|
f8df6fc93f | ||
|
|
86a3b229cb | ||
|
|
ca3e8d7d80 | ||
|
|
5af4384871 | ||
|
|
47957e6ab9 | ||
|
|
7f1a404b4e | ||
|
|
1f64a56260 | ||
|
|
366cb2b629 | ||
|
|
b9b442ad1e | ||
|
|
81a1bdb446 | ||
|
|
1e4b369b1e | ||
|
|
7a34a7b531 | ||
|
|
b83bb6f998 | ||
|
|
3348af2780 | ||
|
|
04896a7363 | ||
|
|
d8003e049c | ||
|
|
b67a933084 | ||
|
|
d684ecc0ff | ||
|
|
9efd4751d8 | ||
|
|
9331c2a3c8 | ||
|
|
d6f7ce2441 | ||
|
|
ffd7033faf | ||
|
|
df5825d8df | ||
|
|
42c6ca7af5 | ||
|
|
1e94835f97 | ||
|
|
6230ef707d | ||
|
|
b290a4696d | ||
|
|
4c965f7215 | ||
|
|
ce990094a1 | ||
|
|
4196d2acb0 | ||
|
|
3150da8b4a | ||
|
|
655c82d5e1 | ||
|
|
73302b718e | ||
|
|
bbf91ceac0 | ||
|
|
67b793c2aa | ||
|
|
d635e5a65d | ||
|
|
8dc140a953 | ||
|
|
eaa9f627e2 | ||
|
|
16cb28cb72 | ||
|
|
fa450f9f8f | ||
|
|
1e76ca6c0e | ||
|
|
25b7ea497f | ||
|
|
2f93774346 | ||
|
|
e2f6a92a90 | ||
|
|
0ffddaac9e | ||
|
|
4ddf6ddf26 | ||
|
|
7f61c190ea | ||
|
|
fbf328d90f | ||
|
|
36c6f7f1b8 | ||
|
|
0379ad17b9 | ||
|
|
02ed9f91f9 | ||
|
|
9fcc3db7b2 | ||
|
|
65ea84a69d | ||
|
|
a4b8d3a8ef | ||
|
|
2692a5fecb | ||
|
|
192c1659a0 | ||
|
|
cc241e41f4 | ||
|
|
4bfb57a6cf | ||
|
|
b6dedae7a1 | ||
|
|
f2783bd7a4 | ||
|
|
2efe41eadd | ||
|
|
ebd60b9abe | ||
|
|
2a16df49a4 | ||
|
|
0d6841259b | ||
|
|
8c0755c8c2 | ||
|
|
5e1da5bc5d | ||
|
|
242c6a49b5 | ||
|
|
aa399c160e | ||
|
|
d7cab4092d | ||
|
|
0370e592f9 | ||
|
|
116d06733a | ||
|
|
22a8a694a7 | ||
|
|
2ed24eee11 | ||
|
|
8822d8520a | ||
|
|
9832292a5b | ||
|
|
7a86c722fa | ||
|
|
2ca4043c02 | ||
|
|
4da8a0b353 | ||
|
|
492ff78b13 | ||
|
|
64a0b0890d | ||
|
|
546daddd49 | ||
|
|
f91d81029f | ||
|
|
68ee1718e0 | ||
|
|
c0d19ede39 | ||
|
|
bb05d64428 | ||
|
|
1977c7317f | ||
|
|
6f784d5aa2 | ||
|
|
4b5c9b82e4 | ||
|
|
0315ad23ae | ||
|
|
da70753f42 | ||
|
|
d59f1b63d1 | ||
|
|
7542947029 | ||
|
|
2d02434e7e | ||
|
|
e2824ea94c | ||
|
|
1c94548947 | ||
|
|
2073e3f650 | ||
|
|
90b8f481ec | ||
|
|
9ad9092e9e | ||
|
|
12adfe9975 | ||
|
|
83dceddae8 | ||
|
|
99b46cb97f | ||
|
|
3ac07cb3e2 | ||
|
|
d7f08d4e27 | ||
|
|
338f393969 | ||
|
|
57e930ca8a | ||
|
|
af3b917b57 | ||
|
|
d01bcc53fe | ||
|
|
e2fe2b4745 | ||
|
|
785099b20c | ||
|
|
726ceb03d2 | ||
|
|
1c37771591 | ||
|
|
67aeaea5f1 | ||
|
|
a8ac4b8497 | ||
|
|
71571d3672 | ||
|
|
2799b6caeb | ||
|
|
e8f94ad1be | ||
|
|
4411d1a413 | ||
|
|
c919532aac | ||
|
|
522ba33377 | ||
|
|
3a18cc219f | ||
|
|
554d7dd86e | ||
|
|
29c3924ab7 | ||
|
|
5551f2c63f |
@@ -8,6 +8,7 @@ coverage
|
||||
.idea
|
||||
*.bak
|
||||
*.sqlite
|
||||
*.sqlite*
|
||||
*.json
|
||||
*.json5
|
||||
*.yaml
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -383,7 +383,7 @@ dist
|
||||
**/src/**/*.js
|
||||
**/tests/**/*.js
|
||||
**/tests/**/*.map
|
||||
!src/Web/assets/public/yaml/*
|
||||
!src/Web/assets/**
|
||||
**/src/**/*.map
|
||||
/**/*.sqlite
|
||||
/**/*.bak
|
||||
|
||||
@@ -111,13 +111,19 @@ COPY --from=build --chown=abc:abc /app /app
|
||||
|
||||
RUN npm install --production \
|
||||
&& npm cache clean --force \
|
||||
&& chown abc:abc node_modules
|
||||
&& chown abc:abc node_modules \
|
||||
&& rm -rf node_modules/ts-node \
|
||||
&& rm -rf node_modules/typescript
|
||||
|
||||
ENV NPM_CONFIG_LOGLEVEL debug
|
||||
|
||||
# can set database to use more performant better-sqlite3 since we control everything
|
||||
ENV DB_DRIVER=better-sqlite3
|
||||
|
||||
# NODE_ARGS are expanded after `node` command in the entrypoint IE "node {NODE_ARGS} src/index.js run"
|
||||
# by default enforce better memory mangement by limiting max long-lived GC space to 512MB
|
||||
ENV NODE_ARGS="--max_old_space_size=512"
|
||||
|
||||
ARG webPort=8085
|
||||
ENV PORT=$webPort
|
||||
EXPOSE $PORT
|
||||
|
||||
@@ -30,6 +30,7 @@ Feature Highlights for **Moderators:**
|
||||
* Event notification via Discord
|
||||
* [**Web interface**](#web-ui-and-screenshots) for monitoring, administration, and oauth bot authentication
|
||||
* [**Placeholders**](/docs/subreddit/actionTemplating.md) (like automoderator) can be configured via a wiki page or raw text and supports [mustache](https://mustache.github.io) templating
|
||||
* [**Partial Configurations**](/docs/subreddit/components/README.md#partial-configurations) -- offload parts of your configuration to shared locations to consolidate logic between multiple subreddits
|
||||
|
||||
Feature highlights for **Developers and Hosting (Operators):**
|
||||
|
||||
@@ -43,6 +44,7 @@ Feature highlights for **Developers and Hosting (Operators):**
|
||||
* Historical statistics
|
||||
* [Docker container support](/docs/operator/installation.md#docker-recommended)
|
||||
* Easy, UI-based [OAuth authentication](/docs/operator/addingBot.md) for adding Bots and moderator dashboard
|
||||
* Integration with [InfluxDB](https://www.influxdata.com) for detailed [time-series metrics](/docs/operator/database.md#influx) and a pre-built [Grafana](https://grafana.com) [dashboard](/docs/operator/database.md#grafana)
|
||||
|
||||
# Table of Contents
|
||||
|
||||
@@ -145,6 +147,13 @@ A built-in editor using [monaco-editor](https://microsoft.github.io/monaco-edito
|
||||
|
||||

|
||||
|
||||
### [Grafana Dashboard](/docs/operator/database.md#grafana)
|
||||
|
||||
* Overall stats (active bots/subreddits, api calls, per second/hour/minute activity ingest)
|
||||
* Over time graphs for events, per subreddit, and for individual rules/check/actions
|
||||
|
||||

|
||||
|
||||
## License
|
||||
|
||||
[MIT](/LICENSE)
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
# used https://github.com/linuxserver/docker-plex as a template
|
||||
|
||||
# NODE_ARGS can be passed by ENV in docker command like "docker run foxxmd/context-mod -e NODE_ARGS=--optimize_for_size"
|
||||
|
||||
exec \
|
||||
s6-setuidgid abc \
|
||||
/usr/local/bin/node /app/src/index.js run
|
||||
/usr/local/bin/node $NODE_ARGS /app/src/index.js run
|
||||
|
||||
BIN
docs/images/grafana.jpg
Normal file
BIN
docs/images/grafana.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 183 KiB |
@@ -130,3 +130,59 @@ retention: 2000
|
||||
runs:
|
||||
...
|
||||
```
|
||||
|
||||
# Influx
|
||||
|
||||
ContextMod supports writing detailed time-series data to [InfluxDB](https://www.influxdata.com/).
|
||||
|
||||
This data can be used to monitor the overall health, performance, and metrics for a ContextMod server. Currently, this data can **only be used by an Operator** as it requires access to the operator configuration and CM instance.
|
||||
|
||||
CM supports InfluxDB OSS > 2.3 or InfluxDB Cloud.
|
||||
|
||||
**Note:** This is an **advanced feature** and assumes you have enough technical knowledge to follow the documentation provided by each application to deploy and configure them. No support is guaranteed for installation, configuration, or use of Influx and Grafana.
|
||||
|
||||
## Supported Metrics
|
||||
|
||||
TBA
|
||||
|
||||
## Setup
|
||||
|
||||
### InfluxDB OSS
|
||||
|
||||
* Install [InfluxDB](https://docs.influxdata.com/influxdb/v2.3/install/)
|
||||
* [Configure InfluxDB using the UI](https://docs.influxdata.com/influxdb/v2.3/install/#set-up-influxdb-through-the-ui)
|
||||
* You will need **Username**, **Password**, **Organization Name**, and **Bucket Name** later for Grafana setup so make sure to record them somewhere
|
||||
* [Create a Token](https://docs.influxdata.com/influxdb/v2.3/security/tokens/create-token/) with enough permissions to write/read to the bucket you configured
|
||||
* After the token is created **view/copy the token** to clipboard by clicking the token name. You will need this for Grafana setup.
|
||||
|
||||
### ContextMod
|
||||
|
||||
Add the following block to the top-level of your operator configuration:
|
||||
|
||||
```yaml
|
||||
influxConfig:
|
||||
credentials:
|
||||
url: 'http://localhost:8086' # URL to your influx DB instance
|
||||
token: '9RtZ5YZ6bfEXAMPLENJsTSKg==' # token created in the previous step
|
||||
org: MyOrg # organization created in the previous step
|
||||
bucket: contextmod # name of the bucket created in the previous step
|
||||
```
|
||||
|
||||
## Grafana
|
||||
|
||||
A pre-built dashboard for [Grafana](https://grafana.com) can be imported to display overall metrics/stats using InfluxDB data.
|
||||
|
||||

|
||||
|
||||
* Create a new Data Source using **InfluxDB** type
|
||||
* Choose **Flux** for the **Query Language**
|
||||
* Fill in the details for **URL**, **Basic Auth Details** and **InfluxDB Details** using the data you created in the [Influx Setup step](#influxdb-oss)
|
||||
* Set **Min time interval** to `60s`
|
||||
* Click **Save and test**
|
||||
* Import Dashboard
|
||||
* **Browse** the Dashboard pane
|
||||
* Click **Import** and **upload** the [grafana dashboard json file](/docs/operator/grafana.json)
|
||||
* Chose the data source you created from the **InfluxDB CM** dropdown
|
||||
* Click **Import**
|
||||
|
||||
The dashboard can be filtered by **Bots** and **Subreddits** dropdowns at the top of the page to get more specific details.
|
||||
|
||||
3148
docs/operator/grafana.json
Normal file
3148
docs/operator/grafana.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -10,7 +10,7 @@ PROTIP: Using a container management tool like [Portainer.io CE](https://www.por
|
||||
|
||||
### [Dockerhub](https://hub.docker.com/r/foxxmd/context-mod)
|
||||
|
||||
An example of starting the container using the [minimum configuration](/docs/operator/operatorConfiguration.md#minimum-config) with a [configuration file](/docs/operator/operatorConfiguration.md#defining-configuration-via-file):
|
||||
An example of starting the container using the [minimum configuration](/docs/operator/configuration.md#minimum-config):
|
||||
|
||||
* Bind the directory where your config file, logs, and database are located on your host machine into the container's default `DATA_DIR` by using `-v /host/path/folder:/config`
|
||||
* Expose the web interface using the container port `8085`
|
||||
@@ -76,3 +76,21 @@ Be aware that Heroku's [free dyno plan](https://devcenter.heroku.com/articles/fr
|
||||
* The **Worker** dyno **will not** go to sleep but you will NOT be able to access the web interface. You can, however, still see how Cm is running by reading the logs for the dyno.
|
||||
|
||||
If you want to use a free dyno it is recommended you perform first-time setup (bot authentication and configuration, testing, etc...) with the **Web** dyno, then SWITCH to a **Worker** dyno so it can run 24/7.
|
||||
|
||||
# Memory Management
|
||||
|
||||
Node exhibits [lazy GC cleanup](https://github.com/FoxxMD/context-mod/issues/90#issuecomment-1190384006) which can result in memory usage for long-running CM instances increasing to unreasonable levels. This problem does not seem to be an issue with CM itself but with Node's GC approach. The increase does not affect CM's performance and, for systems with less memory, the Node *should* limit memory usage based on total available.
|
||||
|
||||
In practice CM uses ~130MB for a single bot, single subreddit setup. Up to ~350MB for many (10+) bots or many (20+) subreddits.
|
||||
|
||||
If you need to reign in CM's memory usage for some reason this can be addressed by setting an upper limit for memory usage with `node` args by using either:
|
||||
|
||||
**--max_old_space_size=**
|
||||
|
||||
Value is megabytes. This sets an explicit limit on GC memory usage.
|
||||
|
||||
This is set by default in the [Docker](#docker-recommended) container using the env `NODE_ARGS` to `--max_old_space_size=512`. It can be disabled by overriding the ENV.
|
||||
|
||||
**--optimize_for_size**
|
||||
|
||||
Tells Node to optimize for (less) memory usage rather than some performance optimizations. This option is not memory size dependent. In practice performance does not seem to be affected and it reduces (but not entirely prevents) memory increases over long periods.
|
||||
|
||||
@@ -2,44 +2,92 @@ Actions that can submit text (Report, Comment, UserNote) will have their `conten
|
||||
|
||||
See here for a [cheatsheet](https://gist.github.com/FoxxMD/d365707cf99fdb526a504b8b833a5b78) and [here](https://www.tsmean.com/articles/mustache/the-ultimate-mustache-tutorial/) for a more thorough tutorial.
|
||||
|
||||
# Template Data
|
||||
|
||||
## Activity Data
|
||||
|
||||
Activity data can be accessed using the `item` variable. Example
|
||||
|
||||
```
|
||||
This activity is a {{item.kind}} with {{item.votes}} votes, created {{item.age}} ago.
|
||||
```
|
||||
Produces:
|
||||
|
||||
> This activity is a submission with 10 votes created 5 minutes ago.
|
||||
|
||||
### Common
|
||||
|
||||
All Actions with `content` have access to this data:
|
||||
|
||||
```json5
|
||||
| Name | Description | Example |
|
||||
|-------------|-----------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|
|
||||
| `kind` | The Activity type (submission or comment) | submission |
|
||||
| `author` | Name of the Author of the Activity being processed | FoxxMD |
|
||||
| `permalink` | URL to the Activity | https://reddit.com/r/mySuibreddit/comments/ab23f/my_post |
|
||||
| `votes` | Number of upvotes | 69 |
|
||||
| `age` | The age of the Activity in a [human friendly format](https://day.js.org/docs/en/durations/humanize) | 5 minutes |
|
||||
| `botLink` | A URL to CM's introduction thread | https://www.reddit.com/r/ContextModBot/comments/otz396/introduction_to_contextmodbot |
|
||||
|
||||
{
|
||||
item: {
|
||||
kind: 'string', // the type of item (comment/submission)
|
||||
author: 'string', // name of the item author (reddit user)
|
||||
permalink: 'string', // a url to the item
|
||||
url: 'string', // if the item is a Submission then its URL (external for link type submission, reddit link for self-posts)
|
||||
title: 'string', // if the item is a Submission, then the title of the Submission,
|
||||
botLink: 'string' // a link to the bot's FAQ
|
||||
},
|
||||
rules: {
|
||||
// contains all rules that were run and are accessible using the name, lowercased, with all spaces/dashes/underscores removed
|
||||
}
|
||||
}
|
||||
### Submissions
|
||||
|
||||
If the **Activity** is a Submission these additional properties are accessible:
|
||||
|
||||
| Name | Description | Example |
|
||||
|---------------|-----------------------------------------------------------------|-------------------------|
|
||||
| `upvoteRatio` | The upvote ratio | 100% |
|
||||
| `nsfw` | If the submission is marked as NSFW | true |
|
||||
| `spoiler` | If the submission is marked as a spoiler | true |
|
||||
| `url` | If the submission was a link then this is the URL for that link | http://example.com |
|
||||
| `title` | The title of the submission | Test post please ignore |
|
||||
|
||||
### Comments
|
||||
|
||||
If the **Activity** is a Comment these additional properties are accessible:
|
||||
|
||||
| Name | Description | Example |
|
||||
|------|--------------------------------------------------------------|---------|
|
||||
| `op` | If the Author is the OP of the Submission this comment is in | true |
|
||||
|
||||
### Moderator
|
||||
|
||||
If the **Activity** occurred in a Subreddit the Bot moderates these properties are accessible:
|
||||
|
||||
| Name | Description | Example |
|
||||
|---------------|-------------------------------------|---------|
|
||||
| `reports` | The number of reports recieved | 1 |
|
||||
| `modReports` | The number of reports by moderators | 1 |
|
||||
| `userReports` | The number of reports by users | 1 |
|
||||
|
||||
## Rule Data
|
||||
|
||||
### Summary
|
||||
|
||||
A summary of what rules were processed and which were triggered, with results, is available using the `ruleSummary` variable. Example:
|
||||
|
||||
```
|
||||
A summary of rules processed for this activity:
|
||||
|
||||
The properties of `rules` are accessible using the name, lower-cased, with all spaces/dashes/underscores. If no name is given `kind` is used as `name` Example:
|
||||
|
||||
{{ruleSummary}}
|
||||
```
|
||||
|
||||
"rules": [
|
||||
{
|
||||
"name": "My Custom-Recent Activity Rule", // mycustomrecentactivityrule
|
||||
"kind": "recentActivity"
|
||||
},
|
||||
{
|
||||
// name = repeatsubmission
|
||||
"kind": "repeatActivity",
|
||||
}
|
||||
]
|
||||
Would produce:
|
||||
> A summary of rules processed for this activity:
|
||||
>
|
||||
> * namedRegexRule - ✘
|
||||
> * nameAttributionRule - ✓ - 1 Attribution(s) met the threshold of < 20%, with 1 (3%) of 32 Total -- window: 6 months
|
||||
> * noXPost ✓ - ✓ 1 of 1 unique items repeated <= 3 times, largest repeat: 1
|
||||
|
||||
|
||||
### Individual
|
||||
|
||||
Individual **Rules** can be accessed using the name of the rule, **lower-cased, with all spaces/dashes/underscores.** Example:
|
||||
|
||||
```
|
||||
Submission was repeated {{rules.noxpost.largestRepeat}} times
|
||||
```
|
||||
Produces
|
||||
|
||||
**To see what data is available for individual Rules [consult the schema](#configuration) for each Rule.**
|
||||
> Submission was repeated 7 times
|
||||
|
||||
#### Quick Templating Tutorial
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ This list is not exhaustive. [For complete documentation on a subreddit's config
|
||||
* [Author](#author)
|
||||
* [Regex](#regex)
|
||||
* [Repost](#repost)
|
||||
* [Sentiment Analysis](#sentiment-analysis)
|
||||
* [Rule Sets](#rule-sets)
|
||||
* [Actions](#actions)
|
||||
* [Named Actions](#named-actions)
|
||||
@@ -39,10 +40,13 @@ This list is not exhaustive. [For complete documentation on a subreddit's config
|
||||
* [Message](#message)
|
||||
* [Remove](#remove)
|
||||
* [Report](#report)
|
||||
* [UserNote](#usernote)
|
||||
* [Toolbox UserNote](#usernote)
|
||||
* [Mod Note](#mod-note)
|
||||
* [Filters](#filters)
|
||||
* [Filter Types](#filter-types)
|
||||
* [Author Filter](#author-filter)
|
||||
* [Mod Notes/Actions](#mod-actionsnotes-filter)
|
||||
* [Toolbox UserNotes](#toolbox-usernotes-filter)
|
||||
* [Item Filter](#item-filter)
|
||||
* [Subreddit Filter](#subreddit-filter)
|
||||
* [Named Filters](#named-filters)
|
||||
@@ -63,6 +67,7 @@ This list is not exhaustive. [For complete documentation on a subreddit's config
|
||||
* [Check Order](#check-order)
|
||||
* [Rule Order](#rule-order)
|
||||
* [Configuration Re-use and Caching](#configuration-re-use-and-caching)
|
||||
* [Partial Configurations](#partial-configurations)
|
||||
* [Subreddit-ready examples](#subreddit-ready-examples)
|
||||
|
||||
# Runs
|
||||
@@ -365,6 +370,12 @@ The **Repost** rule is used to find reposts for both **Submissions** and **Comme
|
||||
|
||||
This rule is for searching **all of Reddit** for reposts, as opposed to just the history of the Author of the Activity being checked. If you only want to check for reposts by the Author of the Activity being checked you should use the [Repeat Activity](/docs/subreddit/components/repeatActivity) rule.
|
||||
|
||||
### Sentiment Analysis
|
||||
|
||||
[**Full Documentation**](/docs/subreddit/components/sentiment)
|
||||
|
||||
The **Sentiment Rule** is used to determine the overall emotional intent (negative, neutral, positive) of a Submission or Comment by analyzing the actual text content of the Activity.
|
||||
|
||||
# Rule Sets
|
||||
|
||||
The `rules` list on a `Check` can contain both `Rule` objects and `RuleSet` objects.
|
||||
@@ -612,16 +623,19 @@ actions:
|
||||
|
||||
Remove the Activity being processed. [Schema Documentation](https://json-schema.app/view/%23/%23%2Fdefinitions%2FSubmissionCheckJson/%23%2Fdefinitions%2FRemoveActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fedge%2Fsrc%2FSchema%2FApp.json)
|
||||
|
||||
* **note** can be [templated](#templating)
|
||||
* **reasonId** IDs can be found in the [editor](/docs/webInterface.md) using the **Removal Reasons** popup
|
||||
|
||||
If neither note nor reasonId are included then no removal reason is added.
|
||||
|
||||
```yaml
|
||||
actions:
|
||||
- kind: remove
|
||||
spam: boolean # optional, mark as spam on removal
|
||||
spam: false # optional, mark as spam on removal
|
||||
note: 'a moderator-readable note' # optional, a note only visible to moderators (new reddit only)
|
||||
reasonId: '2n0f4674-365e-46d2-8fc7-a337d85d5340' # optional, the ID of a removal reason to add to the removal action (new reddit only)
|
||||
```
|
||||
|
||||
#### What About Removal Reason?
|
||||
|
||||
Reddit does not support setting a removal reason through the API. Please complain in [r/modsupport](https://www.reddit.com/r/modsupport) or [r/redditdev](https://www.reddit.com/r/redditdev) to help get this added :)
|
||||
|
||||
### Report
|
||||
|
||||
Report the Activity being processed. [Schema Documentation](https://json-schema.app/view/%23/%23%2Fdefinitions%2FSubmissionCheckJson/%23%2Fdefinitions%2FReportActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fedge%2Fsrc%2FSchema%2FApp.json)
|
||||
@@ -651,6 +665,30 @@ actions:
|
||||
allowDuplicate: boolean # if false then the usernote will not be added if the same note appears for this activity
|
||||
```
|
||||
|
||||
### Mod Note
|
||||
|
||||
[**Full Documentation**](/docs/subreddit/components/modActions/README.md#mod-note-action)
|
||||
|
||||
Add a [Mod Note](https://www.reddit.com/r/modnews/comments/t8vafc/announcing_mod_notes/) for the Author of the Activity.
|
||||
|
||||
* `type` must be one of the [valid note labels](https://www.reddit.com/dev/api#POST_api_mod_notes):
|
||||
* BOT_BAN
|
||||
* PERMA_BAN
|
||||
* BAN
|
||||
* ABUSE_WARNING
|
||||
* SPAM_WARNING
|
||||
* SPAM_WATCH
|
||||
* SOLID_CONTRIBUTOR
|
||||
* HELPFUL_USER
|
||||
|
||||
```yaml
|
||||
actions:
|
||||
- kind: modnote
|
||||
type: SPAM_WATCH
|
||||
content: 'a note only mods can see message' # optional
|
||||
referenceActivity: boolean # if true the Note will be linked to the Activity being processed
|
||||
```
|
||||
|
||||
# Filters
|
||||
|
||||
**Filters** are an additional channel for determining if an Event should be processed by ContextMod. They differ from **Rules** in several key ways:
|
||||
@@ -732,6 +770,14 @@ There are two types of Filter. Both types have the same "shape" in the configura
|
||||
|
||||
Test the Author of an Activity. See [Schema documentation](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fedge%2Fsrc%2FSchema%2FApp.json) for all possible Author Criteria
|
||||
|
||||
#### Mod Actions/Notes Filter
|
||||
|
||||
See [Mod Actions/Notes](/docs/subreddit/components/modActions/README.md#mod-action-filter) documentation.
|
||||
|
||||
#### Toolbox UserNotes Filter
|
||||
|
||||
See [UserNotes](/docs/subreddit/components/userNotes/README.md) documentation
|
||||
|
||||
### Item Filter
|
||||
|
||||
Test for properties of an Activity:
|
||||
@@ -1031,7 +1077,7 @@ If the Check is using `AND` condition for its rules (default) then if either Rul
|
||||
|
||||
**It is therefore advantageous to list your lightweight Rules first in each Check.**
|
||||
|
||||
### Configuration Re-use and Caching
|
||||
## Configuration Re-use and Caching
|
||||
|
||||
ContextMod implements caching functionality for:
|
||||
|
||||
@@ -1055,6 +1101,116 @@ PROTIP: You can monitor the re-use of cache in the `Cache` section of your subre
|
||||
|
||||
[Learn more about how Caching works](/docs/operator/caching.md)
|
||||
|
||||
## Partial Configurations
|
||||
|
||||
ContextMod supports fetching parts of a configuration (a **Fragment**) from an external source. Fragments are an advanced feature and should only be used by users who are familiar with CM's configuration syntax and understand the risks/downsides associates with fragmenting a configuration.
|
||||
|
||||
**Fragments** are supported for:
|
||||
|
||||
* [Runs](#runs)
|
||||
* [Checks](#checks)
|
||||
* [Rules](#rules)
|
||||
* [Actions](#actions)
|
||||
|
||||
### Should You Use Partial Configurations?
|
||||
|
||||
* **PROS**
|
||||
* Consolidate shared configuration for many subreddits into one location
|
||||
* Shared configuration can be updated independently of subreddits
|
||||
* Allows sharing access to configuration outside of moderators of a specific subreddit or even reddit
|
||||
* **CONS**
|
||||
* Editor does not currently support viewing, editing, or updating Fragments. Only the Fragment URL is visible in a Subreddit's configuration
|
||||
* No editor support for viewing obscures "complete view" of configuration and makes editor less useful for validation
|
||||
* Currently, editor cannot validate individual Fragments. They must be copy-pasted "in place" within a normal configuration.
|
||||
* Using external (non-wiki) sources means **you** are responsible for the security/access to the fragment
|
||||
|
||||
In general, Fragments should only be used to offload small, well-tested pieces of a configuration that can be shared between many subreddits. Examples:
|
||||
|
||||
* A regex Rule for spam links
|
||||
* A Recent Activity Rule for reporting users from freekarma subreddits
|
||||
|
||||
### Usage
|
||||
|
||||
A Fragment may be either a special string or a Fragment object. The fetched Fragment can be either an object or an array of objects of the type of Fragment being replaced.
|
||||
|
||||
**String**
|
||||
|
||||
If value starts with `wiki:` then the proceeding value will be used to get a wiki page from the current subreddit
|
||||
|
||||
* EX `wiki:botconfig/myFragment` tries to get `https://reddit.com/r/currentSubreddit/wiki/botconfig/myFragment`
|
||||
|
||||
If the value starts with `wiki:` and ends with `|someValue` then `someValue` will be used as the base subreddit for the wiki page
|
||||
|
||||
* EX `wiki:myFragment/test|ContextModBot` tries to get `https://reddit.com/r/ContextModBot/wiki/myFragment/test`
|
||||
|
||||
If the value starts with `url:` then the value is fetched as an external url and expects raw text returned
|
||||
|
||||
* EX `url:https://pastebin.com/raw/38qfL7mL` tries to get the text response of `https://pastebin.com/raw/38qfL7mL`
|
||||
|
||||
**Object**
|
||||
|
||||
The object contains:
|
||||
|
||||
* `path` -- REQUIRED string following rules above
|
||||
* `ttl` -- OPTIONAL, number of seconds to cache the URL result. Defaults to `WikiTTL`
|
||||
|
||||
#### Examples
|
||||
|
||||
**Replacing A Rule with a URL Fragment**
|
||||
|
||||
```yaml
|
||||
runs:
|
||||
- checks:
|
||||
- name: Free Karma Alert
|
||||
description: Check if author has posted in 'freekarma' subreddits
|
||||
kind: submission
|
||||
rules:
|
||||
- 'url:https://gist.githubusercontent.com/FoxxMD/0e1ee1ab950ff4d1f0cd26172bae7f8f/raw/0ebfaca903e4a651827effac5775c8718fb6e1f2/fragmentRule.yaml'
|
||||
- name: badSub
|
||||
kind: recentActivity
|
||||
useSubmissionAsReference: false
|
||||
thresholds:
|
||||
# if the number of activities (sub/comment) found CUMULATIVELY in the subreddits listed is
|
||||
# equal to or greater than 1 then the rule is triggered
|
||||
- threshold: '>= 1'
|
||||
subreddits:
|
||||
- MyBadSubreddit
|
||||
window: 7 days
|
||||
actions:
|
||||
- kind: report
|
||||
content: 'uses freekarma subreddits and bad subreddits'
|
||||
```
|
||||
|
||||
**Replacing A Rule with a URL Fragment (Multiple)**
|
||||
|
||||
```yaml
|
||||
runs:
|
||||
- checks:
|
||||
- name: Free Karma Alert
|
||||
description: Check if author has posted in 'freekarma' subreddits
|
||||
kind: submission
|
||||
rules:
|
||||
- 'url:https://gist.githubusercontent.com/FoxxMD/0e1ee1ab950ff4d1f0cd26172bae7f8f/raw/0ebfaca903e4a651827effac5775c8718fb6e1f2/fragmentRuleArray.yaml'
|
||||
actions:
|
||||
- kind: report
|
||||
content: 'uses freekarma subreddits and bad subreddits'
|
||||
```
|
||||
|
||||
**Replacing A Rule with a Wiki Fragment**
|
||||
|
||||
```yaml
|
||||
runs:
|
||||
- checks:
|
||||
- name: Free Karma Alert
|
||||
description: Check if author has posted in 'freekarma' subreddits
|
||||
kind: submission
|
||||
rules:
|
||||
- 'wiki:freeKarmaFrag'
|
||||
actions:
|
||||
- kind: report
|
||||
content: 'uses freekarma subreddits'
|
||||
```
|
||||
|
||||
# Subreddit-Ready Examples
|
||||
|
||||
Refer to the [Subreddit-Ready Examples](/docs/subreddit/components/subredditReady) section to find ready-to-use configurations for common scenarios (spam, freekarma blocking, etc...). This is also a good place to familiarize yourself with what complete configurations look like.
|
||||
|
||||
152
docs/subreddit/components/modActions/README.md
Normal file
152
docs/subreddit/components/modActions/README.md
Normal file
@@ -0,0 +1,152 @@
|
||||
# Table of Contents
|
||||
|
||||
* [Overview](#overview)
|
||||
* [Mod Note Action](#mod-note-action)
|
||||
* [Mod Action Filter](#mod-action-filter)
|
||||
* [API Usage](#api-usage)
|
||||
* [When To Use?](#when-to-use)
|
||||
* [Examples](#examples)
|
||||
|
||||
# Overview
|
||||
|
||||
[Mod Notes](https://www.reddit.com/r/modnews/comments/t8vafc/announcing_mod_notes/) is a feature for New Reddit that allows moderators to add short, categorizable notes to Users of their subreddit, optionally associating the note with a submission/comment the User made. They are inspired by [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes) which are also [supported by ContextMod.](/docs/subreddit/components/userNotes) Reddit's **Mod Notes** also combine [Moderation Log](https://mods.reddithelp.com/hc/en-us/articles/360022402312-Moderation-Log) actions (**Mod Actions**) for the selected User alongside moderator notes, enabling a full "overview" of moderator interactions with a User in their subreddit.
|
||||
|
||||
ContextMod supports adding **Mod Notes** to an Author using an [Action](/docs/subreddit/components/README.md#mod-note) and using **Mod Actions/Mod Notes** as a criteria in an [Author Filter](/docs/subreddit/components/README.md#author-filter)
|
||||
|
||||
# Mod Note Action
|
||||
|
||||
[**Schema Reference**](https://json-schema.app/view/%23%2Fdefinitions%2FModNoteActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fedge%2Fsrc%2FSchema%2FApp.json)
|
||||
|
||||
* `type` must be one of the [valid note labels](https://www.reddit.com/dev/api#POST_api_mod_notes):
|
||||
* BOT_BAN
|
||||
* PERMA_BAN
|
||||
* BAN
|
||||
* ABUSE_WARNING
|
||||
* SPAM_WARNING
|
||||
* SPAM_WATCH
|
||||
* SOLID_CONTRIBUTOR
|
||||
* HELPFUL_USER
|
||||
|
||||
```yaml
|
||||
actions:
|
||||
- kind: modnote
|
||||
type: SPAM_WATCH
|
||||
content: 'a note only mods can see message' # optional
|
||||
referenceActivity: boolean # if true the Note will be linked to the Activity being processed
|
||||
```
|
||||
|
||||
# Mod Action Filter
|
||||
|
||||
ContextMod can use **Mod Actions** (from moderation log) and **Mod Notes** in an [Author Filter](/docs/subreddit/components/README.md#author-filter).
|
||||
|
||||
## API Usage
|
||||
|
||||
Notes/Actions are **not** included in the data Reddit returns for either an Author or an Activity. This means that, in most cases, ContextMod is required to make **one additional API call to Reddit during Activity processing** if Notes/Actions are used as part of an **Author Filter**.
|
||||
|
||||
The impact of this additional call is greatest when the Author Filter is used as part of a **Comment Check** or running for **every Activity** such as part of a Run. Take this example:
|
||||
|
||||
No Mod Action filtering
|
||||
|
||||
* CM makes 1 api call to return new comments, find 10 new comments across 6 users
|
||||
* Processing each comment, with no other filters, requires 0 additional calls
|
||||
* At the end of processing 10 comments, CM has used a total of 1 api call.
|
||||
|
||||
Mod Action Filtering Used
|
||||
|
||||
* CM makes 1 api call to return new comments, find 10 new comments across 6 users
|
||||
* Processing each comment, with a mod action filter, requires 1 additional api call per user
|
||||
* At the end of processing 10 comments, CM has used a total of **7 api calls**
|
||||
|
||||
### When To Use?
|
||||
|
||||
In general, **do not** use Mod Actions in a Filter if:
|
||||
|
||||
* The filter is on a [**Comment** Check](/docs/subreddit/components/README.md#checks) and your subreddit has a high volume of Comments
|
||||
* The filter is on a [Run](/docs/subreddit/components/README.md#runs) and your subreddit has a high volume of Activities
|
||||
|
||||
If you need Mod Notes-like functionality for a high volume subreddit consider using [Toolbox UserNotes](/docs/subreddit/components/userNotes) instead.
|
||||
|
||||
In general, **do** use Mod Actions in a Filter if:
|
||||
|
||||
* The filter is on a [**Submission** Check](/docs/subreddit/components/README.md#checks)
|
||||
* The filter is part of an [Author **Rule**](/docs/subreddit/components/README.md#author) that is processed as **late as possible in the rule order for a Check**
|
||||
* Your subreddit has a low volume of Activities (less than 100 combined submissions/comments in a 10 minute period, for example)
|
||||
* The filter is on an Action
|
||||
|
||||
## Usage and Examples
|
||||
|
||||
Filter by Mod Actions/Notes on an Author Filter are done using the `modActions` property:
|
||||
|
||||
```yaml
|
||||
age: '> 1 month'
|
||||
# ...
|
||||
modActions:
|
||||
- ...
|
||||
```
|
||||
|
||||
There are two valid shapes for the Mod Action criteria: [ModLogCriteria](https://json-schema.app/view/%23%2Fdefinitions%2FModLogCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fedge%2Fsrc%2FSchema%2FApp.json) and [ModNoteCriteria](https://json-schema.app/view/%23%2Fdefinitions%2FModNoteCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fedge%2Fsrc%2FSchema%2FApp.json).
|
||||
|
||||
### ModLogCriteria
|
||||
|
||||
Used for filtering by **Moderation Log** actions *and/or general notes*.
|
||||
|
||||
* `activityType` -- Optional. If Mod Action is associated with an activity specify it here. A list or one of:
|
||||
* `submission`
|
||||
* `comment`
|
||||
* `type` -- Optional. The type of Mod Log Action. A list or one of:
|
||||
* `INVITE`
|
||||
* `NOTE`
|
||||
* `REMOVAL`
|
||||
* `SPAM`
|
||||
* `APPROVAL`
|
||||
* `description` -- additional mod log description (string) to filter by -- not documented by reddit. Can be string or regex string-like `/.* test/i`
|
||||
* `details` -- additional mod log details (string) to filter by -- not documented by reddit. Can be string or regex string-like `/.* test/i`
|
||||
|
||||
```yaml
|
||||
activityType: submission
|
||||
type:
|
||||
- REMOVAL
|
||||
- SPAM
|
||||
search: total
|
||||
count: '> 3 in 1 week'
|
||||
```
|
||||
### ModNoteCriteria
|
||||
|
||||
Inherits `activityType` from ModLogCriteria. If either of the below properties is included on the criteria then any other ModLogCriteria-specific properties are **ignored**.
|
||||
|
||||
* `note` -- the contents of the note to match against. Can be one of or a list of strings/regex string-like `/.* test/i`
|
||||
* `noteType` -- If specified by the note, the note type (see [Mod Note Action](#mod-note-action) type). Can be one of or a list of strings/regex string-like `/.* test/i`
|
||||
|
||||
```yaml
|
||||
noteType: SOLID_CONTRIBUTOR
|
||||
search: total
|
||||
count: '> 3 in 1 week'
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
Author has more than 2 submission approvals in the last month
|
||||
|
||||
```yaml
|
||||
type: APPROVAL
|
||||
activityType: submission
|
||||
search: total
|
||||
count: '> 2 in 1 month'
|
||||
```
|
||||
|
||||
Author has at least 1 BAN note
|
||||
|
||||
```yaml
|
||||
noteType: BAN
|
||||
search: total
|
||||
count: '>= 1'
|
||||
```
|
||||
|
||||
Author has at least 3 notes which include the words "self" and "promotion" in the last month
|
||||
|
||||
```yaml
|
||||
note: '/self.*promo/i'
|
||||
activityType: submission
|
||||
search: total
|
||||
count: '>= 3 in 1 month'
|
||||
```
|
||||
183
docs/subreddit/components/sentiment/README.md
Normal file
183
docs/subreddit/components/sentiment/README.md
Normal file
@@ -0,0 +1,183 @@
|
||||
# Table of Contents
|
||||
|
||||
* [Overview](#overview)
|
||||
* [Pros And Cons](#pros-and-cons)
|
||||
* [Technical Overview](#technical-overview)
|
||||
* [Sentiment Values](#sentiment-values)
|
||||
* [Usage](#usage)
|
||||
* [Testing Sentiment Value](#testing-sentiment-value)
|
||||
* [Numerical](#numerical)
|
||||
* [Text](#text)
|
||||
* [Sentiment Rule](#sentiment-rule)
|
||||
* [Historical](#historical)
|
||||
* [Examples](#examples)
|
||||
|
||||
# Overview
|
||||
|
||||
[Sentiment Analysis](https://monkeylearn.com/sentiment-analysis/) (SA) is a form of [Natural Language Processing](https://monkeylearn.com/natural-language-processing/) (NLP) used to extract the overall [sentiment](https://www.merriam-webster.com/dictionary/sentiment) (emotional intent) from a piece of text. Simply, SA is used to determine how positive or negative the emotion of a sentence is.
|
||||
|
||||
Examples:
|
||||
|
||||
* "I love how curly your hair is" -- very positive
|
||||
* "The United States is over 200 years old" -- neutral
|
||||
* "Frankly, your face is disgusting and I would hate to meet you" -- very negative
|
||||
|
||||
SA can be a powerful signal for determining the intent of a user's comment/submission. However, it should not be the **only** tool as it comes with both strengths and weaknesses.
|
||||
|
||||
## Pros and Cons
|
||||
|
||||
Pros
|
||||
|
||||
* In terms of Reddit API usage, SA is **free**. It requires no API calls and is computationally trivial.
|
||||
* Extremely powerful signal for intent since it analyzes the actual text content of an activity
|
||||
* Requires almost no setup to use
|
||||
* Can be used as a substitute for regex/keyword matching when looking for hateful/toxic comments
|
||||
* English language comprehension is very thorough
|
||||
* Uses 3 independent algorithms to evaluate sentiment
|
||||
* Understands common english slang, internet slang, and emojis
|
||||
|
||||
Cons
|
||||
|
||||
* Language limited -- only supported for English (most thorough), French, German, and Spanish
|
||||
* Less accurate for small word count content (less than 4 words)
|
||||
* Does not understand sarcasm/jokes
|
||||
* Accuracy depends on use of common words
|
||||
* Accuracy depends on clear intent
|
||||
* Heavy nuance, obscure word choice, and hidden meanings are not understood
|
||||
|
||||
## Technical Overview
|
||||
|
||||
ContextMod attempts to identify the language of the content it is processing. Based on its confidence of the language it will use up to three different NLP libraries to extract sentiment:
|
||||
|
||||
* [NLP.js](https://github.com/axa-group/nlp.js/blob/master/docs/v3/sentiment-analysis.md) (english, french, german, and spanish)
|
||||
* [vaderSentiment-js](https://github.com/vaderSentiment/vaderSentiment-js/) (english only)
|
||||
* [wink-sentiment](https://github.com/winkjs/wink-sentiment) (english only)
|
||||
|
||||
The above libraries make use of these Sentiment Analysis algorithms:
|
||||
|
||||
* VADER https://github.com/cjhutto/vaderSentiment
|
||||
* AFINN http://corpustext.com/reference/sentiment_afinn.html
|
||||
* Senticon https://ieeexplore.ieee.org/document/8721408
|
||||
* Pattern https://github.com/clips/pattern
|
||||
* wink https://github.com/winkjs/wink-sentiment (modified AFINN with emojis)
|
||||
|
||||
Each library produces a normalized score: the sum of all the valence values for each recognized token in its lexicon, divided by the number of words/tokens.
|
||||
|
||||
ContextMod takes each normalized score and adjusts it to be between -1 and +1. It then finds the average of all normalized scores to produce a final sentiment between -1 and +1.
|
||||
|
||||
# Sentiment Values
|
||||
|
||||
Each piece of content ContextMod analyses produces a score from -1 to +1 to represent the sentiment of that content
|
||||
|
||||
| Score | Sentiment |
|
||||
|-------|--------------------|
|
||||
| -1 | |
|
||||
| -0.6 | Extremely Negative |
|
||||
| -0.3 | Very Negative |
|
||||
| -0.1 | Negative |
|
||||
| 0 | Neutral |
|
||||
| 0.1 | Positive |
|
||||
| 0.3 | Very Positive |
|
||||
| 0.6 | Extremely Positive |
|
||||
| 1 | |
|
||||
|
||||
# Usage
|
||||
|
||||
## Testing Sentiment Value
|
||||
|
||||
Testing for sentiment in the Sentiment Rule is done using either a **text** or **numerical** comparison.
|
||||
|
||||
### Numerical
|
||||
|
||||
Similar to other numerical comparisons in CM -- use an equality operator and the number to test for:
|
||||
|
||||
* `> 0.1` -- sentiment is at least positive
|
||||
* `<= -0.1` -- sentiment is negative
|
||||
|
||||
Testing for *only* neutral sentiment should be done using a text comparison (below).
|
||||
|
||||
### Text
|
||||
|
||||
Use any of the **Sentiment** text values from the above table to form a test:
|
||||
|
||||
* `is very positive`
|
||||
* `is neutral`
|
||||
* `is extremely negative`
|
||||
|
||||
You may also use the `not` operator:
|
||||
|
||||
* `is not negative`
|
||||
* `is not very negative`
|
||||
* `is not neutral`
|
||||
|
||||
## Sentiment Rule
|
||||
|
||||
An example rule that tests the current comment/submission to see if it has negative sentiment:
|
||||
|
||||
```yaml
|
||||
sentiment: 'is negative'
|
||||
```
|
||||
|
||||
It's very simple :)
|
||||
|
||||
### Historical
|
||||
|
||||
You may also test the Sentiment of Activities from the user's history. (Note: this may use an API call to get history)
|
||||
|
||||
```yaml
|
||||
sentiment: 'is negative'
|
||||
historical:
|
||||
window:
|
||||
count: 50
|
||||
mustMatchCurrent: true # optional, the initial activity being tested must test true ("is positive" must be true) before historical tests are run
|
||||
sentimentVal: 'is very negative' # optional, if the sentiment test to use for historical content is different than the initial test
|
||||
totalMatching: '> 3' # optional, a comparison for how many historical activities must match sentimentVal
|
||||
```
|
||||
|
||||
# Examples
|
||||
|
||||
#### Check with Rules for recent problem subreddit activity and negative sentiment in comment
|
||||
|
||||
```yaml
|
||||
name: Probably Toxic Comment
|
||||
kind: comment
|
||||
rules:
|
||||
- kind: recentActivity
|
||||
thresholds:
|
||||
- aProblemSubreddit
|
||||
- kind: sentiment
|
||||
name: negsentiment
|
||||
sentiment: 'is very negative'
|
||||
actions:
|
||||
- kind: report
|
||||
content: 'Sentiment of {{rules.negsentiment.averageScore}} {{rules.negsentiment.sentimentTest}}'
|
||||
```
|
||||
|
||||
#### Check with Rules for recent problem subreddit activity and negative sentiment in comment history from problem subreddits
|
||||
|
||||
```yaml
|
||||
name: Toxic Comment With History
|
||||
kind: comment
|
||||
rules:
|
||||
- kind: recentActivity
|
||||
thresholds:
|
||||
- aProblemSubreddit
|
||||
- aSecondProblemSubreddit
|
||||
- kind: sentiment
|
||||
sentiment: 'is very negative'
|
||||
historical:
|
||||
sentimentVal: 'is negative'
|
||||
mustMatchCurrent: true
|
||||
totalMatching: '> 1'
|
||||
window:
|
||||
count: 100
|
||||
filterOn:
|
||||
post:
|
||||
subreddits:
|
||||
include:
|
||||
- name:
|
||||
- aProblemSubreddit
|
||||
- aSecondProblemSubreddit
|
||||
actions:
|
||||
- kind: remove
|
||||
```
|
||||
@@ -6,6 +6,16 @@ Context Mod supports reading and writing [User Notes](https://www.reddit.com/r/t
|
||||
|
||||
[Click here for the Toolbox Quickstart Guide](https://www.reddit.com/r/toolbox/wiki/docs/quick_start)
|
||||
|
||||
Valid Note Types:
|
||||
|
||||
* `gooduser`
|
||||
* `spamwatch`
|
||||
* `spamwarn`
|
||||
* `abusewarn`
|
||||
* `ban`
|
||||
* `permban`
|
||||
* `botban`
|
||||
|
||||
## Filter
|
||||
|
||||
User Notes are an additional criteria on [AuthorCriteria](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorCriteria?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) that can be used alongside other Author properties for both [filtering rules and in the AuthorRule.](/docs/subreddit/components/author/)
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
|
||||
## Web Dashboard Tips
|
||||
|
||||
* Click the **Help** button at the top of the page to get a **guided tour of the dashboard**
|
||||
* Use the [**Overview** section](/docs/images/botOperations.png) to control the bot at a high-level
|
||||
* You can **manually run** the bot on any activity (comment/submission) by pasting its permalink into the [input field below the Overview section](/docs/images/runInput.png) and hitting one of the **run buttons**
|
||||
* **Dry run** will make the bot run on the activity but it will only **pretend** to run actions, if triggered. This is super useful for testing your config without consequences
|
||||
|
||||
4894
package-lock.json
generated
4894
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
48
package.json
48
package.json
@@ -1,19 +1,22 @@
|
||||
{
|
||||
"name": "redditcontextbot",
|
||||
"version": "0.5.1",
|
||||
"version": "0.11.4",
|
||||
"description": "",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"test": "nyc ./node_modules/.bin/_mocha 'tests/**/*.test.ts'",
|
||||
"build": "tsc",
|
||||
"build": "tsc && npm run bundle-front",
|
||||
"bundle-front": "browserify src/Web/assets/browser.js | terser --compress --mangle > src/Web/assets/public/browserBundle.js",
|
||||
"start": "node src/index.js run",
|
||||
"schema": "npm run -s schema-app & npm run -s schema-ruleset & npm run -s schema-rule & npm run -s schema-action & npm run -s schema-config",
|
||||
"schema-app": "typescript-json-schema tsconfig.json JSONConfig --out src/Schema/App.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-ruleset": "typescript-json-schema tsconfig.json RuleSetJson --out src/Schema/RuleSet.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-rule": "typescript-json-schema tsconfig.json RuleJson --out src/Schema/Rule.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-action": "typescript-json-schema tsconfig.json ActionJson --out src/Schema/Action.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema": "npm run -s schema-app & npm run -s schema-ruleset & npm run -s schema-rule & npm run -s schema-action & npm run -s schema-check & npm run -s schema-run & npm run -s schema-config",
|
||||
"schema-app": "typescript-json-schema tsconfig.json SubredditConfigData --out src/Schema/App.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-ruleset": "typescript-json-schema tsconfig.json RuleSetConfigData --out src/Schema/RuleSet.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-rule": "typescript-json-schema tsconfig.json RuleConfigData --out src/Schema/Rule.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-check": "typescript-json-schema tsconfig.json ActivityCheckConfigValue --out src/Schema/Check.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-run": "typescript-json-schema tsconfig.json RunConfigValue --out src/Schema/Run.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-action": "typescript-json-schema tsconfig.json ActionConfigData --out src/Schema/Action.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schema-config": "typescript-json-schema tsconfig.json OperatorJsonConfig --out src/Schema/OperatorConfig.json --required --tsNodeRegister --refs --validationKeywords deprecationMessage",
|
||||
"schemaNotWorking": "./node_modules/.bin/ts-json-schema-generator -f tsconfig.json -p src/JsonConfig.ts -t JSONConfig --out src/Schema/vegaSchema.json",
|
||||
"schemaNotWorking": "./node_modules/.bin/ts-json-schema-generator -f tsconfig.json -p src/SubredditConfigData.ts -t JSONConfig --out src/Schema/vegaSchema.json",
|
||||
"circular": "madge --circular --extensions ts src/index.ts",
|
||||
"circular-graph": "madge --image graph.svg --circular --extensions ts src/index.ts",
|
||||
"postinstall": "patch-package",
|
||||
@@ -29,6 +32,15 @@
|
||||
"dependencies": {
|
||||
"@awaitjs/express": "^0.8.0",
|
||||
"@googleapis/youtube": "^2.0.0",
|
||||
"@influxdata/influxdb-client": "^1.27.0",
|
||||
"@influxdata/influxdb-client-apis": "^1.27.0",
|
||||
"@nlpjs/core": "^4.23.4",
|
||||
"@nlpjs/lang-de": "^4.23.4",
|
||||
"@nlpjs/lang-en": "^4.23.4",
|
||||
"@nlpjs/lang-es": "^4.23.4",
|
||||
"@nlpjs/lang-fr": "^4.23.4",
|
||||
"@nlpjs/language": "^4.22.7",
|
||||
"@nlpjs/nlp": "^4.23.5",
|
||||
"@stdlib/regexp-regexp": "^0.0.6",
|
||||
"ajv": "^7.2.4",
|
||||
"ansi-regex": ">=5.0.1",
|
||||
@@ -39,11 +51,10 @@
|
||||
"cache-manager-redis-store": "^2.0.0",
|
||||
"commander": "^8.0.0",
|
||||
"comment-json": "^4.1.1",
|
||||
"connect-typeorm": "github:FoxxMD/connect-typeorm#typeormBump",
|
||||
"connect-typeorm": "^2.0.0",
|
||||
"cookie-parser": "^1.3.5",
|
||||
"dayjs": "^1.10.5",
|
||||
"deepmerge": "^4.2.2",
|
||||
"delimiter-stream": "^3.0.1",
|
||||
"ejs": "^3.1.6",
|
||||
"env-cmd": "^10.1.0",
|
||||
"es6-error": "^4.1.1",
|
||||
@@ -52,16 +63,15 @@
|
||||
"express-session-cache-manager": "^1.0.2",
|
||||
"express-socket.io-session": "^1.3.5",
|
||||
"fast-deep-equal": "^3.1.3",
|
||||
"fuse.js": "^6.4.6",
|
||||
"globrex": "^0.1.2",
|
||||
"got": "^11.8.2",
|
||||
"he": "^1.2.0",
|
||||
"http-proxy": "^1.18.1",
|
||||
"image-size": "^1.0.0",
|
||||
"json5": "^2.2.0",
|
||||
"jsonwebtoken": "^8.5.1",
|
||||
"leven": "^3.1.0",
|
||||
"lodash": "^4.17.21",
|
||||
"logform": "^2.4.0",
|
||||
"lru-cache": "^6.0.0",
|
||||
"migrate": "github:johsunds/node-migrate#49b0054de0a9295857aa8b8eea9a3cdeb2643913",
|
||||
"mustache": "^4.2.0",
|
||||
@@ -77,9 +87,7 @@
|
||||
"patch-package": "^6.4.7",
|
||||
"pixelmatch": "^5.2.1",
|
||||
"pony-cause": "^1.1.1",
|
||||
"pretty-print-json": "^1.0.3",
|
||||
"reflect-metadata": "^0.1.13",
|
||||
"safe-stable-stringify": "^1.1.1",
|
||||
"snoostorm": "^1.5.2",
|
||||
"snoowrap": "^1.23.0",
|
||||
"socket.io": "^4.1.3",
|
||||
@@ -87,10 +95,12 @@
|
||||
"string-similarity": "^4.0.4",
|
||||
"tcp-port-used": "^1.0.2",
|
||||
"triple-beam": "^1.3.0",
|
||||
"typeorm": "^0.3.4",
|
||||
"typeorm": "^0.3.7",
|
||||
"typeorm-logger-adaptor": "^1.1.0",
|
||||
"typescript": "^4.3.4",
|
||||
"unique-names-generator": "^4.7.1",
|
||||
"vader-sentiment": "^1.1.3",
|
||||
"webhook-discord": "^3.7.7",
|
||||
"wink-sentiment": "^5.0.2",
|
||||
"winston": "github:FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
|
||||
"winston-daily-rotate-file": "^4.5.5",
|
||||
"winston-duplex": "^0.1.1",
|
||||
@@ -106,6 +116,7 @@
|
||||
"@types/cache-manager": "^3.4.2",
|
||||
"@types/cache-manager-redis-store": "^2.0.0",
|
||||
"@types/chai": "^4.3.0",
|
||||
"@types/chai-as-promised": "^7.1.5",
|
||||
"@types/cookie-parser": "^1.4.2",
|
||||
"@types/express": "^4.17.13",
|
||||
"@types/express-session": "^1.17.4",
|
||||
@@ -130,16 +141,19 @@
|
||||
"@types/string-similarity": "^4.0.0",
|
||||
"@types/tcp-port-used": "^1.0.0",
|
||||
"@types/triple-beam": "^1.3.2",
|
||||
"browserify": "^17.0.0",
|
||||
"chai": "^4.3.6",
|
||||
"chai-as-promised": "^7.1.1",
|
||||
"mocha": "^9.2.1",
|
||||
"nyc": "^15.1.0",
|
||||
"source-map-support": "^0.5.21",
|
||||
"terser": "^5.13.1",
|
||||
"ts-essentials": "^9.1.2",
|
||||
"ts-json-schema-generator": "^0.93.0",
|
||||
"ts-mockito": "^2.6.1",
|
||||
"ts-node": "^10.7.0",
|
||||
"tsconfig-paths": "^3.13.0",
|
||||
"typescript": "^4.3.4",
|
||||
"typescript": "^4.6.4",
|
||||
"typescript-json-schema": "~0.53"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
|
||||
@@ -17,6 +17,7 @@ import {DispatchAction, DispatchActionJson} from "./DispatchAction";
|
||||
import {CancelDispatchAction, CancelDispatchActionJson} from "./CancelDispatchAction";
|
||||
import ContributorAction, {ContributorActionJson} from "./ContributorAction";
|
||||
import {StructuredFilter} from "../Common/Infrastructure/Filters/FilterShapes";
|
||||
import {ModNoteAction, ModNoteActionJson} from "./ModNoteAction";
|
||||
|
||||
export function actionFactory
|
||||
(config: StructuredActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: ExtendedSnoowrap, emitter: EventEmitter): Action {
|
||||
@@ -47,6 +48,8 @@ export function actionFactory
|
||||
return new CancelDispatchAction({...config as StructuredFilter<CancelDispatchActionJson>, logger, subredditName, resources, client, emitter})
|
||||
case 'contributor':
|
||||
return new ContributorAction({...config as StructuredFilter<ContributorActionJson>, logger, subredditName, resources, client, emitter})
|
||||
case 'modnote':
|
||||
return new ModNoteAction({...config as StructuredFilter<ModNoteActionJson>, logger, subredditName, resources, client, emitter})
|
||||
default:
|
||||
throw new Error('rule "kind" was not recognized.');
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import Comment from "snoowrap/dist/objects/Comment";
|
||||
import {RuleResultEntity} from "../Common/Entities/RuleResultEntity";
|
||||
import {runCheckOptions} from "../Subreddit/Manager";
|
||||
import {ActionTarget, ActionTypes} from "../Common/Infrastructure/Atomic";
|
||||
import {asComment, asSubmission} from "../util";
|
||||
|
||||
export class ApproveAction extends Action {
|
||||
|
||||
@@ -29,22 +30,24 @@ export class ApproveAction extends Action {
|
||||
const dryRun = this.getRuntimeAwareDryrun(options);
|
||||
const touchedEntities = [];
|
||||
|
||||
const realTargets = item instanceof Submission ? ['self'] : this.targets;
|
||||
const realTargets = asSubmission(item) ? ['self'] : this.targets;
|
||||
|
||||
let msg: string[] = [];
|
||||
|
||||
for(const target of realTargets) {
|
||||
let targetItem = item;
|
||||
if(target !== 'self' && item instanceof Comment) {
|
||||
if(target !== 'self' && asComment(item)) {
|
||||
targetItem = await this.resources.getActivity(this.client.getSubmission(item.link_id));
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
if (targetItem.approved) {
|
||||
const msg = `${target === 'self' ? 'Item' : 'Comment\'s parent Submission'} is already approved`;
|
||||
msg.push(`${target === 'self' ? 'Item' : 'Comment\'s parent Submission'} is already approved??`);
|
||||
this.logger.warn(msg);
|
||||
return {
|
||||
dryRun,
|
||||
success: false,
|
||||
result: msg
|
||||
result: msg.join('|')
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,6 +56,9 @@ export class ApproveAction extends Action {
|
||||
if(target !== 'self' && !(targetItem instanceof Submission)) {
|
||||
// @ts-ignore
|
||||
targetItem = await this.client.getSubmission((item as Comment).link_id).fetch();
|
||||
msg.push(`Approving parent Submission ${targetItem.name}`);
|
||||
} else {
|
||||
msg.push(`Approving self ${targetItem.name}`);
|
||||
}
|
||||
// @ts-ignore
|
||||
touchedEntities.push(await targetItem.approve());
|
||||
@@ -70,6 +76,7 @@ export class ApproveAction extends Action {
|
||||
}
|
||||
|
||||
return {
|
||||
result: msg.join(' | '),
|
||||
dryRun,
|
||||
success: true,
|
||||
touchedEntities
|
||||
|
||||
@@ -6,6 +6,10 @@ import {ActionProcessResult, Footer, RuleResult} from "../Common/interfaces";
|
||||
import {RuleResultEntity} from "../Common/Entities/RuleResultEntity";
|
||||
import {runCheckOptions} from "../Subreddit/Manager";
|
||||
import {ActionTypes} from "../Common/Infrastructure/Atomic";
|
||||
import {truncateStringToLength} from "../util";
|
||||
|
||||
const truncate = truncateStringToLength(100);
|
||||
const truncateLongMessage = truncateStringToLength(200);
|
||||
|
||||
export class BanAction extends Action {
|
||||
|
||||
@@ -37,15 +41,17 @@ export class BanAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResultEntity[], options: runCheckOptions): Promise<ActionProcessResult> {
|
||||
const dryRun = this.getRuntimeAwareDryrun(options);
|
||||
const content = this.message === undefined ? undefined : await this.resources.getContent(this.message, item.subreddit);
|
||||
const renderedBody = content === undefined ? undefined : await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
const renderedBody = this.message === undefined ? undefined : await this.resources.renderContent(this.message, item, ruleResults);
|
||||
const renderedContent = renderedBody === undefined ? undefined : `${renderedBody}${await this.resources.generateFooter(item, this.footer)}`;
|
||||
|
||||
const renderedReason = this.reason === undefined ? undefined : truncate(await this.resources.renderContent(this.reason, item, ruleResults));
|
||||
const renderedNote = this.note === undefined ? undefined : truncate(await this.resources.renderContent(this.note, item, ruleResults));
|
||||
|
||||
const touchedEntities = [];
|
||||
let banPieces = [];
|
||||
banPieces.push(`Message: ${renderedContent === undefined ? 'None' : `${renderedContent.length > 100 ? `\r\n${renderedContent}` : renderedContent}`}`);
|
||||
banPieces.push(`Reason: ${this.reason || 'None'}`);
|
||||
banPieces.push(`Note: ${this.note || 'None'}`);
|
||||
banPieces.push(`Message: ${renderedContent === undefined ? 'None' : `${renderedContent.length > 100 ? `\r\n${truncateLongMessage(renderedContent)}` : renderedContent}`}`);
|
||||
banPieces.push(`Reason: ${renderedReason || 'None'}`);
|
||||
banPieces.push(`Note: ${renderedNote || 'None'}`);
|
||||
const durText = this.duration === undefined ? 'permanently' : `for ${this.duration} days`;
|
||||
this.logger.info(`Banning ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`);
|
||||
this.logger.verbose(`\r\n${banPieces.join('\r\n')}`);
|
||||
@@ -56,8 +62,8 @@ export class BanAction extends Action {
|
||||
const bannedUser = await fetchedSub.banUser({
|
||||
name: fetchedName,
|
||||
banMessage: renderedContent === undefined ? undefined : renderedContent,
|
||||
banReason: this.reason,
|
||||
banNote: this.note,
|
||||
banReason: renderedReason,
|
||||
banNote: renderedNote,
|
||||
duration: this.duration
|
||||
});
|
||||
touchedEntities.push(bannedUser);
|
||||
@@ -65,7 +71,7 @@ export class BanAction extends Action {
|
||||
return {
|
||||
dryRun,
|
||||
success: true,
|
||||
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`,
|
||||
result: `Banned ${item.author.name} ${durText}${renderedReason !== undefined ? ` (${renderedReason})` : ''}`,
|
||||
touchedEntities
|
||||
};
|
||||
}
|
||||
@@ -97,8 +103,10 @@ export interface BanActionConfig extends ActionConfig, Footer {
|
||||
* */
|
||||
message?: string
|
||||
/**
|
||||
* Reason for ban.
|
||||
* @maxLength 100
|
||||
* Reason for ban. Can use Templating.
|
||||
*
|
||||
* If the length expands to more than 100 characters it will truncated with "..."
|
||||
*
|
||||
* @examples ["repeat spam"]
|
||||
* */
|
||||
reason?: string
|
||||
@@ -110,8 +118,10 @@ export interface BanActionConfig extends ActionConfig, Footer {
|
||||
* */
|
||||
duration?: number
|
||||
/**
|
||||
* A mod note for this ban
|
||||
* @maxLength 100
|
||||
* A mod note for this ban. Can use Templating.
|
||||
*
|
||||
* If the length expands to more than 100 characters it will truncated with "..."
|
||||
*
|
||||
* @examples ["Sock puppet for u/AnotherUser"]
|
||||
* */
|
||||
note?: string
|
||||
|
||||
@@ -71,12 +71,7 @@ export class CancelDispatchAction extends Action {
|
||||
} else {
|
||||
matchedDispatchIdentifier = this.identifiers.filter(x => x !== null).includes(x.identifier);
|
||||
}
|
||||
const matched = matchedId && matchedDispatchIdentifier;
|
||||
if(matched && x.processing) {
|
||||
this.logger.debug(`Cannot remove ${isSubmission(x.activity) ? 'Submission' : 'Comment'} ${x.activity.name} because it is currently processing`);
|
||||
return false;
|
||||
}
|
||||
return matched;
|
||||
return matchedId && matchedDispatchIdentifier;
|
||||
});
|
||||
let cancelCrit;
|
||||
if (this.identifiers === undefined) {
|
||||
|
||||
@@ -50,8 +50,9 @@ export class MessageAction extends Action {
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResultEntity[], options: runCheckOptions): Promise<ActionProcessResult> {
|
||||
const dryRun = this.getRuntimeAwareDryrun(options);
|
||||
const content = await this.resources.getContent(this.content);
|
||||
const body = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
|
||||
const body = await this.resources.renderContent(this.content, item, ruleResults);
|
||||
const subject = this.title === undefined ? `Concerning your ${isSubmission(item) ? 'Submission' : 'Comment'}` : await this.resources.renderContent(this.title, item, ruleResults);
|
||||
|
||||
const footer = await this.resources.generateFooter(item, this.footer);
|
||||
|
||||
@@ -80,7 +81,7 @@ export class MessageAction extends Action {
|
||||
text: renderedContent,
|
||||
// @ts-ignore
|
||||
fromSubreddit: this.asSubreddit ? await item.subreddit.fetch() : undefined,
|
||||
subject: this.title || `Concerning your ${isSubmission(item) ? 'Submission' : 'Comment'}`,
|
||||
subject: subject,
|
||||
};
|
||||
|
||||
const msgPreview = `\r\n
|
||||
|
||||
108
src/Action/ModNoteAction.ts
Normal file
108
src/Action/ModNoteAction.ts
Normal file
@@ -0,0 +1,108 @@
|
||||
import {ActionJson, ActionConfig, ActionOptions} from "./index";
|
||||
import Action from "./index";
|
||||
import {Comment} from "snoowrap";
|
||||
import {renderContent} from "../Utils/SnoowrapUtils";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {ActionProcessResult, RichContent} from "../Common/interfaces";
|
||||
import {toModNoteLabel} from "../util";
|
||||
import {RuleResultEntity} from "../Common/Entities/RuleResultEntity";
|
||||
import {runCheckOptions} from "../Subreddit/Manager";
|
||||
import {ActionTypes, ModUserNoteLabel} from "../Common/Infrastructure/Atomic";
|
||||
import {ModNote} from "../Subreddit/ModNotes/ModNote";
|
||||
|
||||
|
||||
export class ModNoteAction extends Action {
|
||||
content: string;
|
||||
type?: string;
|
||||
allowDuplicate: boolean;
|
||||
referenceActivity: boolean
|
||||
|
||||
constructor(options: ModNoteActionOptions) {
|
||||
super(options);
|
||||
const {type, content = '', allowDuplicate = false, referenceActivity = true} = options;
|
||||
this.type = type;
|
||||
this.content = content;
|
||||
this.allowDuplicate = allowDuplicate;
|
||||
this.referenceActivity = referenceActivity;
|
||||
}
|
||||
|
||||
getKind(): ActionTypes {
|
||||
return 'modnote';
|
||||
}
|
||||
|
||||
protected getSpecificPremise(): object {
|
||||
return {
|
||||
content: this.content,
|
||||
type: this.type,
|
||||
allowDuplicate: this.allowDuplicate,
|
||||
referenceActivity: this.referenceActivity,
|
||||
}
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResultEntity[], options: runCheckOptions): Promise<ActionProcessResult> {
|
||||
const dryRun = this.getRuntimeAwareDryrun(options);
|
||||
|
||||
const modLabel = this.type !== undefined ? toModNoteLabel(this.type) : undefined;
|
||||
|
||||
const content = await this.resources.getContent(this.content, item.subreddit);
|
||||
const renderedContent = await renderContent(content, item, ruleResults, this.resources.userNotes);
|
||||
this.logger.verbose(`Note:\r\n(${this.type}) ${renderedContent}`);
|
||||
|
||||
// TODO see what changes are made for bulk fetch of notes before implementing this
|
||||
// https://www.reddit.com/r/redditdev/comments/t8w861/new_mod_notes_api/
|
||||
// if (!this.allowDuplicate) {
|
||||
// const notes = await this.resources.userNotes.getUserNotes(item.author);
|
||||
// let existingNote = notes.find((x) => x.link !== null && x.link.includes(item.id));
|
||||
// if(existingNote === undefined && notes.length > 0) {
|
||||
// const lastNote = notes[notes.length - 1];
|
||||
// // possibly notes don't have a reference link so check if last one has same text
|
||||
// if(lastNote.link === null && lastNote.text === renderedContent) {
|
||||
// existingNote = lastNote;
|
||||
// }
|
||||
// }
|
||||
// if (existingNote !== undefined && existingNote.noteType === this.type) {
|
||||
// this.logger.info(`Will not add note because one already exists for this Activity (${existingNote.time.local().format()}) and allowDuplicate=false`);
|
||||
// return {
|
||||
// dryRun,
|
||||
// success: false,
|
||||
// result: `Will not add note because one already exists for this Activity (${existingNote.time.local().format()}) and allowDuplicate=false`
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
if (!dryRun) {
|
||||
await this.resources.addModNote({
|
||||
label: modLabel,
|
||||
note: renderedContent,
|
||||
activity: this.referenceActivity ? item : undefined,
|
||||
subreddit: this.resources.subreddit,
|
||||
user: item.author
|
||||
});
|
||||
}
|
||||
return {
|
||||
success: true,
|
||||
dryRun,
|
||||
result: `${modLabel !== undefined ? `(${modLabel})` : ''} ${renderedContent}`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export interface ModNoteActionConfig extends ActionConfig, RichContent {
|
||||
/**
|
||||
* Add Note even if a Note already exists for this Activity
|
||||
* @examples [false]
|
||||
* @default false
|
||||
* */
|
||||
allowDuplicate?: boolean,
|
||||
type?: ModUserNoteLabel
|
||||
referenceActivity?: boolean
|
||||
}
|
||||
|
||||
export interface ModNoteActionOptions extends Omit<ModNoteActionConfig, 'authorIs' | 'itemIs'>, ActionOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a Toolbox User Note to the Author of this Activity
|
||||
* */
|
||||
export interface ModNoteActionJson extends ModNoteActionConfig, ActionJson {
|
||||
kind: 'modnote'
|
||||
}
|
||||
@@ -4,13 +4,16 @@ import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {activityIsRemoved} from "../Utils/SnoowrapUtils";
|
||||
import {ActionProcessResult, RuleResult} from "../Common/interfaces";
|
||||
import dayjs from "dayjs";
|
||||
import {isSubmission} from "../util";
|
||||
import {isSubmission, truncateStringToLength} from "../util";
|
||||
import {RuleResultEntity} from "../Common/Entities/RuleResultEntity";
|
||||
import {runCheckOptions} from "../Subreddit/Manager";
|
||||
import {ActionTypes} from "../Common/Infrastructure/Atomic";
|
||||
|
||||
const truncate = truncateStringToLength(100);
|
||||
export class RemoveAction extends Action {
|
||||
spam: boolean;
|
||||
note?: string;
|
||||
reasonId?: string;
|
||||
|
||||
getKind(): ActionTypes {
|
||||
return 'remove';
|
||||
@@ -20,21 +23,54 @@ export class RemoveAction extends Action {
|
||||
super(options);
|
||||
const {
|
||||
spam = false,
|
||||
note,
|
||||
reasonId,
|
||||
} = options;
|
||||
this.spam = spam;
|
||||
this.note = note;
|
||||
this.reasonId = reasonId;
|
||||
}
|
||||
|
||||
async process(item: Comment | Submission, ruleResults: RuleResultEntity[], options: runCheckOptions): Promise<ActionProcessResult> {
|
||||
const dryRun = this.getRuntimeAwareDryrun(options);
|
||||
const touchedEntities = [];
|
||||
let removeSummary = [];
|
||||
// issue with snoowrap typings, doesn't think prop exists on Submission
|
||||
// @ts-ignore
|
||||
if (activityIsRemoved(item)) {
|
||||
this.logger.warn('It looks like this Item is already removed!');
|
||||
}
|
||||
if (this.spam) {
|
||||
removeSummary.push('Marked as SPAM');
|
||||
this.logger.verbose('Marking as spam on removal');
|
||||
}
|
||||
const renderedNote = this.note === undefined ? undefined : await this.resources.renderContent(this.note, item, ruleResults);
|
||||
let foundReasonId: string | undefined;
|
||||
let foundReason: string | undefined;
|
||||
|
||||
if(this.reasonId !== undefined) {
|
||||
const reason = await this.resources.getSubredditRemovalReasonById(this.reasonId);
|
||||
if(reason === undefined) {
|
||||
const reasonWarn = [`Could not find any Removal Reason with the ID ${this.reasonId}!`];
|
||||
if(renderedNote === undefined) {
|
||||
reasonWarn.push('Cannot add any Removal Reason because note is also empty!');
|
||||
} else {
|
||||
reasonWarn.push('Will add Removal Reason but only with note.');
|
||||
}
|
||||
this.logger.warn(reasonWarn.join(''));
|
||||
} else {
|
||||
foundReason = truncate(reason.title);
|
||||
foundReasonId = reason.id;
|
||||
removeSummary.push(`Reason: ${truncate(foundReason)} (${foundReasonId})`);
|
||||
}
|
||||
}
|
||||
|
||||
if(renderedNote !== undefined) {
|
||||
removeSummary.push(`Note: ${truncate(renderedNote)}`);
|
||||
}
|
||||
|
||||
this.logger.verbose(removeSummary.join(' | '));
|
||||
|
||||
if (!dryRun) {
|
||||
// @ts-ignore
|
||||
await item.remove({spam: this.spam});
|
||||
@@ -44,6 +80,18 @@ export class RemoveAction extends Action {
|
||||
// @ts-ignore
|
||||
item.removed = true;
|
||||
}
|
||||
|
||||
if(foundReasonId !== undefined || renderedNote !== undefined) {
|
||||
await this.client.addRemovalReason(item, renderedNote, foundReasonId);
|
||||
item.mod_reason_by = this.resources.botAccount as string;
|
||||
if(renderedNote !== undefined) {
|
||||
item.removal_reason = renderedNote;
|
||||
}
|
||||
if(foundReason !== undefined) {
|
||||
item.mod_reason_title = foundReason;
|
||||
}
|
||||
}
|
||||
|
||||
await this.resources.resetCacheForItem(item);
|
||||
touchedEntities.push(item);
|
||||
}
|
||||
@@ -66,7 +114,22 @@ export interface RemoveOptions extends Omit<RemoveActionConfig, 'authorIs' | 'it
|
||||
}
|
||||
|
||||
export interface RemoveActionConfig extends ActionConfig {
|
||||
/** (Optional) Mark Activity as spam */
|
||||
spam?: boolean
|
||||
/** (Optional) A mod-readable note added to the removal reason for this Activity. Can use Templating.
|
||||
*
|
||||
* This note (and removal reasons) are only visible on New Reddit
|
||||
* */
|
||||
note?: string
|
||||
/** (Optional) The ID of the Removal Reason to use
|
||||
*
|
||||
* Removal reasons are only visible on New Reddit
|
||||
*
|
||||
* To find IDs for removal reasons check the "Removal Reasons" popup located in the CM dashboard config editor for your subreddit
|
||||
*
|
||||
* More info on Removal Reasons: https://mods.reddithelp.com/hc/en-us/articles/360010094892-Removal-Reasons
|
||||
* */
|
||||
reasonId?: string
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -7,12 +7,12 @@ import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {ActionProcessResult, RuleResult} from "../Common/interfaces";
|
||||
import {RuleResultEntity} from "../Common/Entities/RuleResultEntity";
|
||||
import {runCheckOptions} from "../Subreddit/Manager";
|
||||
import {ActionTypes} from "../Common/Infrastructure/Atomic";
|
||||
import {ActionTypes, UserNoteType} from "../Common/Infrastructure/Atomic";
|
||||
|
||||
|
||||
export class UserNoteAction extends Action {
|
||||
content: string;
|
||||
type: string;
|
||||
type: UserNoteType;
|
||||
allowDuplicate: boolean;
|
||||
|
||||
constructor(options: UserNoteActionOptions) {
|
||||
|
||||
68
src/App.ts
68
src/App.ts
@@ -4,24 +4,27 @@ import {getLogger} from "./Utils/loggerFactory";
|
||||
import {DatabaseMigrationOptions, OperatorConfig, OperatorConfigWithFileContext, OperatorFileConfig} from "./Common/interfaces";
|
||||
import Bot from "./Bot";
|
||||
import LoggedError from "./Utils/LoggedError";
|
||||
import {mergeArr, sleep} from "./util";
|
||||
import {copyFile} from "fs/promises";
|
||||
import {generateRandomName, mergeArr, sleep} from "./util";
|
||||
import {copyFile, open} from "fs/promises";
|
||||
import {constants} from "fs";
|
||||
import {Connection} from "typeorm";
|
||||
import {Connection, DataSource, Repository} from "typeorm";
|
||||
import {ErrorWithCause} from "pony-cause";
|
||||
import {MigrationService} from "./Common/MigrationService";
|
||||
import {Invokee} from "./Common/Infrastructure/Atomic";
|
||||
import {DatabaseConfig} from "./Common/Infrastructure/Database";
|
||||
import {InviteData} from "./Web/Common/interfaces";
|
||||
import {BotInvite} from "./Common/Entities/BotInvite";
|
||||
|
||||
export class App {
|
||||
|
||||
bots: Bot[] = [];
|
||||
logger: Logger;
|
||||
dbLogger: Logger;
|
||||
database: Connection
|
||||
database: DataSource
|
||||
startedAt: Dayjs = dayjs();
|
||||
ranMigrations: boolean = false;
|
||||
migrationBlocker?: string;
|
||||
friendly?: string;
|
||||
|
||||
config: OperatorConfig;
|
||||
|
||||
@@ -30,6 +33,7 @@ export class App {
|
||||
fileConfig: OperatorFileConfig;
|
||||
|
||||
migrationService: MigrationService;
|
||||
inviteRepo: Repository<BotInvite>;
|
||||
|
||||
constructor(config: OperatorConfigWithFileContext) {
|
||||
const {
|
||||
@@ -49,6 +53,8 @@ export class App {
|
||||
this.logger = getLogger(config.logging);
|
||||
this.dbLogger = this.logger.child({labels: ['Database']}, mergeArr);
|
||||
this.database = database;
|
||||
this.inviteRepo = this.database.getRepository(BotInvite);
|
||||
this.friendly = this.config.api.friendly;
|
||||
|
||||
this.logger.info(`Operators: ${name.length === 0 ? 'None Specified' : name.join(', ')}`)
|
||||
|
||||
@@ -114,6 +120,8 @@ export class App {
|
||||
return;
|
||||
}
|
||||
|
||||
await this.checkFriendlyName();
|
||||
|
||||
if(this.bots.length > 0) {
|
||||
this.logger.info('Bots already exist, will stop and destroy these before building new ones.');
|
||||
await this.destroy(causedBy);
|
||||
@@ -133,7 +141,7 @@ export class App {
|
||||
for (const b of this.bots) {
|
||||
if (b.error === undefined) {
|
||||
try {
|
||||
await b.testClient();
|
||||
await b.init();
|
||||
await b.buildManagers();
|
||||
await sleep(2000);
|
||||
b.runManagers(causedBy).catch((err) => {
|
||||
@@ -161,4 +169,54 @@ export class App {
|
||||
await b.destroy(causedBy);
|
||||
}
|
||||
}
|
||||
|
||||
async checkFriendlyName() {
|
||||
if(this.friendly === undefined) {
|
||||
let randFriendly: string = generateRandomName();
|
||||
this.logger.verbose(`No friendly name set for Server. Generated: ${randFriendly}`);
|
||||
|
||||
const exists = async (name: string) => {
|
||||
const existing = await this.inviteRepo.findBy({instance: name});
|
||||
return existing.length > 0;
|
||||
}
|
||||
while (await exists(randFriendly)) {
|
||||
let oldFriendly = randFriendly;
|
||||
randFriendly = generateRandomName();
|
||||
this.logger.verbose(`${oldFriendly} already exists! Generated: ${randFriendly}`);
|
||||
}
|
||||
|
||||
this.friendly = randFriendly;
|
||||
this.fileConfig.document.setFriendlyName(this.friendly);
|
||||
|
||||
const handle = await open(this.fileConfig.document.location as string, 'w');
|
||||
await handle.writeFile(this.fileConfig.document.toString());
|
||||
await handle.close();
|
||||
this.logger.verbose(`Wrote ${randFriendly} as friendly server name to config.`);
|
||||
}
|
||||
}
|
||||
|
||||
async getInviteById(id: string): Promise<BotInvite | undefined> {
|
||||
const invite = await this.inviteRepo.findOne({where: {id, instance: this.friendly}});
|
||||
if(invite === null) {
|
||||
return undefined;
|
||||
}
|
||||
return invite;
|
||||
}
|
||||
|
||||
async getInviteIds(): Promise<string[]> {
|
||||
if(!this.ranMigrations) {
|
||||
// not ready!
|
||||
return [];
|
||||
}
|
||||
const invites = await this.inviteRepo.findBy({instance: this.friendly});
|
||||
return invites.map(x => x.id);
|
||||
}
|
||||
|
||||
async addInvite(data: InviteData): Promise<InviteData> {
|
||||
return await this.inviteRepo.save(new BotInvite(data));
|
||||
}
|
||||
|
||||
async deleteInvite(id: string): Promise<void> {
|
||||
await this.inviteRepo.delete({ id });
|
||||
}
|
||||
}
|
||||
|
||||
457
src/Bot/index.ts
457
src/Bot/index.ts
@@ -13,13 +13,13 @@ import {
|
||||
USER
|
||||
} from "../Common/interfaces";
|
||||
import {
|
||||
createRetryHandler, difference,
|
||||
createRetryHandler, symmetricalDifference,
|
||||
formatNumber, getExceptionMessage, getUserAgent,
|
||||
mergeArr,
|
||||
parseBool,
|
||||
parseDuration, parseMatchMessage, parseRedditEntity,
|
||||
parseSubredditName, RetryOptions,
|
||||
sleep
|
||||
parseSubredditName, partition, RetryOptions,
|
||||
sleep, intersect
|
||||
} from "../util";
|
||||
import {Manager} from "../Subreddit/Manager";
|
||||
import {ExtendedSnoowrap, ProxiedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
@@ -41,14 +41,21 @@ import {ManagerRunState} from "../Common/Entities/EntityRunState/ManagerRunState
|
||||
import {Invokee, PollOn} from "../Common/Infrastructure/Atomic";
|
||||
import {FilterCriteriaDefaults} from "../Common/Infrastructure/Filters/FilterShapes";
|
||||
import {snooLogWrapper} from "../Utils/loggerFactory";
|
||||
import {InfluxClient} from "../Common/Influx/InfluxClient";
|
||||
import {Point} from "@influxdata/influxdb-client";
|
||||
import {BotInstanceFunctions, NormalizedManagerResponse} from "../Web/Common/interfaces";
|
||||
import {AuthorEntity} from "../Common/Entities/AuthorEntity";
|
||||
import {Guest, GuestEntityData} from "../Common/Entities/Guest/GuestInterfaces";
|
||||
import {guestEntitiesToAll, guestEntityToApiGuest} from "../Common/Entities/Guest/GuestEntity";
|
||||
|
||||
class Bot {
|
||||
class Bot implements BotInstanceFunctions {
|
||||
|
||||
client!: ExtendedSnoowrap;
|
||||
logger!: Logger;
|
||||
logs: LogInfo[] = [];
|
||||
wikiLocation: string;
|
||||
dryRun?: true | undefined;
|
||||
inited: boolean = false;
|
||||
running: boolean = false;
|
||||
subreddits: string[];
|
||||
excludeSubreddits: string[];
|
||||
@@ -71,6 +78,7 @@ class Bot {
|
||||
botName?: string;
|
||||
botLink?: string;
|
||||
botAccount?: string;
|
||||
botUser?: RedditUser;
|
||||
maxWorkers: number;
|
||||
startedAt: Dayjs = dayjs();
|
||||
sharedStreams: PollOn[] = [];
|
||||
@@ -90,9 +98,13 @@ class Bot {
|
||||
|
||||
config: BotInstanceConfig;
|
||||
|
||||
influxClients: InfluxClient[] = [];
|
||||
|
||||
database: DataSource
|
||||
invokeeRepo: Repository<InvokeeType>;
|
||||
runTypeRepo: Repository<RunStateType>;
|
||||
managerRepo: Repository<ManagerEntity>;
|
||||
authorRepo: Repository<AuthorEntity>;
|
||||
botEntity!: BotEntity
|
||||
|
||||
getBotName = () => {
|
||||
@@ -154,6 +166,8 @@ class Bot {
|
||||
this.database = database;
|
||||
this.invokeeRepo = this.database.getRepository(InvokeeType);
|
||||
this.runTypeRepo = this.database.getRepository(RunStateType);
|
||||
this.managerRepo = this.database.getRepository(ManagerEntity);
|
||||
this.authorRepo = this.database.getRepository(AuthorEntity);
|
||||
this.config = config;
|
||||
this.dryRun = parseBool(dryRun) === true ? true : undefined;
|
||||
this.softLimit = softLimit;
|
||||
@@ -177,8 +191,11 @@ class Bot {
|
||||
|
||||
this.logger.stream().on('log', (log: LogInfo) => {
|
||||
if(log.bot !== undefined && log.bot === this.getBotName() && log.subreddit === undefined) {
|
||||
const combinedLogs = [log, ...this.logs];
|
||||
this.logs = combinedLogs.slice(0, 301);
|
||||
this.logs.unshift(log);
|
||||
if(this.logs.length > 300) {
|
||||
// remove all elements starting from the 300th index (301st item)
|
||||
this.logs.splice(300);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -311,33 +328,22 @@ class Bot {
|
||||
}
|
||||
}
|
||||
|
||||
async testClient(initial = true) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
await this.client.getMe();
|
||||
this.logger.info('Test API call successful');
|
||||
} catch (err: any) {
|
||||
if (initial) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
|
||||
}
|
||||
const hint = getExceptionMessage(err, {
|
||||
401: 'Likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken',
|
||||
400: 'Credentials may have been invalidated manually or by reddit due to behavior',
|
||||
});
|
||||
let msg = `Error occurred while testing Reddit API client${hint !== undefined ? `: ${hint}` : ''}`;
|
||||
this.error = msg;
|
||||
const clientError = new CMError(msg, {cause: err});
|
||||
clientError.logged = true;
|
||||
this.logger.error(clientError);
|
||||
throw clientError;
|
||||
}
|
||||
}
|
||||
async init() {
|
||||
|
||||
if(this.inited) {
|
||||
return;
|
||||
}
|
||||
|
||||
let user: RedditUser;
|
||||
try {
|
||||
user = await this.testClient();
|
||||
} catch(err: any) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the Bot from running.');
|
||||
throw err;
|
||||
}
|
||||
|
||||
async buildManagers(subreddits: string[] = []) {
|
||||
let availSubs = [];
|
||||
// @ts-ignore
|
||||
const user = await this.client.getMe().fetch();
|
||||
this.cacheManager.botName = user.name;
|
||||
this.botUser = user;
|
||||
this.botLink = `https://reddit.com/user/${user.name}`;
|
||||
this.botAccount = `u/${user.name}`;
|
||||
this.logger.info(`Reddit API Limit Remaining: ${this.client.ratelimitRemaining}`);
|
||||
@@ -364,35 +370,78 @@ class Bot {
|
||||
this.botEntity = b;
|
||||
}
|
||||
|
||||
if(this.config.opInflux !== undefined) {
|
||||
this.influxClients.push(this.config.opInflux.childClient(this.logger, {bot: user.name}));
|
||||
if(this.config.influxConfig !== undefined) {
|
||||
const iClient = new InfluxClient(this.config.influxConfig, this.logger, {bot: user.name});
|
||||
await iClient.isReady();
|
||||
this.influxClients.push(iClient);
|
||||
}
|
||||
}
|
||||
|
||||
this.inited = true;
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
async testClient(initial = true) {
|
||||
try {
|
||||
// @ts-ignore
|
||||
const user = this.client.getMe().fetch();
|
||||
this.logger.info('Test API call successful');
|
||||
return user;
|
||||
} catch (err: any) {
|
||||
if (initial) {
|
||||
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
|
||||
}
|
||||
const hint = getExceptionMessage(err, {
|
||||
401: 'Likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken',
|
||||
400: 'Credentials may have been invalidated manually or by reddit due to behavior',
|
||||
});
|
||||
let msg = `Error occurred while testing Reddit API client${hint !== undefined ? `: ${hint}` : ''}`;
|
||||
this.error = msg;
|
||||
const clientError = new CMError(msg, {cause: err});
|
||||
clientError.logged = true;
|
||||
this.logger.error(clientError);
|
||||
throw clientError;
|
||||
}
|
||||
}
|
||||
|
||||
async buildManagers(subreddits: string[] = []) {
|
||||
await this.init();
|
||||
|
||||
this.logger.verbose('Syncing subreddits to moderate with managers...');
|
||||
|
||||
let availSubs: Subreddit[] = [];
|
||||
|
||||
let subListing = await this.client.getModeratedSubreddits({count: 100});
|
||||
while(!subListing.isFinished) {
|
||||
subListing = await subListing.fetchMore({amount: 100});
|
||||
}
|
||||
availSubs = subListing.filter(x => x.display_name !== `u_${user.name}`);
|
||||
availSubs = subListing.filter(x => x.display_name !== `u_${this.botUser?.name}`);
|
||||
|
||||
this.logger.info(`u/${user.name} is a moderator of these subreddits: ${availSubs.map(x => x.display_name_prefixed).join(', ')}`);
|
||||
this.logger.verbose(`${this.botAccount} is a moderator of these subreddits: ${availSubs.map(x => x.display_name_prefixed).join(', ')}`);
|
||||
|
||||
let subsToRun: Subreddit[] = [];
|
||||
const subsToUse = subreddits.length > 0 ? subreddits.map(parseSubredditName) : this.subreddits;
|
||||
if (subsToUse.length > 0) {
|
||||
this.logger.info(`Operator-defined subreddit constraints detected (CLI argument or environmental variable), will try to run on: ${subsToUse.join(', ')}`);
|
||||
for (const sub of subsToUse) {
|
||||
this.logger.info(`Operator-specified subreddit constraints detected, will only use these: ${subsToUse.join(', ')}`);
|
||||
const availSubsCI = availSubs.map(x => x.display_name.toLowerCase());
|
||||
const [foundSubs, notFoundSubs] = partition(subsToUse, (aSub) => availSubsCI.includes(aSub.toLowerCase()));
|
||||
if(notFoundSubs.length > 0) {
|
||||
this.logger.warn(`Will not run some operator-specified subreddits because they are not modded by, or do not have appropriate mod permissions for, this bot: ${notFoundSubs.join(', ')}`);
|
||||
}
|
||||
|
||||
for (const sub of foundSubs) {
|
||||
const asub = availSubs.find(x => x.display_name.toLowerCase() === sub.toLowerCase())
|
||||
if (asub === undefined) {
|
||||
this.logger.warn(`Will not run on ${sub} because is not modded by, or does not have appropriate permissions to mod with, for this client.`);
|
||||
} else {
|
||||
// @ts-ignore
|
||||
const fetchedSub = await asub.fetch();
|
||||
subsToRun.push(fetchedSub);
|
||||
}
|
||||
subsToRun.push(asub as Subreddit);
|
||||
}
|
||||
} else {
|
||||
if(this.excludeSubreddits.length > 0) {
|
||||
this.logger.info(`Will run on all moderated subreddits but own profile and user-defined excluded: ${this.excludeSubreddits.join(', ')}`);
|
||||
this.logger.info(`Will run on all moderated subreddits EXCEPT own profile and operator-defined excluded: ${this.excludeSubreddits.join(', ')}`);
|
||||
const normalExcludes = this.excludeSubreddits.map(x => x.toLowerCase());
|
||||
subsToRun = availSubs.filter(x => !normalExcludes.includes(x.display_name.toLowerCase()));
|
||||
} else {
|
||||
this.logger.info(`No user-defined subreddit constraints detected, will run on all moderated subreddits EXCEPT own profile (${this.botAccount})`);
|
||||
this.logger.info(`No operator-defined subreddit constraints detected, will run on all moderated subreddits EXCEPT own profile (${this.botAccount})`);
|
||||
subsToRun = availSubs;
|
||||
}
|
||||
}
|
||||
@@ -415,30 +464,66 @@ class Bot {
|
||||
return acc;
|
||||
}
|
||||
}, []);
|
||||
const notMatched = difference(normalizedOverrideNames, subsToRunNames);
|
||||
const notMatched = symmetricalDifference(normalizedOverrideNames, subsToRunNames);
|
||||
if(notMatched.length > 0) {
|
||||
this.logger.warn(`There are overrides defined for subreddits the bot is not running. Check your spelling! Overrides not matched: ${notMatched.join(', ')}`);
|
||||
}
|
||||
}
|
||||
|
||||
let subManagersChanged = false;
|
||||
|
||||
// get configs for subs we want to run on and build/validate them
|
||||
const subsToRunNames = subsToRun.map(x => x.display_name.toLowerCase());
|
||||
|
||||
// first stop and remove any managers with subreddits not in subsToRun
|
||||
// -- this covers scenario where bot is running and mods of a subreddit de-mod the bot
|
||||
// -- or where the include/exclude subs list changed from operator (not yet implemented)
|
||||
if(this.subManagers.length > 0) {
|
||||
let index = 0;
|
||||
for(const manager of this.subManagers) {
|
||||
if(!subsToRunNames.includes(manager.subreddit.display_name.toLowerCase())) {
|
||||
subManagersChanged = true;
|
||||
// determine if bot was de-modded
|
||||
const deModded = !availSubs.some(x => x.display_name.toLowerCase() === manager.subreddit.display_name.toLowerCase());
|
||||
this.logger.warn(`Stopping and removing manager for ${manager.subreddit.display_name.toLowerCase()} because it is ${deModded ? 'no longer moderated by this bot' : 'not in the list of subreddits to moderate'}`);
|
||||
await manager.destroy('system', {reason: deModded ? 'No longer moderated by this bot' : 'Subreddit is not in moderated list'});
|
||||
this.subManagers.splice(index, 1);
|
||||
}
|
||||
index++;
|
||||
}
|
||||
}
|
||||
|
||||
// then create any managers that don't already exist
|
||||
// -- covers init scenario
|
||||
// -- and in-situ adding subreddits IE bot is modded to a new subreddit while CM is running
|
||||
const subsToInit: string[] = [];
|
||||
for (const sub of subsToRun) {
|
||||
try {
|
||||
this.subManagers.push(await this.createManager(sub));
|
||||
} catch (err: any) {
|
||||
if(!this.subManagers.some(x => x.subreddit.display_name === sub.display_name)) {
|
||||
subManagersChanged = true;
|
||||
this.logger.info(`Manager for ${sub.display_name_prefixed} not found in existing managers. Creating now...`);
|
||||
subsToInit.push(sub.display_name);
|
||||
try {
|
||||
this.subManagers.push(await this.createManager(sub));
|
||||
} catch (err: any) {
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
for(const m of this.subManagers) {
|
||||
for(const subName of subsToInit) {
|
||||
try {
|
||||
await this.initManager(m);
|
||||
const m = this.subManagers.find(x => x.subreddit.display_name === subName);
|
||||
await this.initManager(m as Manager);
|
||||
} catch (err: any) {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
this.parseSharedStreams();
|
||||
if(!subManagersChanged) {
|
||||
this.logger.verbose('All managers were already synced!');
|
||||
} else {
|
||||
this.parseSharedStreams();
|
||||
}
|
||||
|
||||
return subManagersChanged;
|
||||
}
|
||||
|
||||
parseSharedStreams() {
|
||||
@@ -559,7 +644,7 @@ class Bot {
|
||||
}
|
||||
}
|
||||
|
||||
async createManager(sub: Subreddit): Promise<Manager> {
|
||||
async createManager(subVal: Subreddit): Promise<Manager> {
|
||||
const {
|
||||
flowControlDefaults: {
|
||||
maxGotoDepth: botMaxDefault
|
||||
@@ -570,6 +655,15 @@ class Bot {
|
||||
} = {}
|
||||
} = this.config;
|
||||
|
||||
let sub = subVal;
|
||||
// make sure the subreddit is fully fetched
|
||||
// @ts-ignore
|
||||
if(subVal._hasFetched === false) {
|
||||
// @ts-ignore
|
||||
sub = await subVal.fetch();
|
||||
}
|
||||
|
||||
|
||||
const override = overrides.find(x => {
|
||||
const configName = parseRedditEntity(x.name).name;
|
||||
if(configName !== undefined) {
|
||||
@@ -588,13 +682,12 @@ class Bot {
|
||||
} = {},
|
||||
} = override || {};
|
||||
|
||||
const managerRepo = this.database.getRepository(ManagerEntity);
|
||||
const subRepo = this.database.getRepository(SubredditEntity)
|
||||
let subreddit = await subRepo.findOne({where: {id: sub.name}});
|
||||
if(subreddit === null) {
|
||||
subreddit = await subRepo.save(new SubredditEntity({id: sub.name, name: sub.display_name}))
|
||||
}
|
||||
let managerEntity = await managerRepo.findOne({
|
||||
let managerEntity = await this.managerRepo.findOne({
|
||||
where: {
|
||||
bot: {
|
||||
id: this.botEntity.id
|
||||
@@ -603,12 +696,15 @@ class Bot {
|
||||
id: subreddit.id
|
||||
}
|
||||
},
|
||||
relations: {
|
||||
guests: true
|
||||
}
|
||||
});
|
||||
if(managerEntity === undefined || managerEntity === null) {
|
||||
const invokee = await this.invokeeRepo.findOneBy({name: SYSTEM}) as InvokeeType;
|
||||
const runType = await this.runTypeRepo.findOneBy({name: STOPPED}) as RunStateType;
|
||||
|
||||
managerEntity = await managerRepo.save(new ManagerEntity({
|
||||
managerEntity = await this.managerRepo.save(new ManagerEntity({
|
||||
name: sub.display_name,
|
||||
bot: this.botEntity,
|
||||
subreddit: subreddit as SubredditEntity,
|
||||
@@ -630,6 +726,7 @@ class Bot {
|
||||
managerEntity: managerEntity as ManagerEntity,
|
||||
statDefaults: (statDefaultsFromOverride ?? databaseStatisticsDefaults) as DatabaseStatisticsOperatorConfig,
|
||||
retention,
|
||||
influxClients: this.influxClients,
|
||||
});
|
||||
// all errors from managers will count towards bot-level retry count
|
||||
manager.on('error', async (err) => await this.panicOnRetries(err));
|
||||
@@ -669,21 +766,6 @@ class Bot {
|
||||
await this.client.getSubreddit(name).acceptModeratorInvite();
|
||||
this.logger.info(`Accepted moderator invite for r/${name}!`);
|
||||
await this.cacheManager.deletePendingSubredditInvite(name);
|
||||
// @ts-ignore
|
||||
const sub = await this.client.getSubreddit(name);
|
||||
this.logger.info(`Attempting to add manager for r/${name}`);
|
||||
try {
|
||||
const manager = await this.createManager(sub);
|
||||
this.logger.info(`Starting manager for r/${name}`);
|
||||
this.subManagers.push(manager);
|
||||
await this.initManager(manager);
|
||||
await manager.start('system', {reason: 'Caused by creation due to moderator invite'});
|
||||
await this.runSharedStreams();
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof LoggedError)) {
|
||||
this.logger.error(err);
|
||||
}
|
||||
}
|
||||
} catch (err: any) {
|
||||
if (err.message.includes('NO_INVITE_FOUND')) {
|
||||
this.logger.warn(`No pending moderation invite for r/${name} was found`);
|
||||
@@ -742,9 +824,15 @@ class Bot {
|
||||
async healthLoop() {
|
||||
while (this.running) {
|
||||
await sleep(5000);
|
||||
const time = dayjs().valueOf()
|
||||
await this.apiHealthCheck(time);
|
||||
await this.guestModCleanup();
|
||||
if (!this.running) {
|
||||
break;
|
||||
}
|
||||
for(const m of this.subManagers) {
|
||||
await m.writeHealthMetrics(time);
|
||||
}
|
||||
const now = dayjs();
|
||||
if (now.isSameOrAfter(this.nextNannyCheck)) {
|
||||
try {
|
||||
@@ -757,8 +845,17 @@ class Bot {
|
||||
}
|
||||
if(now.isSameOrAfter(this.nextHeartbeat)) {
|
||||
try {
|
||||
await this.heartbeat();
|
||||
|
||||
// run sanity check to see if there is a service issue
|
||||
try {
|
||||
await this.testClient(false);
|
||||
} catch (err: any) {
|
||||
throw new SimpleError(`Something isn't right! This could be a Reddit API issue (service is down? buggy??) or an issue with the Bot account. Will not run heartbeat operations and will wait until next heartbeat (${dayjs.duration(this.nextHeartbeat.diff(dayjs())).humanize()}) to try again`);
|
||||
}
|
||||
|
||||
await this.checkModInvites();
|
||||
await this.buildManagers();
|
||||
await this.heartbeat();
|
||||
} catch (err: any) {
|
||||
this.logger.error(`Error occurred during heartbeat check: ${err.message}`);
|
||||
}
|
||||
@@ -770,6 +867,73 @@ class Bot {
|
||||
this.emitter.emit('healthStopped');
|
||||
}
|
||||
|
||||
getApiUsageSummary() {
|
||||
const depletion = this.apiEstDepletion === undefined ? 'Not Calculated' : this.apiEstDepletion.humanize();
|
||||
return`API Usage Rolling Avg: ${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${depletion} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`;
|
||||
}
|
||||
|
||||
async apiHealthCheck(time?: number) {
|
||||
|
||||
const rollingSample = this.apiSample.slice(0, 7)
|
||||
rollingSample.unshift(this.client.ratelimitRemaining);
|
||||
this.apiSample = rollingSample;
|
||||
const diff = this.apiSample.reduceRight((acc: number[], curr, index) => {
|
||||
if (this.apiSample[index + 1] !== undefined) {
|
||||
const d = Math.abs(curr - this.apiSample[index + 1]);
|
||||
if (d === 0) {
|
||||
return [...acc, 0];
|
||||
}
|
||||
return [...acc, d / 10];
|
||||
}
|
||||
return acc;
|
||||
}, []);
|
||||
const diffTotal = diff.reduce((acc, curr) => acc + curr, 0);
|
||||
if(diffTotal === 0 || diff.length === 0) {
|
||||
this.apiRollingAvg = 0;
|
||||
} else {
|
||||
this.apiRollingAvg = diffTotal / diff.length; // api requests per second
|
||||
}
|
||||
this.depletedInSecs = this.apiRollingAvg === 0 ? Number.POSITIVE_INFINITY : this.client.ratelimitRemaining / this.apiRollingAvg; // number of seconds until current remaining limit is 0
|
||||
// if depletion/api usage is 0 we need a sane value to use here for both displaying in logs as well as for api nanny. 10 years seems reasonable
|
||||
this.apiEstDepletion = dayjs.duration((this.depletedInSecs === Number.POSITIVE_INFINITY ? {years: 10} : {seconds: this.depletedInSecs}));
|
||||
|
||||
if(this.influxClients.length > 0) {
|
||||
const apiMeasure = new Point('apiHealth')
|
||||
.intField('remaining', this.client.ratelimitRemaining)
|
||||
.stringField('nannyMod', this.nannyMode ?? 'none');
|
||||
|
||||
if(time !== undefined) {
|
||||
apiMeasure.timestamp(time);
|
||||
}
|
||||
|
||||
if(this.apiSample.length > 1) {
|
||||
const curr = this.apiSample[0];
|
||||
const last = this.apiSample[1];
|
||||
if(curr <= last) {
|
||||
apiMeasure.intField('used', last - curr);
|
||||
}
|
||||
}
|
||||
|
||||
for(const iclient of this.influxClients) {
|
||||
await iclient.writePoint(apiMeasure);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
async guestModCleanup() {
|
||||
const now = dayjs();
|
||||
|
||||
for(const m of this.subManagers) {
|
||||
const expiredGuests = m.managerEntity.getGuests().filter(x => x.expiresAt.isBefore(now));
|
||||
if(expiredGuests.length > 0) {
|
||||
m.managerEntity.removeGuestById(expiredGuests.map(x => x.id));
|
||||
m.logger.info(`Removed expired Guest Mods: ${expiredGuests.map(x => x.author.name).join(', ')}`);
|
||||
await this.managerRepo.save(m.managerEntity);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async retentionCleanup() {
|
||||
const now = dayjs();
|
||||
if(now.isSameOrAfter(this.nextRetentionCheck)) {
|
||||
@@ -783,15 +947,8 @@ class Bot {
|
||||
}
|
||||
|
||||
async heartbeat() {
|
||||
const heartbeat = `HEARTBEAT -- API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ~${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion === undefined ? 'N/A' : this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`
|
||||
this.logger.info(heartbeat);
|
||||
this.logger.info(`HEARTBEAT -- ${this.getApiUsageSummary()}`);
|
||||
|
||||
// run sanity check to see if there is a service issue
|
||||
try {
|
||||
await this.testClient(false);
|
||||
} catch (err: any) {
|
||||
throw new SimpleError(`Something isn't right! This could be a Reddit API issue (service is down? buggy??) or an issue with the Bot account. Will not run heartbeat operations and will wait until next heartbeat (${dayjs.duration(this.nextHeartbeat.diff(dayjs())).humanize()}) to try again`);
|
||||
}
|
||||
let startedAny = false;
|
||||
|
||||
for (const s of this.subManagers) {
|
||||
@@ -844,6 +1001,7 @@ class Bot {
|
||||
|
||||
async runApiNanny() {
|
||||
try {
|
||||
this.logger.debug(this.getApiUsageSummary());
|
||||
this.nextExpiration = dayjs(this.client.ratelimitExpiration);
|
||||
const nowish = dayjs().add(10, 'second');
|
||||
if (nowish.isAfter(this.nextExpiration)) {
|
||||
@@ -867,30 +1025,12 @@ class Bot {
|
||||
}
|
||||
this.nextExpiration = dayjs(this.client.ratelimitExpiration);
|
||||
}
|
||||
const rollingSample = this.apiSample.slice(0, 7)
|
||||
rollingSample.unshift(this.client.ratelimitRemaining);
|
||||
this.apiSample = rollingSample;
|
||||
const diff = this.apiSample.reduceRight((acc: number[], curr, index) => {
|
||||
if (this.apiSample[index + 1] !== undefined) {
|
||||
const d = Math.abs(curr - this.apiSample[index + 1]);
|
||||
if (d === 0) {
|
||||
return [...acc, 0];
|
||||
}
|
||||
return [...acc, d / 10];
|
||||
}
|
||||
return acc;
|
||||
}, []);
|
||||
this.apiRollingAvg = diff.reduce((acc, curr) => acc + curr, 0) / diff.length; // api requests per second
|
||||
this.depletedInSecs = this.client.ratelimitRemaining / this.apiRollingAvg; // number of seconds until current remaining limit is 0
|
||||
this.apiEstDepletion = dayjs.duration({seconds: this.depletedInSecs});
|
||||
this.logger.debug(`API Usage Rolling Avg: ${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`);
|
||||
|
||||
|
||||
let hardLimitHit = false;
|
||||
if (typeof this.hardLimit === 'string') {
|
||||
if (typeof this.hardLimit === 'string' && this.apiEstDepletion !== undefined) {
|
||||
const hardDur = parseDuration(this.hardLimit);
|
||||
hardLimitHit = hardDur.asSeconds() > this.apiEstDepletion.asSeconds();
|
||||
} else {
|
||||
} else if(typeof this.hardLimit === 'number') {
|
||||
hardLimitHit = this.hardLimit > this.client.ratelimitRemaining;
|
||||
}
|
||||
|
||||
@@ -899,7 +1039,6 @@ class Bot {
|
||||
return;
|
||||
}
|
||||
this.logger.info(`Detected HARD LIMIT of ${this.hardLimit} remaining`, {leaf: 'Api Nanny'});
|
||||
this.logger.info(`API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ${this.apiRollingAvg}/s | Est Depletion: ${this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`, {leaf: 'Api Nanny'});
|
||||
this.logger.info(`All subreddit event polling has been paused`, {leaf: 'Api Nanny'});
|
||||
|
||||
for (const m of this.subManagers) {
|
||||
@@ -916,10 +1055,10 @@ class Bot {
|
||||
}
|
||||
|
||||
let softLimitHit = false;
|
||||
if (typeof this.softLimit === 'string') {
|
||||
if (typeof this.softLimit === 'string' && this.apiEstDepletion !== undefined) {
|
||||
const softDur = parseDuration(this.softLimit);
|
||||
softLimitHit = softDur.asSeconds() > this.apiEstDepletion.asSeconds();
|
||||
} else {
|
||||
} else if(typeof this.softLimit === 'number') {
|
||||
softLimitHit = this.softLimit > this.client.ratelimitRemaining;
|
||||
}
|
||||
|
||||
@@ -928,7 +1067,6 @@ class Bot {
|
||||
return;
|
||||
}
|
||||
this.logger.info(`Detected SOFT LIMIT of ${this.softLimit} remaining`, {leaf: 'Api Nanny'});
|
||||
this.logger.info(`API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`, {leaf: 'Api Nanny'});
|
||||
this.logger.info('Trying to detect heavy usage subreddits...', {leaf: 'Api Nanny'});
|
||||
let threshold = 0.5;
|
||||
let offenders = this.subManagers.filter(x => {
|
||||
@@ -986,6 +1124,117 @@ class Bot {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
getManagerNames(): string[] {
|
||||
return this.subManagers.map(x => x.displayLabel);
|
||||
}
|
||||
|
||||
getSubreddits(normalized = true): string[] {
|
||||
return normalized ? this.subManagers.map(x => parseRedditEntity(x.subreddit.display_name).name) : this.subManagers.map(x => x.subreddit.display_name);
|
||||
}
|
||||
|
||||
getGuestManagers(user: string): NormalizedManagerResponse[] {
|
||||
return this.subManagers.filter(x => x.managerEntity.getGuests().map(y => y.author.name).includes(user)).map(x => x.toNormalizedManager());
|
||||
}
|
||||
|
||||
getGuestSubreddits(user: string): string[] {
|
||||
return this.getGuestManagers(user).map(x => x.subredditNormal);
|
||||
}
|
||||
|
||||
getAccessibleSubreddits(user: string, subreddits: string[] = []): string[] {
|
||||
const normalSubs = subreddits.map(x => parseRedditEntity(x).name);
|
||||
const moderatedSubs = intersect(normalSubs, this.getSubreddits());
|
||||
const guestSubs = this.getGuestSubreddits(user);
|
||||
return Array.from(new Set([...guestSubs, ...moderatedSubs]));
|
||||
}
|
||||
|
||||
canUserAccessBot(user: string, subreddits: string[] = []) {
|
||||
return this.getAccessibleSubreddits(user, subreddits).length > 0;
|
||||
}
|
||||
|
||||
canUserAccessSubreddit(subreddit: string, user: string, subreddits: string[] = []): boolean {
|
||||
return this.getAccessibleSubreddits(user, subreddits).includes(parseRedditEntity(subreddit).name);
|
||||
}
|
||||
|
||||
async addGuest(userVal: string | string[], expiresAt: Dayjs, managerVal?: string | string[]) {
|
||||
let managerNames: string[];
|
||||
if(typeof managerVal === 'string') {
|
||||
managerNames = [managerVal];
|
||||
} else if(Array.isArray(managerVal)) {
|
||||
managerNames = managerVal;
|
||||
} else {
|
||||
managerNames = this.subManagers.map(x => x.subreddit.display_name);
|
||||
}
|
||||
|
||||
const cleanSubredditNames = managerNames.map(x => parseRedditEntity(x).name);
|
||||
const userNames = typeof userVal === 'string' ? [userVal] : userVal;
|
||||
const cleanUsers = userNames.map(x => parseRedditEntity(x.trim(), 'user').name);
|
||||
|
||||
const users: AuthorEntity[] = [];
|
||||
|
||||
for(const uName of cleanUsers) {
|
||||
let user = await this.authorRepo.findOne({
|
||||
where: {
|
||||
name: uName,
|
||||
}
|
||||
});
|
||||
|
||||
if(user === null) {
|
||||
users.push(await this.authorRepo.save(new AuthorEntity({name: uName})));
|
||||
} else {
|
||||
users.push(user);
|
||||
}
|
||||
}
|
||||
|
||||
const newGuestData = users.map(x => ({author: x, expiresAt})) as GuestEntityData[];
|
||||
|
||||
let newGuests = new Map<string, Guest[]>();
|
||||
const updatedManagerEntities: ManagerEntity[] = [];
|
||||
for(const m of this.subManagers) {
|
||||
if(!cleanSubredditNames.includes(m.subreddit.display_name)) {
|
||||
continue;
|
||||
}
|
||||
const filteredGuests = m.managerEntity.addGuest(newGuestData);
|
||||
updatedManagerEntities.push(m.managerEntity);
|
||||
newGuests.set(m.displayLabel, filteredGuests.map(x => guestEntityToApiGuest(x)));
|
||||
m.logger.info(`Added ${cleanUsers.join(', ')} as Guest`);
|
||||
}
|
||||
|
||||
await this.managerRepo.save(updatedManagerEntities);
|
||||
|
||||
return newGuests;
|
||||
}
|
||||
|
||||
async removeGuest(userVal: string | string[], managerVal?: string | string[]) {
|
||||
let managerNames: string[];
|
||||
if(typeof managerVal === 'string') {
|
||||
managerNames = [managerVal];
|
||||
} else if(Array.isArray(managerVal)) {
|
||||
managerNames = managerVal;
|
||||
} else {
|
||||
managerNames = this.subManagers.map(x => x.subreddit.display_name);
|
||||
}
|
||||
|
||||
const cleanSubredditNames = managerNames.map(x => parseRedditEntity(x).name);
|
||||
const userNames = typeof userVal === 'string' ? [userVal] : userVal;
|
||||
const cleanUsers = userNames.map(x => parseRedditEntity(x.trim(), 'user').name);
|
||||
|
||||
let newGuests = new Map<string, Guest[]>();
|
||||
const updatedManagerEntities: ManagerEntity[] = [];
|
||||
for(const m of this.subManagers) {
|
||||
if(!cleanSubredditNames.includes(m.subreddit.display_name)) {
|
||||
continue;
|
||||
}
|
||||
const filteredGuests = m.managerEntity.removeGuestByUser(cleanUsers);
|
||||
updatedManagerEntities.push(m.managerEntity);
|
||||
newGuests.set(m.displayLabel, filteredGuests.map(x => guestEntityToApiGuest(x)));
|
||||
m.logger.info(`Removed ${cleanUsers.join(', ')} from Guests`);
|
||||
}
|
||||
|
||||
await this.managerRepo.save(updatedManagerEntities);
|
||||
|
||||
return newGuests;
|
||||
}
|
||||
}
|
||||
|
||||
export default Bot;
|
||||
|
||||
@@ -1,19 +1,18 @@
|
||||
import {RuleSet, IRuleSet, RuleSetJson, RuleSetObjectJson, isRuleSetJSON} from "../Rule/RuleSet";
|
||||
import {IRule, Rule, RuleJSONConfig} from "../Rule";
|
||||
import Action, {ActionConfig, ActionJson, StructuredActionJson} from "../Action";
|
||||
import {RuleSet, RuleSetConfigData, RuleSetConfigHydratedData, RuleSetConfigObject} from "../Rule/RuleSet";
|
||||
import {Rule} from "../Rule";
|
||||
import Action, {ActionConfig} from "../Action";
|
||||
import {Logger} from "winston";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {Comment, Submission} from "snoowrap";
|
||||
import {actionFactory} from "../Action/ActionFactory";
|
||||
import {ruleFactory} from "../Rule/RuleFactory";
|
||||
import {
|
||||
asPostBehaviorOptionConfig,
|
||||
boolToString,
|
||||
createAjvFactory, determineNewResults,
|
||||
FAIL, isRuleSetResult,
|
||||
createAjvFactory,
|
||||
FAIL,
|
||||
isRuleSetResult,
|
||||
mergeArr,
|
||||
PASS,
|
||||
resultsSummary,
|
||||
ruleNamesFromResults,
|
||||
truncateStringToLength
|
||||
} from "../util";
|
||||
import {
|
||||
@@ -22,19 +21,17 @@ import {
|
||||
CheckSummary,
|
||||
JoinCondition,
|
||||
NotificationEventPayload,
|
||||
PostBehavior, PostBehaviorOptionConfig, PostBehaviorOptionConfigStrong, PostBehaviorStrong,
|
||||
RuleResult,
|
||||
RuleSetResult, UserResultCache
|
||||
PostBehavior,
|
||||
PostBehaviorOptionConfigStrong,
|
||||
PostBehaviorStrong,
|
||||
RuleSetResult
|
||||
} from "../Common/interfaces";
|
||||
import * as RuleSchema from '../Schema/Rule.json';
|
||||
import * as RuleSetSchema from '../Schema/RuleSet.json';
|
||||
import * as ActionSchema from '../Schema/Action.json';
|
||||
import {
|
||||
ActionJson as ActionTypeJson
|
||||
} from "../Common/types";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
|
||||
import {ActionProcessingError, CheckProcessingError, isRateLimitError} from "../Utils/Errors";
|
||||
import {ActionProcessingError, CheckProcessingError} from "../Utils/Errors";
|
||||
import {ErrorWithCause, stackWithCauses} from "pony-cause";
|
||||
import {runCheckOptions} from "../Subreddit/Manager";
|
||||
import EventEmitter from "events";
|
||||
@@ -47,24 +44,28 @@ import {RunnableBase} from "../Common/RunnableBase";
|
||||
import {ActionResultEntity} from "../Common/Entities/ActionResultEntity";
|
||||
import {RuleSetResultEntity} from "../Common/Entities/RuleSetResultEntity";
|
||||
import {CheckToRuleResultEntity} from "../Common/Entities/RunnableAssociation/CheckToRuleResultEntity";
|
||||
import {
|
||||
JoinOperands,
|
||||
PostBehaviorType,
|
||||
RecordOutputType,
|
||||
recordOutputTypes
|
||||
} from "../Common/Infrastructure/Atomic";
|
||||
import {
|
||||
MinimalOrFullFilter,
|
||||
MinimalOrFullFilterJson
|
||||
} from "../Common/Infrastructure/Filters/FilterShapes";
|
||||
import {
|
||||
CommentState,
|
||||
SubmissionState,
|
||||
} from "../Common/Infrastructure/Filters/FilterCriteria";
|
||||
import {JoinOperands, PostBehaviorType, RecordOutputType, recordOutputTypes} from "../Common/Infrastructure/Atomic";
|
||||
import {CommentState, SubmissionState,} from "../Common/Infrastructure/Filters/FilterCriteria";
|
||||
import {ActivityType} from "../Common/Infrastructure/Reddit";
|
||||
import {RunnableBaseJson, RunnableBaseOptions, StructuredRunnableBase} from "../Common/Infrastructure/Runnable";
|
||||
import {RuleJson, StructuredRuleObjectJson, StructuredRuleSetObjectJson} from "../Common/Infrastructure/RuleShapes";
|
||||
import {ActionObjectJson, StructuredActionObjectJson} from "../Common/Infrastructure/ActionShapes";
|
||||
import {
|
||||
RunnableBaseJson,
|
||||
RunnableBaseOptions,
|
||||
StructuredRunnableBase,
|
||||
TypedRunnableBaseData, TypedStructuredRunnableBase
|
||||
} from "../Common/Infrastructure/Runnable";
|
||||
import {
|
||||
RuleConfigData, RuleConfigHydratedData,
|
||||
RuleConfigObject,
|
||||
StructuredRuleConfigObject,
|
||||
StructuredRuleSetConfigObject
|
||||
} from "../Common/Infrastructure/RuleShapes";
|
||||
import {
|
||||
ActionConfigData,
|
||||
ActionConfigHydratedData,
|
||||
ActionConfigObject,
|
||||
StructuredActionObjectJson
|
||||
} from "../Common/Infrastructure/ActionShapes";
|
||||
import {IncludesData} from "../Common/Infrastructure/Includes";
|
||||
|
||||
const checkLogName = truncateStringToLength(25);
|
||||
|
||||
@@ -155,7 +156,7 @@ export abstract class Check extends RunnableBase implements Omit<ICheck, 'postTr
|
||||
if(asPostBehaviorOptionConfig(postFail)) {
|
||||
const {
|
||||
behavior = 'next',
|
||||
recordTo = false
|
||||
recordTo = ['influx']
|
||||
} = postFail;
|
||||
let recordStrong: RecordOutputType[] = [];
|
||||
if(typeof recordTo === 'boolean') {
|
||||
@@ -174,7 +175,7 @@ export abstract class Check extends RunnableBase implements Omit<ICheck, 'postTr
|
||||
} else {
|
||||
this.postFail = {
|
||||
behavior: postFail,
|
||||
recordTo: []
|
||||
recordTo: ['influx']
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,12 +193,12 @@ export abstract class Check extends RunnableBase implements Omit<ICheck, 'postTr
|
||||
let ruleErrors: any = [];
|
||||
if (valid) {
|
||||
const ruleConfig = r;
|
||||
this.rules.push(new RuleSet({...ruleConfig as StructuredRuleSetObjectJson, logger: this.logger, subredditName, resources: this.resources, client: this.client}));
|
||||
this.rules.push(new RuleSet({...ruleConfig as StructuredRuleSetConfigObject, logger: this.logger, subredditName, resources: this.resources, client: this.client}));
|
||||
} else {
|
||||
setErrors = ajv.errors;
|
||||
valid = ajv.validate(RuleSchema, r);
|
||||
if (valid) {
|
||||
this.rules.push(ruleFactory(r as StructuredRuleObjectJson, this.logger, subredditName, this.resources, this.client));
|
||||
this.rules.push(ruleFactory(r as StructuredRuleConfigObject, this.logger, subredditName, this.resources, this.client));
|
||||
} else {
|
||||
ruleErrors = ajv.errors;
|
||||
const leastErrorType = setErrors.length < ruleErrors ? 'RuleSet' : 'Rule';
|
||||
@@ -434,7 +435,7 @@ export abstract class Check extends RunnableBase implements Omit<ICheck, 'postTr
|
||||
checkSum.postBehavior = this.postFail.behavior;
|
||||
}
|
||||
|
||||
behaviorT = checkSum.triggered ? 'Trigger' : 'Fail';
|
||||
behaviorT = checkResult.triggered ? 'Trigger' : 'Fail';
|
||||
|
||||
switch (checkSum.postBehavior.toLowerCase()) {
|
||||
case 'next':
|
||||
@@ -605,7 +606,7 @@ export interface ICheck extends JoinCondition, PostBehavior, RunnableBaseJson {
|
||||
}
|
||||
|
||||
export interface CheckOptions extends Omit<ICheck, 'authorIs' | 'itemIs'>, RunnableBaseOptions {
|
||||
rules: Array<StructuredRuleSetObjectJson | StructuredRuleObjectJson>;
|
||||
rules: Array<RuleConfigObject | RuleSetConfigObject>;
|
||||
actions: ActionConfig[];
|
||||
logger: Logger;
|
||||
subredditName: string;
|
||||
@@ -616,7 +617,15 @@ export interface CheckOptions extends Omit<ICheck, 'authorIs' | 'itemIs'>, Runna
|
||||
emitter: EventEmitter
|
||||
}
|
||||
|
||||
export interface CheckJson extends ICheck {
|
||||
/*
|
||||
* Can contain actions/rules as:
|
||||
* - full objects
|
||||
* - string to hydrate IE "url:fsdfd"
|
||||
* - named string IE "namedRule"
|
||||
*
|
||||
* Also can contain itemIs/authorIs as full object or named filter
|
||||
* */
|
||||
export interface CheckConfigData extends ICheck, RunnableBaseJson {
|
||||
/**
|
||||
* The type of event (new submission or new comment) this check should be run against
|
||||
* @examples ["submission", "comment"]
|
||||
@@ -631,7 +640,7 @@ export interface CheckJson extends ICheck {
|
||||
*
|
||||
* **If `rules` is an empty array or not present then `actions` are performed immediately.**
|
||||
* */
|
||||
rules?: Array<RuleSetJson | RuleJson>
|
||||
rules?: (RuleSetConfigData | RuleConfigData | string | IncludesData)[]
|
||||
/**
|
||||
* The `Actions` to run after the check is successfully triggered. ALL `Actions` will run in the order they are listed
|
||||
*
|
||||
@@ -639,7 +648,7 @@ export interface CheckJson extends ICheck {
|
||||
*
|
||||
* @examples [[{"kind": "comment", "content": "this is the content of the comment", "distinguish": true}, {"kind": "lock"}]]
|
||||
* */
|
||||
actions?: Array<ActionTypeJson>
|
||||
actions?: ActionConfigData[]
|
||||
|
||||
/**
|
||||
* If notifications are configured and this is `true` then an `eventActioned` event will be sent when this check is triggered.
|
||||
@@ -651,9 +660,49 @@ export interface CheckJson extends ICheck {
|
||||
cacheUserResult?: UserResultCacheOptions;
|
||||
}
|
||||
|
||||
export interface SubmissionCheckJson extends CheckJson {
|
||||
export interface SubmissionCheckConfigData extends CheckConfigData, TypedRunnableBaseData<SubmissionState> {
|
||||
kind: 'submission'
|
||||
itemIs?: MinimalOrFullFilterJson<SubmissionState>
|
||||
}
|
||||
|
||||
export interface CommentCheckConfigData extends CheckConfigData, TypedRunnableBaseData<CommentState> {
|
||||
kind: 'comment'
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Can contain actions/rules as:
|
||||
* - full objects
|
||||
* - named string IE "namedRule"
|
||||
*
|
||||
* Also can contain itemIs/authorIs as full object or named filter
|
||||
* */
|
||||
export interface CheckConfigHydratedData extends CheckConfigData {
|
||||
rules?: (RuleSetConfigHydratedData | RuleConfigHydratedData)[]
|
||||
actions?: ActionConfigHydratedData[]
|
||||
}
|
||||
|
||||
export interface SubmissionCheckConfigHydratedData extends CheckConfigHydratedData, TypedRunnableBaseData<SubmissionState> {
|
||||
kind: 'submission'
|
||||
}
|
||||
|
||||
export interface CommentCheckConfigHydratedData extends CheckConfigHydratedData, TypedRunnableBaseData<CommentState> {
|
||||
kind: 'comment'
|
||||
}
|
||||
|
||||
/*
|
||||
* All actions/rules/filters should now be full objects
|
||||
* */
|
||||
export interface CheckConfigObject extends Omit<CheckConfigHydratedData, 'itemIs' | 'authorIs'>, StructuredRunnableBase {
|
||||
rules: Array<RuleSetConfigObject | RuleConfigObject>
|
||||
actions: Array<ActionConfigObject>
|
||||
}
|
||||
|
||||
export interface SubmissionCheckConfigObject extends Omit<CheckConfigObject, 'itemIs' | 'author'>, TypedStructuredRunnableBase<SubmissionState> {
|
||||
kind: 'submission'
|
||||
}
|
||||
|
||||
export interface CommentCheckConfigObject extends Omit<CheckConfigObject, 'itemIs' | 'author'>, TypedStructuredRunnableBase<CommentState> {
|
||||
kind: 'comment'
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -691,33 +740,18 @@ export const userResultCacheDefault: Required<UserResultCacheOptions> = {
|
||||
runActions: true,
|
||||
}
|
||||
|
||||
export interface CommentCheckJson extends CheckJson {
|
||||
kind: 'comment'
|
||||
itemIs?: MinimalOrFullFilterJson<CommentState>
|
||||
}
|
||||
|
||||
export const asStructuredCommentCheckJson = (val: any): val is CommentCheckStructuredJson => {
|
||||
export const asStructuredCommentCheckJson = (val: any): val is CommentCheckConfigObject => {
|
||||
return val.kind === 'comment';
|
||||
}
|
||||
|
||||
export const asStructuredSubmissionCheckJson = (val: any): val is SubmissionCheckStructuredJson => {
|
||||
export const asStructuredSubmissionCheckJson = (val: any): val is SubmissionCheckConfigObject => {
|
||||
return val.kind === 'submission';
|
||||
}
|
||||
|
||||
export type CheckStructuredJson = SubmissionCheckStructuredJson | CommentCheckStructuredJson;
|
||||
// export interface CheckStructuredJson extends CheckJson {
|
||||
// rules: Array<RuleSetObjectJson | RuleObjectJson>
|
||||
// actions: Array<ActionObjectJson>
|
||||
// }
|
||||
export type ActivityCheckConfigValue = string | IncludesData | SubmissionCheckConfigData | CommentCheckConfigData;
|
||||
|
||||
export interface SubmissionCheckStructuredJson extends Omit<SubmissionCheckJson, 'authorIs' | 'itemIs' | 'rules'>, StructuredRunnableBase {
|
||||
rules: Array<StructuredRuleSetObjectJson | StructuredRuleObjectJson>
|
||||
actions: Array<ActionObjectJson>
|
||||
itemIs?: MinimalOrFullFilter<SubmissionState>
|
||||
}
|
||||
export type ActivityCheckConfigData = Exclude<ActivityCheckConfigValue, IncludesData>;
|
||||
|
||||
export interface CommentCheckStructuredJson extends Omit<CommentCheckJson, 'authorIs' | 'itemIs' | 'rules'>, StructuredRunnableBase {
|
||||
rules: Array<StructuredRuleSetObjectJson | StructuredRuleObjectJson>
|
||||
actions: Array<ActionObjectJson>
|
||||
itemIs?: MinimalOrFullFilter<CommentState>
|
||||
}
|
||||
export type ActivityCheckConfigHydratedData = SubmissionCheckConfigHydratedData | CommentCheckConfigHydratedData;
|
||||
|
||||
export type ActivityCheckObject = SubmissionCheckConfigObject | CommentCheckConfigObject;
|
||||
|
||||
@@ -6,4 +6,5 @@ export interface ConfigToObjectOptions {
|
||||
location?: string,
|
||||
jsonDocFunc?: (content: string, location?: string) => AbstractConfigDocument<OperatorJsonConfig>,
|
||||
yamlDocFunc?: (content: string, location?: string) => AbstractConfigDocument<YamlDocument>
|
||||
allowArrays?: boolean
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ export const parseFromJsonOrYamlToObject = (content: string, options?: ConfigToO
|
||||
location,
|
||||
jsonDocFunc = (content: string, location?: string) => new JsonConfigDocument(content, location),
|
||||
yamlDocFunc = (content: string, location?: string) => new YamlConfigDocument(content, location),
|
||||
allowArrays = false,
|
||||
} = options || {};
|
||||
|
||||
try {
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
import YamlConfigDocument from "../YamlConfigDocument";
|
||||
import JsonConfigDocument from "../JsonConfigDocument";
|
||||
import {YAMLMap, YAMLSeq} from "yaml";
|
||||
import {BotInstanceJsonConfig, OperatorJsonConfig} from "../../interfaces";
|
||||
import {YAMLMap, YAMLSeq, Pair, Scalar} from "yaml";
|
||||
import {BotInstanceJsonConfig, OperatorJsonConfig, WebCredentials} from "../../interfaces";
|
||||
import {assign} from 'comment-json';
|
||||
|
||||
export interface OperatorConfigDocumentInterface {
|
||||
addBot(botData: BotInstanceJsonConfig): void;
|
||||
setFriendlyName(name: string): void;
|
||||
setWebCredentials(data: Required<WebCredentials>): void;
|
||||
setOperator(name: string): void;
|
||||
toJS(): OperatorJsonConfig;
|
||||
}
|
||||
|
||||
@@ -15,10 +18,12 @@ export class YamlOperatorConfigDocument extends YamlConfigDocument implements Op
|
||||
if (bots === undefined) {
|
||||
this.parsed.add({key: 'bots', value: [botData]});
|
||||
} else if (botData.name !== undefined) {
|
||||
// overwrite if we find an existing
|
||||
// granularly overwrite (merge) if we find an existing
|
||||
const existingIndex = bots.items.findIndex(x => (x as YAMLMap).get('name') === botData.name);
|
||||
if (existingIndex !== -1) {
|
||||
this.parsed.setIn(['bots', existingIndex], botData);
|
||||
const botObj = this.parsed.getIn(['bots', existingIndex]) as YAMLMap;
|
||||
const mergedVal = mergeObjectToYaml(botData, botObj);
|
||||
this.parsed.setIn(['bots', existingIndex], mergedVal);
|
||||
} else {
|
||||
this.parsed.addIn(['bots'], botData);
|
||||
}
|
||||
@@ -27,11 +32,41 @@ export class YamlOperatorConfigDocument extends YamlConfigDocument implements Op
|
||||
}
|
||||
}
|
||||
|
||||
setFriendlyName(name: string) {
|
||||
this.parsed.addIn(['api', 'friendly'], name);
|
||||
}
|
||||
|
||||
setWebCredentials(data: Required<WebCredentials>) {
|
||||
this.parsed.addIn(['web', 'credentials'], data);
|
||||
}
|
||||
|
||||
setOperator(name: string) {
|
||||
this.parsed.addIn(['operator', 'name'], name);
|
||||
}
|
||||
|
||||
toJS(): OperatorJsonConfig {
|
||||
return super.toJS();
|
||||
}
|
||||
}
|
||||
|
||||
export const mergeObjectToYaml = (source: object, target: YAMLMap) => {
|
||||
for (const [k, v] of Object.entries(source)) {
|
||||
if (target.has(k)) {
|
||||
const targetProp = target.get(k);
|
||||
if (targetProp instanceof YAMLMap && typeof v === 'object') {
|
||||
const merged = mergeObjectToYaml(v, targetProp);
|
||||
target.set(k, merged)
|
||||
} else {
|
||||
// since target prop and value are not both objects don't bother merging, just overwrite (primitive or array)
|
||||
target.set(k, v);
|
||||
}
|
||||
} else {
|
||||
target.add({key: k, value: v});
|
||||
}
|
||||
}
|
||||
return target;
|
||||
}
|
||||
|
||||
export class JsonOperatorConfigDocument extends JsonConfigDocument implements OperatorConfigDocumentInterface {
|
||||
addBot(botData: BotInstanceJsonConfig) {
|
||||
if (this.parsed.bots === undefined) {
|
||||
@@ -48,6 +83,23 @@ export class JsonOperatorConfigDocument extends JsonConfigDocument implements Op
|
||||
}
|
||||
}
|
||||
|
||||
setFriendlyName(name: string) {
|
||||
const api = this.parsed.api || {};
|
||||
this.parsed.api = {...api, friendly: name};
|
||||
}
|
||||
|
||||
setWebCredentials(data: Required<WebCredentials>) {
|
||||
const {
|
||||
web = {},
|
||||
} = this.parsed;
|
||||
|
||||
this.parsed.web = {...web, credentials: data};
|
||||
}
|
||||
|
||||
setOperator(name: string) {
|
||||
this.parsed.operator = { name };
|
||||
}
|
||||
|
||||
toJS(): OperatorJsonConfig {
|
||||
return super.toJS();
|
||||
}
|
||||
|
||||
@@ -79,6 +79,7 @@ export class ActionPremise extends TimeAwareRandomBaseEntity {
|
||||
this.active = data.active ?? true;
|
||||
this.configHash = objectHash.sha1(data.config);
|
||||
this.manager = data.manager;
|
||||
this.managerId = data.manager.id;
|
||||
this.name = data.name;
|
||||
|
||||
const {
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import {Entity, Column, PrimaryGeneratedColumn, ManyToOne, PrimaryColumn, OneToMany, OneToOne, Index} from "typeorm";
|
||||
import {Entity, Column, ManyToOne, PrimaryColumn, OneToMany, Index} from "typeorm";
|
||||
import {AuthorEntity} from "./AuthorEntity";
|
||||
import {Subreddit} from "./Subreddit";
|
||||
import {CMEvent} from "./CMEvent";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {Comment} from "snoowrap";
|
||||
import {asComment, getActivityAuthorName, parseRedditFullname, redditThingTypeToPrefix} from "../../util";
|
||||
import {ActivityType} from "../Infrastructure/Reddit";
|
||||
import {activityReports, ActivityType, Report, SnoowrapActivity} from "../Infrastructure/Reddit";
|
||||
import {ActivityReport} from "./ActivityReport";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
|
||||
export interface ActivityEntityOptions {
|
||||
id: string
|
||||
@@ -15,6 +15,7 @@ export interface ActivityEntityOptions {
|
||||
permalink: string
|
||||
author: AuthorEntity
|
||||
submission?: Activity
|
||||
reports?: ActivityReport[]
|
||||
}
|
||||
|
||||
@Entity()
|
||||
@@ -69,6 +70,9 @@ export class Activity {
|
||||
@OneToMany(type => Activity, obj => obj.submission, {nullable: true})
|
||||
comments!: Activity[];
|
||||
|
||||
@OneToMany(type => ActivityReport, act => act.activity, {cascade: ['insert'], eager: true})
|
||||
reports: ActivityReport[] | undefined
|
||||
|
||||
constructor(data?: ActivityEntityOptions) {
|
||||
if(data !== undefined) {
|
||||
this.type = data.type;
|
||||
@@ -78,10 +82,76 @@ export class Activity {
|
||||
this.permalink = data.permalink;
|
||||
this.author = data.author;
|
||||
this.submission = data.submission;
|
||||
this.reports = data.reports !== undefined ? data.reports : undefined;
|
||||
}
|
||||
}
|
||||
|
||||
static fromSnoowrapActivity(subreddit: Subreddit, activity: (Submission | Comment)) {
|
||||
/**
|
||||
* @param {SnoowrapActivity} activity
|
||||
* @param {Dayjs|undefined} lastKnownStateTimestamp Override the last good state (useful when tracked through polling)
|
||||
* */
|
||||
syncReports(activity: SnoowrapActivity, lastKnownStateTimestamp?: Dayjs) {
|
||||
if(activity.num_reports > 0 && (this.reports === undefined || activity.num_reports !== this.reports.length)) {
|
||||
if(this.reports === undefined) {
|
||||
this.reports = [];
|
||||
}
|
||||
const reports = activityReports(activity);
|
||||
// match up existing reports
|
||||
const usedReportEntities: string[] = [];
|
||||
const unsyncedReports: Report[] = [];
|
||||
for(const r of reports) {
|
||||
const matchedEntity = this.reports.find(x => !usedReportEntities.includes(x.id) && x.matchReport(r));
|
||||
if(matchedEntity !== undefined) {
|
||||
usedReportEntities.push(matchedEntity.id);
|
||||
} else {
|
||||
// found an unsynced report
|
||||
unsyncedReports.push(r);
|
||||
}
|
||||
}
|
||||
|
||||
// ideally we only have one report but it's possible (probable) there are more
|
||||
//
|
||||
// to simplify tracking over time we will spread out the "create time" for each report to be between NOW
|
||||
// and the last recorded report, or if no reports then the create time of the activity
|
||||
|
||||
// -- the assumptions about tracking should be good enough for most users because:
|
||||
// * default poll interval is 30 seconds so even if there are more than one reports in that time the resolution is high enough for accurate usage (most mods will use "> 1 report in 1 minute" or larger timescales)
|
||||
// * for populating existing reports (CM has not been tracking since activity creation) we don't want to bunch up all reports at the timestamp which could create false positives,
|
||||
// it's more likely that reports would be spread out than all occurring at the same time.
|
||||
|
||||
// TODO additionally, will allow users to specify minimum required granularity to use when filtering by reports over time
|
||||
|
||||
let lastRecordedTime = lastKnownStateTimestamp;
|
||||
if(lastKnownStateTimestamp === undefined) {
|
||||
lastRecordedTime = this.reports.length > 0 ?
|
||||
// get the latest create date for existing reports
|
||||
this.reports.reduce((acc, curr) => curr.createdAt.isAfter(acc) ? curr.createdAt : acc, dayjs('2000-1-1'))
|
||||
// if no reports then use activity create date
|
||||
: dayjs(activity.created_utc * 1000);
|
||||
}
|
||||
|
||||
// find the amount of time between now and last good timestamp
|
||||
const missingTimespan = dayjs.duration(dayjs().diff(lastRecordedTime));
|
||||
const granularity = Math.floor(missingTimespan.asSeconds());
|
||||
|
||||
// each report will have its create date spaced out (mostly) equally between now and the last good timestamp
|
||||
//
|
||||
// if only one report stick it in exact middle
|
||||
// if more than one than decrease span by 1/4 so that we don't end up having reports dead-on the last timestamp
|
||||
const increment = Math.floor(unsyncedReports.length === 1 ? (granularity / 2) : ((granularity / 1.25) / unsyncedReports.length));
|
||||
|
||||
for(let i = 0; i < unsyncedReports.length; i++) {
|
||||
const r = new ActivityReport({...unsyncedReports[i], activity: this, granularity});
|
||||
r.createdAt = dayjs().subtract(increment * (i + 1), 'seconds');
|
||||
this.reports.push(r);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static fromSnoowrapActivity(subreddit: Subreddit, activity: SnoowrapActivity, lastKnownStateTimestamp?: dayjs.Dayjs | undefined) {
|
||||
let submission: Activity | undefined;
|
||||
let type: ActivityType = 'submission';
|
||||
let content: string;
|
||||
@@ -99,7 +169,7 @@ export class Activity {
|
||||
const author = new AuthorEntity();
|
||||
author.name = getActivityAuthorName(activity.author);
|
||||
|
||||
return new Activity({
|
||||
const entity = new Activity({
|
||||
id: activity.name,
|
||||
subreddit,
|
||||
type,
|
||||
@@ -107,6 +177,10 @@ export class Activity {
|
||||
permalink: activity.permalink,
|
||||
author,
|
||||
submission
|
||||
})
|
||||
});
|
||||
|
||||
entity.syncReports(activity, lastKnownStateTimestamp);
|
||||
|
||||
return entity;
|
||||
}
|
||||
}
|
||||
|
||||
57
src/Common/Entities/ActivityReport.ts
Normal file
57
src/Common/Entities/ActivityReport.ts
Normal file
@@ -0,0 +1,57 @@
|
||||
import {
|
||||
Entity,
|
||||
Column,
|
||||
ManyToOne, JoinColumn, AfterLoad,
|
||||
} from "typeorm";
|
||||
import {Activity} from "./Activity";
|
||||
import {ManagerEntity} from "./ManagerEntity";
|
||||
import {TimeAwareRandomBaseEntity} from "./Base/TimeAwareRandomBaseEntity";
|
||||
import {Report, ReportType} from "../Infrastructure/Reddit";
|
||||
|
||||
@Entity()
|
||||
export class ActivityReport extends TimeAwareRandomBaseEntity {
|
||||
|
||||
@Column({nullable: false, length: 500})
|
||||
reason!: string
|
||||
|
||||
@Column({nullable: false, length: 20})
|
||||
type!: ReportType
|
||||
|
||||
@Column({nullable: true, length: 100})
|
||||
author?: string
|
||||
|
||||
@Column("int", {nullable: false})
|
||||
granularity: number = 0;
|
||||
|
||||
@ManyToOne(type => Activity, act => act.reports, {cascade: ['update']})
|
||||
@JoinColumn({name: 'activityId'})
|
||||
activity!: Activity;
|
||||
|
||||
@Column({nullable: false, name: 'activityId'})
|
||||
activityId!: string
|
||||
|
||||
constructor(data?: Report & { activity: Activity, granularity: number }) {
|
||||
super();
|
||||
if (data !== undefined) {
|
||||
this.reason = data.reason;
|
||||
this.type = data.type;
|
||||
this.author = data.author;
|
||||
this.activity = data.activity;
|
||||
this.activityId = data.activity.id;
|
||||
this.granularity = data.granularity
|
||||
}
|
||||
}
|
||||
|
||||
matchReport(report: Report): boolean {
|
||||
return this.reason === report.reason
|
||||
&& this.type === report.type
|
||||
&& this.author === report.author;
|
||||
}
|
||||
|
||||
@AfterLoad()
|
||||
convertPrimitives() {
|
||||
if(this.author === null) {
|
||||
this.author = undefined;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -12,4 +12,10 @@ export class AuthorEntity {
|
||||
|
||||
@OneToMany(type => Activity, act => act.author)
|
||||
activities!: Activity[]
|
||||
|
||||
constructor(data?: any) {
|
||||
if(data !== undefined) {
|
||||
this.name = data.name;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,68 @@
|
||||
import {Entity, Column, PrimaryColumn, OneToMany, PrimaryGeneratedColumn} from "typeorm";
|
||||
import {ManagerEntity} from "./ManagerEntity";
|
||||
import {RandomIdBaseEntity} from "./Base/RandomIdBaseEntity";
|
||||
import {BotGuestEntity, ManagerGuestEntity} from "./Guest/GuestEntity";
|
||||
import {Guest, GuestEntityData, HasGuests} from "./Guest/GuestInterfaces";
|
||||
import {SubredditInvite} from "./SubredditInvite";
|
||||
|
||||
@Entity()
|
||||
export class Bot extends RandomIdBaseEntity {
|
||||
export class Bot extends RandomIdBaseEntity implements HasGuests {
|
||||
|
||||
@Column("varchar", {length: 200})
|
||||
name!: string;
|
||||
|
||||
@OneToMany(type => ManagerEntity, obj => obj.bot)
|
||||
managers!: Promise<ManagerEntity[]>
|
||||
|
||||
@OneToMany(type => BotGuestEntity, obj => obj.guestOf, {eager: true, cascade: ['insert', 'remove', 'update']})
|
||||
guests!: BotGuestEntity[]
|
||||
|
||||
@OneToMany(type => SubredditInvite, obj => obj.bot, {eager: true, cascade: ['insert', 'remove', 'update']})
|
||||
subredditInvites!: SubredditInvite[]
|
||||
|
||||
getGuests() {
|
||||
const g = this.guests;
|
||||
if (g === undefined) {
|
||||
return [];
|
||||
}
|
||||
//return g.map(x => ({id: x.id, name: x.author.name, expiresAt: x.expiresAt})) as Guest[];
|
||||
return g;
|
||||
}
|
||||
|
||||
addGuest(val: GuestEntityData | GuestEntityData[]) {
|
||||
const reqGuests = Array.isArray(val) ? val : [val];
|
||||
const guests = this.guests;
|
||||
for (const g of reqGuests) {
|
||||
const existing = guests.find(x => x.author.name.toLowerCase() === g.author.name.toLowerCase());
|
||||
if (existing !== undefined) {
|
||||
// update existing guest expiresAt
|
||||
existing.expiresAt = g.expiresAt;
|
||||
} else {
|
||||
guests.push(new BotGuestEntity({...g, guestOf: this}));
|
||||
}
|
||||
}
|
||||
this.guests = guests
|
||||
return guests;
|
||||
}
|
||||
|
||||
removeGuestById(val: string | string[]) {
|
||||
const reqGuests = Array.isArray(val) ? val : [val];
|
||||
const guests = this.guests;
|
||||
const filteredGuests = guests.filter(x => reqGuests.includes(x.id));
|
||||
this.guests = filteredGuests;
|
||||
return filteredGuests;
|
||||
}
|
||||
|
||||
removeGuestByUser(val: string | string[]) {
|
||||
const reqGuests = (Array.isArray(val) ? val : [val]).map(x => x.trim().toLowerCase());
|
||||
const guests = this.guests;
|
||||
const filteredGuests = guests.filter(x => reqGuests.includes(x.author.name.toLowerCase()));
|
||||
this.guests =filteredGuests;
|
||||
return filteredGuests;
|
||||
}
|
||||
|
||||
removeGuests() {
|
||||
this.guests = []
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
import {Column, Entity, PrimaryColumn} from "typeorm";
|
||||
import {TimeAwareBaseEntity} from "../Entities/Base/TimeAwareBaseEntity";
|
||||
import {TimeAwareBaseEntity} from "./Base/TimeAwareBaseEntity";
|
||||
import {InviteData} from "../../Web/Common/interfaces";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {TimeAwareRandomBaseEntity} from "./Base/TimeAwareRandomBaseEntity";
|
||||
import {parseRedditEntity} from "../../util";
|
||||
|
||||
@Entity()
|
||||
export class Invite extends TimeAwareBaseEntity implements InviteData {
|
||||
|
||||
@PrimaryColumn('varchar', {length: 255})
|
||||
id!: string
|
||||
@Entity({name: 'BotInvite'})
|
||||
export class BotInvite extends TimeAwareRandomBaseEntity implements InviteData {
|
||||
|
||||
@Column("varchar", {length: 50})
|
||||
clientId!: string;
|
||||
@@ -30,6 +29,12 @@ export class Invite extends TimeAwareBaseEntity implements InviteData {
|
||||
@Column()
|
||||
overwrite?: boolean;
|
||||
|
||||
@Column("simple-json")
|
||||
guests?: string[]
|
||||
|
||||
@Column("text")
|
||||
initialConfig?: string
|
||||
|
||||
@Column("simple-json", {nullable: true})
|
||||
subreddits?: string[];
|
||||
|
||||
@@ -51,10 +56,9 @@ export class Invite extends TimeAwareBaseEntity implements InviteData {
|
||||
}
|
||||
}
|
||||
|
||||
constructor(data?: InviteData & { id: string, expiresIn?: number }) {
|
||||
constructor(data?: InviteData) {
|
||||
super();
|
||||
if (data !== undefined) {
|
||||
this.id = data.id;
|
||||
this.permissions = data.permissions;
|
||||
this.subreddits = data.subreddits;
|
||||
this.instance = data.instance;
|
||||
@@ -63,9 +67,16 @@ export class Invite extends TimeAwareBaseEntity implements InviteData {
|
||||
this.redirectUri = data.redirectUri;
|
||||
this.creator = data.creator;
|
||||
this.overwrite = data.overwrite;
|
||||
this.initialConfig = data.initialConfig;
|
||||
if(data.guests !== undefined && data.guests !== null && data.guests.length > 0) {
|
||||
const cleanGuests = data.guests.filter(x => x !== '').map(x => parseRedditEntity(x, 'user').name);
|
||||
if(cleanGuests.length > 0) {
|
||||
this.guests = cleanGuests;
|
||||
}
|
||||
}
|
||||
|
||||
if (data.expiresIn !== undefined && data.expiresIn !== 0) {
|
||||
this.expiresAt = dayjs().add(data.expiresIn, 'seconds');
|
||||
if (data.expiresAt !== undefined && data.expiresAt !== 0) {
|
||||
this.expiresAt = dayjs(data.expiresAt);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -152,6 +152,9 @@ export class DispatchedEntity extends TimeAwareRandomBaseEntity {
|
||||
|
||||
async toActivityDispatch(client: ExtendedSnoowrap): Promise<ActivityDispatch> {
|
||||
const redditThing = parseRedditFullname(this.activityId);
|
||||
if(redditThing === undefined) {
|
||||
throw new Error(`Could not parse reddit ID from value '${this.activityId}'`);
|
||||
}
|
||||
let activity: Comment | Submission;
|
||||
if (redditThing?.type === 'comment') {
|
||||
// @ts-ignore
|
||||
@@ -161,12 +164,12 @@ export class DispatchedEntity extends TimeAwareRandomBaseEntity {
|
||||
activity = await client.getSubmission(redditThing.id);
|
||||
}
|
||||
activity.author = new RedditUser({name: this.author}, client, false);
|
||||
activity.id = redditThing.id;
|
||||
return {
|
||||
id: this.id,
|
||||
queuedAt: this.createdAt,
|
||||
activity,
|
||||
delay: this.delay,
|
||||
processing: false,
|
||||
action: this.action,
|
||||
goto: this.goto,
|
||||
onExistingFound: this.onExistingFound,
|
||||
|
||||
119
src/Common/Entities/Guest/GuestEntity.ts
Normal file
119
src/Common/Entities/Guest/GuestEntity.ts
Normal file
@@ -0,0 +1,119 @@
|
||||
import {ChildEntity, Column, Entity, JoinColumn, ManyToOne, TableInheritance} from "typeorm";
|
||||
import {AuthorEntity} from "../AuthorEntity";
|
||||
import { ManagerEntity } from "../ManagerEntity";
|
||||
import { Bot } from "../Bot";
|
||||
import {TimeAwareRandomBaseEntity} from "../Base/TimeAwareRandomBaseEntity";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {Guest, GuestAll, GuestEntityData} from "./GuestInterfaces";
|
||||
|
||||
export interface GuestOptions<T extends ManagerEntity | Bot> extends GuestEntityData {
|
||||
guestOf: T
|
||||
}
|
||||
|
||||
@Entity({name: 'Guests'})
|
||||
@TableInheritance({ column: { type: "varchar", name: "type" } })
|
||||
export abstract class GuestEntity<T extends ManagerEntity | Bot> extends TimeAwareRandomBaseEntity {
|
||||
|
||||
@ManyToOne(type => AuthorEntity, undefined, {cascade: ['insert'], eager: true})
|
||||
@JoinColumn({name: 'authorName'})
|
||||
author!: AuthorEntity;
|
||||
|
||||
@Column({ name: 'expiresAt', nullable: true })
|
||||
_expiresAt?: Date = new Date();
|
||||
|
||||
public get expiresAt(): Dayjs {
|
||||
return dayjs(this._expiresAt);
|
||||
}
|
||||
|
||||
public set expiresAt(d: Dayjs | undefined) {
|
||||
if(d === undefined) {
|
||||
this._expiresAt = d;
|
||||
} else {
|
||||
this._expiresAt = d.utc().toDate();
|
||||
}
|
||||
}
|
||||
|
||||
expiresAtTimestamp(): number | undefined {
|
||||
if(this._expiresAt !== undefined) {
|
||||
return this.expiresAt.valueOf();
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
protected constructor(data?: GuestOptions<T>) {
|
||||
super();
|
||||
if(data !== undefined) {
|
||||
this.author = data.author;
|
||||
this.expiresAt = data.expiresAt;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ChildEntity('manager')
|
||||
export class ManagerGuestEntity extends GuestEntity<ManagerEntity> {
|
||||
|
||||
type: string = 'manager';
|
||||
|
||||
@ManyToOne(type => ManagerEntity, act => act.guests, {nullable: false, orphanedRowAction: 'delete'})
|
||||
@JoinColumn({name: 'guestOfId', referencedColumnName: 'id'})
|
||||
guestOf!: ManagerEntity
|
||||
|
||||
constructor(data?: GuestOptions<ManagerEntity>) {
|
||||
super(data);
|
||||
if(data !== undefined) {
|
||||
this.guestOf = data.guestOf;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ChildEntity('bot')
|
||||
export class BotGuestEntity extends GuestEntity<Bot> {
|
||||
|
||||
type: string = 'bot';
|
||||
|
||||
@ManyToOne(type => Bot, act => act.guests, {nullable: false, orphanedRowAction: 'delete'})
|
||||
@JoinColumn({name: 'guestOfId', referencedColumnName: 'id'})
|
||||
guestOf!: Bot
|
||||
|
||||
constructor(data?: GuestOptions<Bot>) {
|
||||
super(data);
|
||||
if(data !== undefined) {
|
||||
this.guestOf = data.guestOf;
|
||||
this.author = data.author;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const guestEntityToApiGuest = (val: GuestEntity<any>): Guest => {
|
||||
return {
|
||||
id: val.id,
|
||||
name: val.author.name,
|
||||
expiresAt: val.expiresAtTimestamp(),
|
||||
}
|
||||
}
|
||||
|
||||
interface ContextualGuest extends Guest {
|
||||
subreddit: string
|
||||
}
|
||||
|
||||
export const guestEntitiesToAll = (val: Map<string, Guest[]>): GuestAll[] => {
|
||||
const contextualGuests: ContextualGuest[] = Array.from(val.entries()).map(([sub, guests]) => guests.map(y => ({...y, subreddit: sub} as ContextualGuest))).flat(3);
|
||||
|
||||
const userMap = contextualGuests.reduce((acc, curr) => {
|
||||
let u: GuestAll | undefined = acc.get(curr.name);
|
||||
if (u === undefined) {
|
||||
u = {name: curr.name, expiresAt: curr.expiresAt, subreddits: [curr.subreddit]};
|
||||
} else {
|
||||
if (!u.subreddits.includes(curr.subreddit)) {
|
||||
u.subreddits.push(curr.subreddit);
|
||||
}
|
||||
if ((u.expiresAt === undefined && curr.expiresAt !== undefined) || (u.expiresAt !== undefined && curr.expiresAt !== undefined && curr.expiresAt < u.expiresAt)) {
|
||||
u.expiresAt = curr.expiresAt;
|
||||
}
|
||||
}
|
||||
acc.set(curr.name, u);
|
||||
return acc;
|
||||
}, new Map<string, GuestAll>());
|
||||
|
||||
return Array.from(userMap.values());
|
||||
}
|
||||
28
src/Common/Entities/Guest/GuestInterfaces.ts
Normal file
28
src/Common/Entities/Guest/GuestInterfaces.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { Dayjs } from "dayjs"
|
||||
import {AuthorEntity} from "../AuthorEntity";
|
||||
|
||||
export interface Guest {
|
||||
id: string
|
||||
name: string
|
||||
expiresAt?: number
|
||||
}
|
||||
|
||||
export interface GuestAll {
|
||||
name: string
|
||||
expiresAt?: number
|
||||
subreddits: string[]
|
||||
}
|
||||
|
||||
|
||||
export interface GuestEntityData {
|
||||
expiresAt?: Dayjs
|
||||
author: AuthorEntity
|
||||
}
|
||||
|
||||
export interface HasGuests {
|
||||
getGuests: () => GuestEntityData[]
|
||||
addGuest: (val: GuestEntityData | GuestEntityData[]) => GuestEntityData[]
|
||||
removeGuestById: (val: string | string[]) => GuestEntityData[]
|
||||
removeGuestByUser: (val: string | string[]) => GuestEntityData[]
|
||||
removeGuests: () => GuestEntityData[]
|
||||
}
|
||||
@@ -15,12 +15,14 @@ import {RunEntity} from "./RunEntity";
|
||||
import {Bot} from "./Bot";
|
||||
import {RandomIdBaseEntity} from "./Base/RandomIdBaseEntity";
|
||||
import {ManagerRunState} from "./EntityRunState/ManagerRunState";
|
||||
import { QueueRunState } from "./EntityRunState/QueueRunState";
|
||||
import {QueueRunState} from "./EntityRunState/QueueRunState";
|
||||
import {EventsRunState} from "./EntityRunState/EventsRunState";
|
||||
import {RulePremise} from "./RulePremise";
|
||||
import {ActionPremise} from "./ActionPremise";
|
||||
import { RunningStateTypes } from "../../Subreddit/Manager";
|
||||
import {RunningStateTypes} from "../../Subreddit/Manager";
|
||||
import {EntityRunState} from "./EntityRunState/EntityRunState";
|
||||
import {GuestEntity, ManagerGuestEntity} from "./Guest/GuestEntity";
|
||||
import {Guest, GuestEntityData, HasGuests} from "./Guest/GuestInterfaces";
|
||||
|
||||
export interface ManagerEntityOptions {
|
||||
name: string
|
||||
@@ -36,7 +38,7 @@ export type RunningStateEntities = {
|
||||
};
|
||||
|
||||
@Entity({name: 'Manager'})
|
||||
export class ManagerEntity extends RandomIdBaseEntity implements RunningStateEntities {
|
||||
export class ManagerEntity extends RandomIdBaseEntity implements RunningStateEntities, HasGuests {
|
||||
|
||||
@Column("varchar", {length: 200})
|
||||
name!: string;
|
||||
@@ -56,12 +58,15 @@ export class ManagerEntity extends RandomIdBaseEntity implements RunningStateEnt
|
||||
@OneToMany(type => ActionPremise, obj => obj.manager)
|
||||
actions!: Promise<ActionPremise[]>
|
||||
|
||||
@OneToMany(type => CheckEntity, obj => obj.manager) // note: we will create author property in the Photo class below
|
||||
@OneToMany(type => CheckEntity, obj => obj.manager)
|
||||
checks!: Promise<CheckEntity[]>
|
||||
|
||||
@OneToMany(type => RunEntity, obj => obj.manager) // note: we will create author property in the Photo class below
|
||||
@OneToMany(type => RunEntity, obj => obj.manager)
|
||||
runs!: Promise<RunEntity[]>
|
||||
|
||||
@OneToMany(type => ManagerGuestEntity, obj => obj.guestOf, {eager: true, cascade: ['insert', 'remove', 'update']})
|
||||
guests!: ManagerGuestEntity[]
|
||||
|
||||
@OneToOne(() => EventsRunState, {cascade: ['insert', 'update'], eager: true})
|
||||
@JoinColumn()
|
||||
eventsState!: EventsRunState
|
||||
@@ -85,4 +90,50 @@ export class ManagerEntity extends RandomIdBaseEntity implements RunningStateEnt
|
||||
this.managerState = data.managerState;
|
||||
}
|
||||
}
|
||||
|
||||
getGuests(): ManagerGuestEntity[] {
|
||||
const g = this.guests;
|
||||
if (g === undefined) {
|
||||
return [];
|
||||
}
|
||||
//return g.map(x => ({id: x.id, name: x.author.name, expiresAt: x.expiresAt})) as Guest[];
|
||||
return g;
|
||||
}
|
||||
|
||||
addGuest(val: GuestEntityData | GuestEntityData[]) {
|
||||
const reqGuests = Array.isArray(val) ? val : [val];
|
||||
const guests = this.getGuests();
|
||||
for (const g of reqGuests) {
|
||||
const existing = guests.find(x => x.author.name.toLowerCase() === g.author.name.toLowerCase());
|
||||
if (existing !== undefined) {
|
||||
// update existing guest expiresAt
|
||||
existing.expiresAt = g.expiresAt;
|
||||
} else {
|
||||
guests.push(new ManagerGuestEntity({...g, guestOf: this}));
|
||||
}
|
||||
}
|
||||
this.guests = guests;
|
||||
return guests;
|
||||
}
|
||||
|
||||
removeGuestById(val: string | string[]) {
|
||||
const reqGuests = Array.isArray(val) ? val : [val];
|
||||
const guests = this.getGuests();
|
||||
const filteredGuests = guests.filter(x => !reqGuests.includes(x.id));
|
||||
this.guests = filteredGuests
|
||||
return filteredGuests;
|
||||
}
|
||||
|
||||
removeGuestByUser(val: string | string[]) {
|
||||
const reqGuests = (Array.isArray(val) ? val : [val]).map(x => x.trim().toLowerCase());
|
||||
const guests = this.getGuests();
|
||||
const filteredGuests = guests.filter(x => !reqGuests.includes(x.author.name.toLowerCase()));
|
||||
this.guests = filteredGuests;
|
||||
return filteredGuests;
|
||||
}
|
||||
|
||||
removeGuests() {
|
||||
this.guests = [];
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -83,6 +83,7 @@ export class RulePremise extends TimeAwareRandomBaseEntity {
|
||||
this.active = data.active ?? true;
|
||||
this.configHash = objectHash.sha1(data.config);
|
||||
this.manager = data.manager;
|
||||
this.managerId = data.manager.id;
|
||||
this.name = data.name;
|
||||
|
||||
const {
|
||||
|
||||
54
src/Common/Entities/SubredditInvite.ts
Normal file
54
src/Common/Entities/SubredditInvite.ts
Normal file
@@ -0,0 +1,54 @@
|
||||
import {Column, Entity, JoinColumn, ManyToOne, PrimaryColumn} from "typeorm";
|
||||
import {InviteData, SubredditInviteData} from "../../Web/Common/interfaces";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {TimeAwareRandomBaseEntity} from "./Base/TimeAwareRandomBaseEntity";
|
||||
import {AuthorEntity} from "./AuthorEntity";
|
||||
import {Bot} from "./Bot";
|
||||
|
||||
@Entity()
|
||||
export class SubredditInvite extends TimeAwareRandomBaseEntity implements SubredditInviteData {
|
||||
|
||||
subreddit!: string;
|
||||
|
||||
@Column("simple-json", {nullable: true})
|
||||
guests?: string[]
|
||||
|
||||
@Column("text")
|
||||
initialConfig?: string
|
||||
|
||||
@ManyToOne(type => Bot, bot => bot.subredditInvites, {nullable: false, orphanedRowAction: 'delete'})
|
||||
@JoinColumn({name: 'botId', referencedColumnName: 'id'})
|
||||
bot!: Bot;
|
||||
|
||||
@Column({name: 'expiresAt', nullable: true})
|
||||
_expiresAt?: Date;
|
||||
|
||||
public get expiresAt(): Dayjs | undefined {
|
||||
if (this._expiresAt === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return dayjs(this._expiresAt);
|
||||
}
|
||||
|
||||
public set expiresAt(d: Dayjs | undefined) {
|
||||
if (d === undefined) {
|
||||
this._expiresAt = d;
|
||||
} else {
|
||||
this._expiresAt = d.utc().toDate();
|
||||
}
|
||||
}
|
||||
|
||||
constructor(data?: SubredditInviteData & { expiresIn?: number }) {
|
||||
super();
|
||||
if (data !== undefined) {
|
||||
this.subreddit = data.subreddit;
|
||||
this.initialConfig = data.initialConfig;
|
||||
this.guests = data.guests;
|
||||
|
||||
|
||||
if (data.expiresIn !== undefined && data.expiresIn !== 0) {
|
||||
this.expiresAt = dayjs().add(data.expiresIn, 'seconds');
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
199
src/Common/ImageComparisonService.ts
Normal file
199
src/Common/ImageComparisonService.ts
Normal file
@@ -0,0 +1,199 @@
|
||||
import {Logger} from "winston";
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {StrongImageDetection} from "./interfaces";
|
||||
import ImageData from "./ImageData";
|
||||
import {bitsToHexLength, mergeArr} from "../util";
|
||||
import {CMError} from "../Utils/Errors";
|
||||
import {ImageHashCacheData} from "./Infrastructure/Atomic";
|
||||
import leven from "leven";
|
||||
|
||||
export interface CompareImageOptions {
|
||||
config?: StrongImageDetection
|
||||
}
|
||||
|
||||
export interface ThresholdResults {
|
||||
withinHard: boolean | undefined,
|
||||
withinSoft: boolean | undefined
|
||||
}
|
||||
|
||||
export class ImageComparisonService {
|
||||
|
||||
protected reference!: ImageData
|
||||
protected resources: SubredditResources;
|
||||
protected logger: Logger;
|
||||
protected detectionConfig: StrongImageDetection;
|
||||
|
||||
constructor(resources: SubredditResources, logger: Logger, config: StrongImageDetection) {
|
||||
this.resources = resources;
|
||||
this.logger = logger.child({labels: ['Image Detection']}, mergeArr);
|
||||
this.detectionConfig = config;
|
||||
}
|
||||
|
||||
async setReference(img: ImageData, options?: CompareImageOptions) {
|
||||
this.reference = img;
|
||||
const {config = this.detectionConfig} = options || {};
|
||||
|
||||
try {
|
||||
this.reference.setPreferredResolutionByWidth(800);
|
||||
if (config.hash.enable) {
|
||||
if (config.hash.ttl !== undefined) {
|
||||
const refHash = await this.resources.getImageHash(this.reference);
|
||||
if (refHash === undefined) {
|
||||
await this.reference.hash(config.hash.bits);
|
||||
await this.resources.setImageHash(this.reference, config.hash.ttl);
|
||||
} else if (refHash.original.length !== bitsToHexLength(config.hash.bits)) {
|
||||
this.logger.warn('Reference image hash length did not correspond to bits specified in config. Recomputing...');
|
||||
await this.reference.hash(config.hash.bits);
|
||||
await this.resources.setImageHash(this.reference, config.hash.ttl);
|
||||
} else {
|
||||
this.reference.setFromHashCache(refHash);
|
||||
}
|
||||
} else {
|
||||
await this.reference.hash(config.hash.bits);
|
||||
}
|
||||
}
|
||||
} catch (err: any) {
|
||||
throw new CMError('Could not set reference image due to an error', {cause: err});
|
||||
}
|
||||
}
|
||||
|
||||
compareDiffWithThreshold(diff: number, options?: CompareImageOptions): ThresholdResults {
|
||||
const {
|
||||
config: {
|
||||
hash: {
|
||||
hardThreshold = 5,
|
||||
softThreshold = undefined,
|
||||
} = {},
|
||||
} = this.detectionConfig
|
||||
} = options || {};
|
||||
|
||||
let hard: boolean | undefined;
|
||||
let soft: boolean | undefined;
|
||||
|
||||
if ((null !== hardThreshold && undefined !== hardThreshold)) {
|
||||
hard = diff <= hardThreshold;
|
||||
if (hard) {
|
||||
return {withinHard: hard, withinSoft: hard};
|
||||
}
|
||||
}
|
||||
|
||||
if ((null !== softThreshold && undefined !== softThreshold)) {
|
||||
soft = diff <= softThreshold;
|
||||
}
|
||||
|
||||
return {withinHard: hard, withinSoft: soft};
|
||||
}
|
||||
|
||||
async compareWithCandidate(candidate: ImageData, options?: CompareImageOptions) {
|
||||
const {config = this.detectionConfig} = options || {};
|
||||
|
||||
if (config.hash.enable) {
|
||||
await this.compareCandidateHash(candidate, options);
|
||||
}
|
||||
}
|
||||
|
||||
async compareCandidateHash(candidate: ImageData, options?: CompareImageOptions) {
|
||||
const {config = this.detectionConfig} = options || {};
|
||||
|
||||
let compareHash: Required<ImageHashCacheData> | undefined;
|
||||
if (config.hash.ttl !== undefined) {
|
||||
compareHash = await this.resources.getImageHash(candidate);
|
||||
}
|
||||
if (compareHash === undefined) {
|
||||
compareHash = await candidate.hash(config.hash.bits);
|
||||
if (config.hash.ttl !== undefined) {
|
||||
await this.resources.setImageHash(candidate, config.hash.ttl);
|
||||
}
|
||||
} else {
|
||||
candidate.setFromHashCache(compareHash);
|
||||
}
|
||||
|
||||
let diff = await this.compareImageHashes(this.reference, candidate, options);
|
||||
|
||||
let threshRes = this.compareDiffWithThreshold(diff, options);
|
||||
|
||||
if(threshRes.withinSoft !== true && threshRes.withinHard !== true) {
|
||||
// up to this point we rely naively on hashes that were:
|
||||
//
|
||||
// * from cache/db for which we do not have resolutions stored (maybe fix this??)
|
||||
// * hashes generated from PREVIEWS from reddit that should be the same *width*
|
||||
//
|
||||
// we don't have control over how reddit resizes previews or the quality of the previews
|
||||
// so if we don't get a match using our initial naive, but cpu/data lite approach,
|
||||
// then we need to check original sources to see if it's possible there has been resolution/cropping trickery
|
||||
|
||||
if(this.reference.isMaybeCropped(candidate)) {
|
||||
const [normalizedRefSharp, normalizedCandidateSharp, width, height] = await this.reference.normalizeImagesForComparison('pixel', candidate, false);
|
||||
const normalizedRef = new ImageData({width, height, path: this.reference.path});
|
||||
normalizedRef.sharpImg = normalizedRefSharp;
|
||||
const normalizedCandidate = new ImageData({width, height, path: candidate.path});
|
||||
normalizedCandidate.sharpImg = normalizedCandidateSharp;
|
||||
|
||||
const normalDiff = await this.compareImageHashes(normalizedRef, normalizedCandidate, options);
|
||||
let normalizedThreshRes = this.compareDiffWithThreshold(normalDiff, options);
|
||||
}
|
||||
}
|
||||
|
||||
/* // return image if hard is defined and diff is less
|
||||
if (null !== config.hash.hardThreshold && diff <= config.hash.hardThreshold) {
|
||||
return x;
|
||||
}
|
||||
// hard is either not defined or diff was greater than hard
|
||||
|
||||
// if soft is defined
|
||||
if (config.hash.softThreshold !== undefined) {
|
||||
// and diff is greater than soft allowance
|
||||
if (diff > config.hash.softThreshold) {
|
||||
// not similar enough
|
||||
return null;
|
||||
}
|
||||
// similar enough, will continue on to pixel (if enabled!)
|
||||
} else {
|
||||
// only hard was defined and did not pass
|
||||
return null;
|
||||
}*/
|
||||
}
|
||||
|
||||
async compareImageHashes(reference: ImageData, candidate: ImageData, options?: CompareImageOptions) {
|
||||
const {config = this.detectionConfig} = options || {};
|
||||
const {
|
||||
hash: {
|
||||
bits = 16,
|
||||
} = {},
|
||||
} = config;
|
||||
|
||||
let refHash = await reference.hash(bits);
|
||||
let compareHash = await candidate.hash(bits);
|
||||
|
||||
if (compareHash.original.length !== refHash.original.length) {
|
||||
this.logger.warn(`Hash lengths were not the same! Will need to recompute compare hash to match reference.\n\nReference: ${reference.basePath} has is ${refHash.original.length} char long | Comparing: ${candidate.basePath} has is ${compareHash} ${compareHash.original.length} long`);
|
||||
refHash = await reference.hash(bits, true, true);
|
||||
compareHash = await candidate.hash(bits, true, true);
|
||||
}
|
||||
|
||||
let diff: number;
|
||||
const odistance = leven(refHash.original, compareHash.original);
|
||||
diff = (odistance / refHash.original.length) * 100;
|
||||
|
||||
// compare flipped hash if it exists
|
||||
// if it has less difference than normal comparison then the image is probably flipped (or so different it doesn't matter)
|
||||
if (compareHash.flipped !== undefined) {
|
||||
const fdistance = leven(refHash.original, compareHash.flipped);
|
||||
const fdiff = (fdistance / refHash.original.length) * 100;
|
||||
if (fdiff < diff) {
|
||||
diff = fdiff;
|
||||
}
|
||||
}
|
||||
|
||||
return diff;
|
||||
}
|
||||
|
||||
async compareCandidatePixel() {
|
||||
// TODO
|
||||
}
|
||||
|
||||
async compareImagePixels() {
|
||||
// TODO
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,16 +1,17 @@
|
||||
import fetch from "node-fetch";
|
||||
import {Submission} from "snoowrap/dist/objects";
|
||||
import {URL} from "url";
|
||||
import {absPercentDifference, getSharpAsync, isValidImageURL} from "../util";
|
||||
import sizeOf from "image-size";
|
||||
import {absPercentDifference, getExtension, getSharpAsync, isValidImageURL} from "../util";
|
||||
import {Sharp} from "sharp";
|
||||
import {blockhash} from "./blockhash/blockhash";
|
||||
import {SimpleError} from "../Utils/Errors";
|
||||
import {blockhashAndFlipped} from "./blockhash/blockhash";
|
||||
import {CMError, SimpleError} from "../Utils/Errors";
|
||||
import {FileHandle, open} from "fs/promises";
|
||||
import {ImageHashCacheData} from "./Infrastructure/Atomic";
|
||||
|
||||
export interface ImageDataOptions {
|
||||
width?: number,
|
||||
height?: number,
|
||||
url: string,
|
||||
path: URL,
|
||||
variants?: ImageData[]
|
||||
}
|
||||
|
||||
@@ -18,19 +19,20 @@ class ImageData {
|
||||
|
||||
width?: number
|
||||
height?: number
|
||||
url: URL
|
||||
path: URL
|
||||
variants: ImageData[] = []
|
||||
preferredResolution?: [number, number]
|
||||
sharpImg!: Sharp
|
||||
hashResult!: string
|
||||
hashResult?: string
|
||||
hashResultFlipped?: string
|
||||
actualResolution?: [number, number]
|
||||
|
||||
constructor(data: ImageDataOptions, aggressive = false) {
|
||||
this.width = data.width;
|
||||
this.height = data.height;
|
||||
this.url = new URL(data.url);
|
||||
if (!aggressive && !isValidImageURL(`${this.url.origin}${this.url.pathname}`)) {
|
||||
throw new Error('URL did not end with a valid image extension');
|
||||
this.path = data.path;
|
||||
if (!aggressive && !isValidImageURL(`${this.path.origin}${this.path.pathname}`)) {
|
||||
throw new Error('Path did not end with a valid image extension');
|
||||
}
|
||||
this.variants = data.variants || [];
|
||||
}
|
||||
@@ -40,55 +42,90 @@ class ImageData {
|
||||
return await (await this.sharp()).clone().toFormat(format).toBuffer();
|
||||
}
|
||||
|
||||
async hash(bits: number, useVariantIfPossible = true): Promise<string> {
|
||||
if(this.hashResult === undefined) {
|
||||
async hash(bits: number = 16, useVariantIfPossible = true, force = false): Promise<Required<ImageHashCacheData>> {
|
||||
if (force || (this.hashResult === undefined || this.hashResultFlipped === undefined)) {
|
||||
let ref: ImageData | undefined;
|
||||
if(useVariantIfPossible && this.preferredResolution !== undefined) {
|
||||
if (useVariantIfPossible && this.preferredResolution !== undefined) {
|
||||
ref = this.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]);
|
||||
}
|
||||
if(ref === undefined) {
|
||||
if (ref === undefined) {
|
||||
ref = this;
|
||||
}
|
||||
this.hashResult = await blockhash((await ref.sharp()).clone(), bits);
|
||||
const [hash, hashFlipped] = await blockhashAndFlipped((await ref.sharp()).clone(), bits);
|
||||
this.hashResult = hash;
|
||||
this.hashResultFlipped = hashFlipped;
|
||||
}
|
||||
return this.hashResult;
|
||||
return {original: this.hashResult, flipped: this.hashResultFlipped};
|
||||
}
|
||||
|
||||
async sharp(): Promise<Sharp> {
|
||||
if (this.sharpImg === undefined) {
|
||||
let animated = false;
|
||||
let getBuffer: () => Promise<Buffer>;
|
||||
let fileHandle: FileHandle | undefined;
|
||||
try {
|
||||
const response = await fetch(this.url.toString())
|
||||
if (response.ok) {
|
||||
const ct = response.headers.get('Content-Type');
|
||||
if (ct !== null && ct.includes('image')) {
|
||||
const sFunc = await getSharpAsync();
|
||||
// if image is animated then we want to extract the first frame and convert it to a regular image
|
||||
// so we can compare two static images later (also because sharp can't use resize() on animated images)
|
||||
if(['gif','webp'].some(x => ct.includes(x))) {
|
||||
this.sharpImg = await sFunc(await (await sFunc(await response.buffer(), {pages: 1, animated: false})).png().toBuffer());
|
||||
} else {
|
||||
this.sharpImg = await sFunc(await response.buffer());
|
||||
}
|
||||
const meta = await this.sharpImg.metadata();
|
||||
if (this.width === undefined || this.height === undefined) {
|
||||
this.width = meta.width;
|
||||
this.height = meta.height;
|
||||
}
|
||||
this.actualResolution = [meta.width as number, meta.height as number];
|
||||
} else {
|
||||
throw new SimpleError(`Content-Type for fetched URL ${this.url} did not contain "image"`);
|
||||
if (this.path.protocol === 'file:') {
|
||||
try {
|
||||
animated = ['gif', 'webp'].includes(getExtension(this.path.pathname));
|
||||
fileHandle = await open(this.path, 'r');
|
||||
getBuffer = async () => await (fileHandle as FileHandle).readFile();
|
||||
} catch (err: any) {
|
||||
throw new CMError(`Unable to retrieve local file ${this.path.toString()}`, {cause: err});
|
||||
}
|
||||
} else {
|
||||
throw new SimpleError(`URL response was not OK: (${response.status})${response.statusText}`);
|
||||
try {
|
||||
const response = await fetch(this.path.toString())
|
||||
if (response.ok) {
|
||||
const ct = response.headers.get('Content-Type');
|
||||
if (ct !== null && ct.includes('image')) {
|
||||
animated = ['gif', 'webp'].some(x => ct.includes(x));
|
||||
getBuffer = async () => await response.buffer();
|
||||
} else {
|
||||
throw new SimpleError(`Content-Type for fetched URL ${this.path.toString()} did not contain "image"`);
|
||||
}
|
||||
} else {
|
||||
throw new SimpleError(`Fetching ${this.path.toString()} => URL response was not OK: (${response.status})${response.statusText}`);
|
||||
}
|
||||
|
||||
} catch (err: any) {
|
||||
if (!(err instanceof SimpleError)) {
|
||||
throw new CMError(`Error occurred while fetching response from URL ${this.path.toString()}`, {cause: err});
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (err: any) {
|
||||
throw new CMError('Unable to fetch image resource', {cause: err, isSerious: false});
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
const sFunc = await getSharpAsync();
|
||||
// if image is animated then we want to extract the first frame and convert it to a regular image
|
||||
// so we can compare two static images later (also because sharp can't use resize() on animated images)
|
||||
if (animated) {
|
||||
this.sharpImg = await sFunc(await (await sFunc(await getBuffer(), {
|
||||
pages: 1,
|
||||
animated: false
|
||||
}).trim().greyscale()).png().withMetadata().toBuffer());
|
||||
} else {
|
||||
this.sharpImg = await sFunc(await sFunc(await getBuffer()).trim().greyscale().withMetadata().toBuffer());
|
||||
}
|
||||
|
||||
if(fileHandle !== undefined) {
|
||||
await fileHandle.close();
|
||||
}
|
||||
|
||||
const meta = await this.sharpImg.metadata();
|
||||
if (this.width === undefined || this.height === undefined) {
|
||||
this.width = meta.width;
|
||||
this.height = meta.height;
|
||||
}
|
||||
this.actualResolution = [meta.width as number, meta.height as number];
|
||||
|
||||
} catch (err: any) {
|
||||
if(!(err instanceof SimpleError)) {
|
||||
throw new Error(`Error occurred while fetching response from URL: ${err.message}`);
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
throw new CMError('Error occurred while converting image buffer to Sharp object', {cause: err});
|
||||
}
|
||||
}
|
||||
return this.sharpImg;
|
||||
@@ -108,8 +145,8 @@ class ImageData {
|
||||
return this.width !== undefined && this.height !== undefined;
|
||||
}
|
||||
|
||||
get baseUrl() {
|
||||
return `${this.url.origin}${this.url.pathname}`;
|
||||
get basePath() {
|
||||
return `${this.path.origin}${this.path.pathname}`;
|
||||
}
|
||||
|
||||
setPreferredResolutionByWidth(prefWidth: number) {
|
||||
@@ -145,6 +182,25 @@ class ImageData {
|
||||
return this.width === otherImage.width && this.height === otherImage.height;
|
||||
}
|
||||
|
||||
isMaybeCropped(otherImage: ImageData, allowDiff = 10): boolean {
|
||||
if (!this.hasDimensions || !otherImage.hasDimensions) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const refWidth = this.width as number;
|
||||
const refHeight = this.height as number;
|
||||
const oWidth = otherImage.width as number;
|
||||
const oHeight = otherImage.height as number;
|
||||
|
||||
const sWidth = refWidth <= oWidth ? refWidth : oWidth;
|
||||
const sHeight = refHeight <= oHeight ? refHeight : oHeight;
|
||||
|
||||
const widthDiff = sWidth / (sWidth === refWidth ? oWidth : refWidth);
|
||||
const heightDiff = sHeight / (sHeight === refHeight ? oHeight : refHeight);
|
||||
|
||||
return widthDiff <= allowDiff || heightDiff <= allowDiff;
|
||||
}
|
||||
|
||||
async sameAspectRatio(otherImage: ImageData) {
|
||||
let thisRes = this.actualResolution;
|
||||
let otherRes = otherImage.actualResolution;
|
||||
@@ -170,12 +226,12 @@ class ImageData {
|
||||
return {width: width as number, height: height as number};
|
||||
}
|
||||
|
||||
async normalizeImagesForComparison(compareLibrary: ('pixel' | 'resemble'), imgToCompare: ImageData): Promise<[Sharp, Sharp, number, number]> {
|
||||
async normalizeImagesForComparison(compareLibrary: ('pixel' | 'resemble'), imgToCompare: ImageData, usePreferredResolution = true): Promise<[Sharp, Sharp, number, number]> {
|
||||
const sFunc = await getSharpAsync();
|
||||
|
||||
let refImage = this as ImageData;
|
||||
let compareImage = imgToCompare;
|
||||
if (this.preferredResolution !== undefined) {
|
||||
if (usePreferredResolution && this.preferredResolution !== undefined) {
|
||||
const matchingVariant = compareImage.getSimilarResolutionVariant(this.preferredResolution[0], this.preferredResolution[1]);
|
||||
if (matchingVariant !== undefined) {
|
||||
compareImage = matchingVariant;
|
||||
@@ -226,10 +282,23 @@ class ImageData {
|
||||
return [refSharp, compareSharp, width, height];
|
||||
}
|
||||
|
||||
toHashCache(): ImageHashCacheData {
|
||||
return {
|
||||
original: this.hashResult,
|
||||
flipped: this.hashResultFlipped
|
||||
}
|
||||
}
|
||||
|
||||
setFromHashCache(data: ImageHashCacheData) {
|
||||
const {original, flipped} = data;
|
||||
this.hashResult = original;
|
||||
this.hashResultFlipped = flipped;
|
||||
}
|
||||
|
||||
static fromSubmission(sub: Submission, aggressive = false): ImageData {
|
||||
const url = new URL(sub.url);
|
||||
const data: any = {
|
||||
url,
|
||||
path: url,
|
||||
};
|
||||
let variants = [];
|
||||
if (sub.preview !== undefined && sub.preview.enabled && sub.preview.images.length > 0) {
|
||||
@@ -238,7 +307,7 @@ class ImageData {
|
||||
data.width = ref.width;
|
||||
data.height = ref.height;
|
||||
|
||||
variants = firstImg.resolutions.map(x => new ImageData(x));
|
||||
variants = firstImg.resolutions.map(x => new ImageData({...x, path: new URL(x.url)}));
|
||||
data.variants = variants;
|
||||
}
|
||||
return new ImageData(data, aggressive);
|
||||
|
||||
115
src/Common/Influx/InfluxClient.ts
Normal file
115
src/Common/Influx/InfluxClient.ts
Normal file
@@ -0,0 +1,115 @@
|
||||
import {InfluxConfig} from "./interfaces";
|
||||
import {InfluxDB, Point, WriteApi, setLogger} from "@influxdata/influxdb-client";
|
||||
import {HealthAPI} from "@influxdata/influxdb-client-apis";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {Logger} from "winston";
|
||||
import {mergeArr} from "../../util";
|
||||
import {CMError} from "../../Utils/Errors";
|
||||
|
||||
export interface InfluxClientConfig extends InfluxConfig {
|
||||
client?: InfluxDB
|
||||
ready?: boolean
|
||||
}
|
||||
|
||||
export class InfluxClient {
|
||||
config: InfluxConfig;
|
||||
client: InfluxDB;
|
||||
write: WriteApi;
|
||||
health: HealthAPI;
|
||||
|
||||
tags: Record<string, string>;
|
||||
|
||||
logger: Logger;
|
||||
|
||||
ready: boolean;
|
||||
lastReadyAttempt: Dayjs | undefined;
|
||||
|
||||
constructor(config: InfluxClientConfig, logger: Logger, tags: Record<string, string> = {}) {
|
||||
|
||||
const {client, ready = false, ...rest} = config;
|
||||
|
||||
this.logger = logger.child({
|
||||
labels: ['Influx']
|
||||
}, mergeArr);
|
||||
|
||||
this.config = rest;
|
||||
this.ready = ready;
|
||||
if(client !== undefined) {
|
||||
this.client = client;
|
||||
} else {
|
||||
this.client = InfluxClient.createClient(this.config);
|
||||
setLogger(this.logger);
|
||||
}
|
||||
this.write = this.client.getWriteApi(config.credentials.org, config.credentials.bucket, 'ms');
|
||||
this.tags = tags;
|
||||
this.write.useDefaultTags(tags);
|
||||
this.health = new HealthAPI(this.client);
|
||||
}
|
||||
|
||||
async isReady() {
|
||||
if (this.ready) {
|
||||
return true;
|
||||
}
|
||||
if (this.lastReadyAttempt === undefined || dayjs().diff(this.lastReadyAttempt, 's') >= 10) {
|
||||
if (!(await this.testConnection())) {
|
||||
this.logger.warn('Influx endpoint is not ready');
|
||||
} else {
|
||||
this.ready = true;
|
||||
}
|
||||
} else {
|
||||
this.logger.debug(`Influx endpoint testing throttled. Waiting another ${10 - dayjs().diff(this.lastReadyAttempt, 's')} seconds`);
|
||||
}
|
||||
return this.ready;
|
||||
}
|
||||
|
||||
async testConnection() {
|
||||
try {
|
||||
const result = await this.health.getHealth();
|
||||
if (result.status === 'fail') {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
} catch (e: any) {
|
||||
this.logger.error(new CMError(`Testing health of Influx endpoint failed`, {cause: e, isSerious: false}));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async writePoint(data: Point | Point[]) {
|
||||
if (await this.isReady()) {
|
||||
if (Array.isArray(data)) {
|
||||
this.write.writePoints(data);
|
||||
} else {
|
||||
this.write.writePoint(data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async flush() {
|
||||
if (await this.isReady()) {
|
||||
try {
|
||||
await this.write.flush(true);
|
||||
} catch (e: any) {
|
||||
this.logger.error(new CMError('Failed to flush data to Influx', {cause: e}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static createClient(config: InfluxConfig): InfluxDB {
|
||||
return new InfluxDB({
|
||||
url: config.credentials.url,
|
||||
token: config.credentials.token,
|
||||
writeOptions: {
|
||||
defaultTags: config.defaultTags
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
childClient(logger: Logger, tags: Record<string, string> = {}) {
|
||||
return new InfluxClient({
|
||||
...this.config,
|
||||
client: this.client,
|
||||
ready: this.ready
|
||||
}, logger, {...this.tags, ...tags});
|
||||
}
|
||||
}
|
||||
13
src/Common/Influx/interfaces.ts
Normal file
13
src/Common/Influx/interfaces.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
import {InfluxDB, WriteApi} from "@influxdata/influxdb-client/dist";
|
||||
|
||||
export interface InfluxConfig {
|
||||
credentials: InfluxCredentials
|
||||
defaultTags?: Record<string, string>
|
||||
}
|
||||
|
||||
export interface InfluxCredentials {
|
||||
url: string
|
||||
token: string
|
||||
org: string
|
||||
bucket: string
|
||||
}
|
||||
@@ -1,5 +1,8 @@
|
||||
import {StructuredRunnableBase} from "./Runnable";
|
||||
import {ActionJson} from "../types";
|
||||
import {IncludesData} from "./Includes";
|
||||
|
||||
export type ActionObjectJson = Exclude<ActionJson, string>;
|
||||
export type StructuredActionObjectJson = Omit<ActionObjectJson, 'authorIs' | 'itemIs'> & StructuredRunnableBase
|
||||
export type ActionConfigData = ActionJson;
|
||||
export type ActionConfigHydratedData = Exclude<ActionConfigData, IncludesData>;
|
||||
export type ActionConfigObject = Exclude<ActionConfigHydratedData, string>;
|
||||
export type StructuredActionObjectJson = Omit<ActionConfigObject, 'authorIs' | 'itemIs'> & StructuredRunnableBase
|
||||
|
||||
@@ -185,4 +185,100 @@ export type ActionTypes =
|
||||
| 'userflair'
|
||||
| 'dispatch'
|
||||
| 'cancelDispatch'
|
||||
| 'contributor';
|
||||
| 'contributor'
|
||||
| 'modnote';
|
||||
|
||||
/**
|
||||
* Test the calculated VADER sentiment (compound) score for an Activity using this comparison. Can be either a numerical or natural language
|
||||
*
|
||||
* Sentiment values range from extremely negative to extremely positive in a numerical range of -1 to +1:
|
||||
*
|
||||
* * -0.6 => extremely negative
|
||||
* * -0.3 => very negative
|
||||
* * -0.1 => negative
|
||||
* * 0 => neutral
|
||||
* * 0.1 => positive
|
||||
* * 0.3 => very positive
|
||||
* * 0.6 => extremely positive
|
||||
*
|
||||
* The below examples are all equivocal. You can use either set of values as the value for `sentiment` (numerical comparisons or natural langauge)
|
||||
*
|
||||
* * `>= 0.1` = `is positive`
|
||||
* * `<= 0.3` = `is very negative`
|
||||
* * `< 0.1` = `is not positive`
|
||||
* * `> -0.3` = `is not very negative`
|
||||
*
|
||||
* Special case:
|
||||
*
|
||||
* * `is neutral` equates to `> -0.1 and < 0.1`
|
||||
* * `is not neutral` equates to `< -0.1 or > 0.1`
|
||||
*
|
||||
* ContextMod uses a normalized, weighted average from these sentiment tools:
|
||||
*
|
||||
* * NLP.js (english, french, german, and spanish) https://github.com/axa-group/nlp.js/blob/master/docs/v3/sentiment-analysis.md
|
||||
* * (english only) vaderSentiment-js https://github.com/vaderSentiment/vaderSentiment-js/
|
||||
* * (english only) wink-sentiment https://github.com/winkjs/wink-sentiment
|
||||
*
|
||||
* More about the sentiment algorithms used:
|
||||
* * VADER https://github.com/cjhutto/vaderSentiment
|
||||
* * AFINN http://corpustext.com/reference/sentiment_afinn.html
|
||||
* * Senticon https://ieeexplore.ieee.org/document/8721408
|
||||
* * Pattern https://github.com/clips/pattern
|
||||
* * wink https://github.com/winkjs/wink-sentiment
|
||||
*
|
||||
* @pattern ((>|>=|<|<=)\s*(-?\d?\.?\d+))|((not)?\s*(very|extremely)?\s*(positive|neutral|negative))
|
||||
* @examples ["is negative", "> 0.2"]
|
||||
* */
|
||||
export type VaderSentimentComparison = string;
|
||||
|
||||
export type ModUserNoteLabel =
|
||||
'BOT_BAN'
|
||||
| 'PERMA_BAN'
|
||||
| 'BAN'
|
||||
| 'ABUSE_WARNING'
|
||||
| 'SPAM_WARNING'
|
||||
| 'SPAM_WATCH'
|
||||
| 'SOLID_CONTRIBUTOR'
|
||||
| 'HELPFUL_USER';
|
||||
|
||||
export const modUserNoteLabels = ['BOT_BAN', 'PERMA_BAN', 'BAN', 'ABUSE_WARNING', 'SPAM_WARNING', 'SPAM_WATCH', 'SOLID_CONTRIBUTOR', 'HELPFUL_USER'];
|
||||
|
||||
export type ModActionType =
|
||||
'INVITE' |
|
||||
'NOTE' |
|
||||
'REMOVAL' |
|
||||
'SPAM' |
|
||||
'APPROVAL';
|
||||
|
||||
export type UserNoteType =
|
||||
'gooduser' |
|
||||
'spamwatch' |
|
||||
'spamwarn' |
|
||||
'abusewarn' |
|
||||
'ban' |
|
||||
'permban' |
|
||||
'botban' |
|
||||
string;
|
||||
|
||||
export const userNoteTypes = ['gooduser', 'spamwatch', 'spamwarn', 'abusewarn', 'ban', 'permban', 'botban'];
|
||||
|
||||
export type ConfigFragmentValidationFunc = (data: object, fetched: boolean) => boolean;
|
||||
|
||||
export interface WikiContext {
|
||||
wiki: string
|
||||
subreddit?: string
|
||||
}
|
||||
|
||||
export interface ExternalUrlContext {
|
||||
url: string
|
||||
}
|
||||
|
||||
export interface UrlContext {
|
||||
value: string
|
||||
context: WikiContext | ExternalUrlContext
|
||||
}
|
||||
|
||||
export interface ImageHashCacheData {
|
||||
original?: string
|
||||
flipped?: string
|
||||
}
|
||||
|
||||
@@ -1,15 +1,212 @@
|
||||
import {StringOperator} from "./Atomic";
|
||||
import {Duration} from "dayjs/plugin/duration";
|
||||
import InvalidRegexError from "../../Utils/InvalidRegexError";
|
||||
import dayjs, {Dayjs, OpUnitType} from "dayjs";
|
||||
import {CMError, SimpleError} from "../../Utils/Errors";
|
||||
import {escapeRegex, parseDuration, parseDurationFromString, parseStringToRegex} from "../../util";
|
||||
import {ReportType} from "./Reddit";
|
||||
|
||||
export interface DurationComparison {
|
||||
operator: StringOperator,
|
||||
duration: Duration
|
||||
}
|
||||
|
||||
export interface GenericComparison {
|
||||
export interface GenericComparison extends HasDisplayText {
|
||||
operator: StringOperator,
|
||||
value: number,
|
||||
isPercent: boolean,
|
||||
extra?: string,
|
||||
groups?: Record<string, string>
|
||||
displayText: string,
|
||||
duration?: Duration
|
||||
durationText?: string
|
||||
}
|
||||
|
||||
export interface HasDisplayText {
|
||||
displayText: string
|
||||
}
|
||||
|
||||
export interface RangedComparison extends HasDisplayText {
|
||||
range: [number, number]
|
||||
not: boolean
|
||||
}
|
||||
|
||||
export const asGenericComparison = (val: any): val is GenericComparison => {
|
||||
return typeof val === 'object' && 'value' in val;
|
||||
}
|
||||
|
||||
export const GENERIC_VALUE_COMPARISON = /^\s*(?<opStr>>|>=|<|<=)\s*(?<value>-?\d?\.?\d+)(?<extra>\s+.*)*$/
|
||||
export const GENERIC_VALUE_COMPARISON_URL = 'https://regexr.com/60dq4';
|
||||
export const parseGenericValueComparison = (val: string, options?: {
|
||||
requireDuration?: boolean,
|
||||
reg?: RegExp
|
||||
}): GenericComparison => {
|
||||
|
||||
const {
|
||||
requireDuration = false,
|
||||
reg = GENERIC_VALUE_COMPARISON,
|
||||
} = options || {};
|
||||
|
||||
const matches = val.match(reg);
|
||||
|
||||
if (matches === null) {
|
||||
throw new InvalidRegexError(reg, val)
|
||||
}
|
||||
|
||||
const groups = matches.groups as any;
|
||||
|
||||
let duration: Duration | undefined;
|
||||
let durationText: string | undefined;
|
||||
|
||||
try {
|
||||
const durationResult = parseDurationFromString(val, false);
|
||||
if(durationResult.length > 1) {
|
||||
throw new SimpleError(`Must only have one Duration value, found ${durationResult.length} in: ${val}`);
|
||||
}
|
||||
duration = durationResult[0].duration;
|
||||
durationText = durationResult[0].original;
|
||||
} catch (e) {
|
||||
// if it returns an invalid regex just means they didn't
|
||||
if (requireDuration || !(e instanceof InvalidRegexError)) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
const displayParts = [`${groups.opStr} ${groups.value}`];
|
||||
const hasPercent = typeof groups.percent === 'string' && groups.percent.trim() !== '';
|
||||
if(hasPercent) {
|
||||
displayParts.push('%');
|
||||
}
|
||||
|
||||
const {
|
||||
opStr,
|
||||
value,
|
||||
percent,
|
||||
extra,
|
||||
...rest
|
||||
} = matches.groups || {};
|
||||
|
||||
const extraGroups: Record<string,string> = {};
|
||||
let hasExtraGroups = false;
|
||||
|
||||
for(const [k,v] of Object.entries(rest)) {
|
||||
if(typeof v === 'string' && v.trim() !== '') {
|
||||
extraGroups[k] = v;
|
||||
hasExtraGroups = true;
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
operator: groups.opStr as StringOperator,
|
||||
value: Number.parseFloat(groups.value),
|
||||
isPercent: hasPercent,
|
||||
extra: groups.extra,
|
||||
groups: hasExtraGroups ? extraGroups : undefined,
|
||||
displayText: displayParts.join(''),
|
||||
duration,
|
||||
durationText,
|
||||
}
|
||||
}
|
||||
const GENERIC_VALUE_PERCENT_COMPARISON = /^\s*(?<opStr>>|>=|<|<=)\s*(?<value>\d+)\s*(?<percent>%)?(?<extra>.*)$/
|
||||
const GENERIC_VALUE_PERCENT_COMPARISON_URL = 'https://regexr.com/60a16';
|
||||
export const parseGenericValueOrPercentComparison = (val: string, options?: {requireDuration: boolean}): GenericComparison => {
|
||||
return parseGenericValueComparison(val, {...(options ?? {}), reg: GENERIC_VALUE_PERCENT_COMPARISON});
|
||||
}
|
||||
/**
|
||||
* Named groups: operator, time, unit
|
||||
* */
|
||||
const DURATION_COMPARISON_REGEX: RegExp = /^\s*(?<opStr>>|>=|<|<=)\s*(?<time>\d+)\s*(?<unit>days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)\s*$/;
|
||||
const DURATION_COMPARISON_REGEX_URL = 'https://regexr.com/609n8';
|
||||
export const parseDurationComparison = (val: string): DurationComparison => {
|
||||
const result = parseGenericValueComparison(val, {requireDuration: true});
|
||||
if(result.isPercent) {
|
||||
throw new InvalidRegexError(DURATION_COMPARISON_REGEX, val, DURATION_COMPARISON_REGEX_URL, 'Duration comparison value cannot be a percentage');
|
||||
}
|
||||
if(result.value < 0) {
|
||||
throw new InvalidRegexError(DURATION_COMPARISON_REGEX, val, DURATION_COMPARISON_REGEX_URL,'Duration value cannot be negative');
|
||||
}
|
||||
return {
|
||||
operator: result.operator as StringOperator,
|
||||
duration: result.duration as Duration
|
||||
}
|
||||
}
|
||||
export const dateComparisonTextOp = (val1: Dayjs, strOp: StringOperator, val2: Dayjs, granularity?: OpUnitType): boolean => {
|
||||
switch (strOp) {
|
||||
case '>':
|
||||
return val1.isBefore(val2, granularity);
|
||||
case '>=':
|
||||
return val1.isSameOrBefore(val2, granularity);
|
||||
case '<':
|
||||
return val1.isAfter(val2, granularity);
|
||||
case '<=':
|
||||
return val1.isSameOrAfter(val2, granularity);
|
||||
default:
|
||||
throw new Error(`${strOp} was not a recognized operator`);
|
||||
}
|
||||
}
|
||||
export const compareDurationValue = (comp: DurationComparison, date: Dayjs) => {
|
||||
const dateToCompare = dayjs().subtract(comp.duration.asSeconds(), 'seconds');
|
||||
return dateComparisonTextOp(date, comp.operator, dateToCompare);
|
||||
}
|
||||
export const comparisonTextOp = (val1: number, strOp: string, val2: number): boolean => {
|
||||
switch (strOp) {
|
||||
case '>':
|
||||
return val1 > val2;
|
||||
case '>=':
|
||||
return val1 >= val2;
|
||||
case '<':
|
||||
return val1 < val2;
|
||||
case '<=':
|
||||
return val1 <= val2;
|
||||
default:
|
||||
throw new Error(`${strOp} was not a recognized operator`);
|
||||
}
|
||||
}
|
||||
|
||||
export interface ReportComparison extends Omit<GenericComparison, 'groups'> {
|
||||
reportType?: ReportType
|
||||
reasonRegex?: RegExp
|
||||
reasonMatch?: string
|
||||
}
|
||||
|
||||
const REPORT_COMPARISON = /^\s*(?<opStr>>|>=|<|<=)\s*(?<value>\d+)(?<percent>\s*%)?(?:\s+(?<reportType>mods?|users?))?(?:\s+(?<reasonMatch>["'].*["']|\/.*\/))?.*(?<time>\d+)?\s*(?<unit>days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\s*$/i
|
||||
const REPORT_REASON_LITERAL = /["'](.*)["']/i
|
||||
export const parseReportComparison = (str: string): ReportComparison => {
|
||||
const generic = parseGenericValueComparison(str, {reg: REPORT_COMPARISON});
|
||||
|
||||
|
||||
const {
|
||||
groups: {
|
||||
reportType,
|
||||
reasonMatch
|
||||
} = {},
|
||||
...rest
|
||||
} = generic;
|
||||
|
||||
const result: ReportComparison = {...rest, reasonMatch};
|
||||
|
||||
if(reportType !== undefined) {
|
||||
if(reportType.toLocaleLowerCase().includes('mod')) {
|
||||
result.reportType = 'mod' as ReportType;
|
||||
} else if (reportType.toLocaleLowerCase().includes('user')) {
|
||||
result.reportType = 'user' as ReportType;
|
||||
}
|
||||
}
|
||||
if(reasonMatch !== undefined) {
|
||||
const literalMatch = reasonMatch.match(REPORT_REASON_LITERAL);
|
||||
if(literalMatch !== null) {
|
||||
const cleanLiteralMatch = `/.*${escapeRegex(literalMatch[1].trim())}.*/`;
|
||||
result.reasonRegex = parseStringToRegex(cleanLiteralMatch, 'i');
|
||||
if(result.reasonRegex === undefined) {
|
||||
throw new CMError(`Could not convert reason match value to Regex: ${cleanLiteralMatch}`, {isSerious: false})
|
||||
}
|
||||
} else {
|
||||
result.reasonRegex = parseStringToRegex(reasonMatch, 'i');
|
||||
if(result.reasonRegex === undefined) {
|
||||
throw new CMError(`Could not convert reason match value to Regex: ${reasonMatch}`, {isSerious: false})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -1,4 +1,14 @@
|
||||
import {CompareValue, CompareValueOrPercent, DurationComparor, ModeratorNameCriteria, ModeratorNames} from "../Atomic";
|
||||
import {
|
||||
CompareValue,
|
||||
CompareValueOrPercent,
|
||||
DurationComparor,
|
||||
ModeratorNameCriteria,
|
||||
ModeratorNames, ModActionType,
|
||||
ModUserNoteLabel
|
||||
} from "../Atomic";
|
||||
import {ActivityType} from "../Reddit";
|
||||
import {GenericComparison, parseGenericValueComparison} from "../Comparisons";
|
||||
import {parseStringToRegexOrLiteralSearch} from "../../../util";
|
||||
|
||||
/**
|
||||
* Different attributes a `Subreddit` can be in. Only include a property if you want to check it.
|
||||
@@ -55,42 +65,40 @@ export const defaultStrongSubredditCriteriaOptions = {
|
||||
|
||||
export type FilterCriteriaDefaultBehavior = 'replace' | 'merge';
|
||||
|
||||
export interface UserNoteCriteria {
|
||||
/**
|
||||
* User Note type key to search for
|
||||
* @examples ["spamwarn"]
|
||||
* */
|
||||
type: string;
|
||||
export interface UserSubredditHistoryCriteria {
|
||||
/**
|
||||
* Number of occurrences of this type. Ignored if `search` is `current`
|
||||
*
|
||||
* A string containing a comparison operator and/or a value to compare number of occurrences against
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign] [ascending|descending]`
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`
|
||||
*
|
||||
* If `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`
|
||||
*
|
||||
* @examples [">= 1"]
|
||||
* @default ">= 1"
|
||||
* @pattern ^\s*(?<opStr>>|>=|<|<=)\s*(?<value>\d+)\s*(?<percent>%?)\s*(?<extra>asc.*|desc.*)*$
|
||||
* @pattern ^\s*(?<opStr>>|>=|<|<=)\s*(?<value>\d+)\s*(?<percent>%?)\s*(?<duration>in\s+\d+\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\s*(?<extra>asc.*|desc.*)*$
|
||||
* */
|
||||
count?: string;
|
||||
|
||||
/**
|
||||
* How to test the notes for this Author:
|
||||
* How to test the Toolbox Notes or Mod Actions for this Author:
|
||||
*
|
||||
* ### current
|
||||
*
|
||||
* Only the most recent note is checked for `type`
|
||||
* Only the most recent note is checked for criteria
|
||||
*
|
||||
* ### total
|
||||
*
|
||||
* The `count` comparison of `type` must be found within all notes
|
||||
* `count` comparison of mod actions/notes must be found within all history
|
||||
*
|
||||
* * EX `count: > 3` => Must have more than 3 notes of `type`, total
|
||||
* * EX `count: <= 25%` => Must have 25% or less of notes of `type`, total
|
||||
* * EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week
|
||||
*
|
||||
* ### consecutive
|
||||
*
|
||||
* The `count` **number** of `type` notes must be found in a row.
|
||||
* The `count` **number** of mod actions/notes must be found in a row.
|
||||
*
|
||||
* You may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`
|
||||
*
|
||||
@@ -104,7 +112,126 @@ export interface UserNoteCriteria {
|
||||
search?: 'current' | 'consecutive' | 'total'
|
||||
}
|
||||
|
||||
export const authorCriteriaProperties = ['name', 'flairCssClass', 'flairText', 'flairTemplate', 'isMod', 'userNotes', 'age', 'linkKarma', 'commentKarma', 'totalKarma', 'verified', 'shadowBanned', 'description', 'isContributor'];
|
||||
export interface UserNoteCriteria extends UserSubredditHistoryCriteria {
|
||||
/**
|
||||
* User Note type key to search for
|
||||
* @examples ["spamwarn"]
|
||||
* */
|
||||
type: string;
|
||||
}
|
||||
|
||||
export interface ModActionCriteria extends UserSubredditHistoryCriteria {
|
||||
type?: ModActionType | ModActionType[]
|
||||
activityType?: ActivityType | ActivityType[]
|
||||
}
|
||||
|
||||
export interface FullModActionCriteria extends Omit<ModActionCriteria, 'count'> {
|
||||
type?: ModActionType[]
|
||||
count?: GenericComparison
|
||||
activityType?: ActivityType[]
|
||||
}
|
||||
|
||||
export interface ModNoteCriteria extends ModActionCriteria {
|
||||
noteType?: ModUserNoteLabel | ModUserNoteLabel[]
|
||||
note?: string | string[]
|
||||
}
|
||||
|
||||
export interface FullModNoteCriteria extends FullModActionCriteria, Omit<ModNoteCriteria, 'note' | 'count' | 'type' | 'activityType'> {
|
||||
noteType?: ModUserNoteLabel[]
|
||||
note?: RegExp[]
|
||||
}
|
||||
|
||||
const arrayableModNoteProps = ['activityType','noteType','note'];
|
||||
|
||||
export const asModNoteCriteria = (val: any): val is ModNoteCriteria => {
|
||||
return val !== null && typeof val === 'object' && ('noteType' in val || 'note' in val);
|
||||
}
|
||||
|
||||
export const toFullModNoteCriteria = (val: ModNoteCriteria): FullModNoteCriteria => {
|
||||
|
||||
const result = Object.entries(val).reduce((acc: FullModNoteCriteria, curr) => {
|
||||
const [k,v] = curr;
|
||||
|
||||
if(v === undefined) {
|
||||
return acc;
|
||||
}
|
||||
|
||||
const rawVal = arrayableModNoteProps.includes(k) && !Array.isArray(v) ? [v] : v;
|
||||
|
||||
switch(k) {
|
||||
case 'search':
|
||||
acc.search = rawVal;
|
||||
break;
|
||||
case 'count':
|
||||
acc.count = parseGenericValueComparison(rawVal);
|
||||
break;
|
||||
case 'activityType':
|
||||
case 'noteType':
|
||||
acc[k] = rawVal;
|
||||
break;
|
||||
case 'note':
|
||||
acc[k] = rawVal.map((x: string) => parseStringToRegexOrLiteralSearch(x))
|
||||
}
|
||||
|
||||
return acc;
|
||||
}, {});
|
||||
|
||||
result.type = ['NOTE'];
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
export interface ModLogCriteria extends ModActionCriteria {
|
||||
action?: string | string[]
|
||||
details?: string | string[]
|
||||
description?: string | string[]
|
||||
}
|
||||
|
||||
export interface FullModLogCriteria extends FullModActionCriteria, Omit<ModLogCriteria, 'action' | 'details' | 'description' | 'count' | 'type' | 'activityType'> {
|
||||
action?: RegExp[]
|
||||
details?: RegExp[]
|
||||
description?: RegExp[]
|
||||
}
|
||||
|
||||
const arrayableModLogProps = ['type','activityType','action','description','details', 'type'];
|
||||
|
||||
export const asModLogCriteria = (val: any): val is ModLogCriteria => {
|
||||
return val !== null && typeof val === 'object' && !asModNoteCriteria(val) && ('action' in val || 'details' in val || 'description' in val || 'activityType' in val || 'search' in val || 'count' in val || 'type' in val);
|
||||
}
|
||||
|
||||
export const toFullModLogCriteria = (val: ModLogCriteria): FullModLogCriteria => {
|
||||
|
||||
return Object.entries(val).reduce((acc: FullModLogCriteria, curr) => {
|
||||
const [k,v] = curr;
|
||||
|
||||
if(v === undefined) {
|
||||
return acc;
|
||||
}
|
||||
|
||||
const rawVal = arrayableModLogProps.includes(k) && !Array.isArray(v) ? [v] : v;
|
||||
|
||||
switch(k) {
|
||||
case 'search':
|
||||
acc.search = rawVal;
|
||||
break;
|
||||
case 'count':
|
||||
acc.count = parseGenericValueComparison(rawVal);
|
||||
break;
|
||||
case 'activityType':
|
||||
case 'type':
|
||||
acc[k as keyof FullModLogCriteria] = rawVal;
|
||||
break;
|
||||
case 'action':
|
||||
case 'description':
|
||||
case 'details':
|
||||
acc[k as keyof FullModLogCriteria] = rawVal.map((x: string) => parseStringToRegexOrLiteralSearch(x))
|
||||
}
|
||||
|
||||
return acc;
|
||||
}, {});
|
||||
}
|
||||
|
||||
export const authorCriteriaProperties = ['name', 'flairCssClass', 'flairText', 'flairTemplate', 'isMod', 'userNotes', 'modActions', 'age', 'linkKarma', 'commentKarma', 'totalKarma', 'verified', 'shadowBanned', 'description', 'isContributor'];
|
||||
|
||||
/**
|
||||
* Criteria with which to test against the author of an Activity. The outcome of the test is based on:
|
||||
@@ -159,6 +286,8 @@ export interface AuthorCriteria {
|
||||
* */
|
||||
userNotes?: UserNoteCriteria[]
|
||||
|
||||
modActions?: (ModNoteCriteria | ModLogCriteria)[]
|
||||
|
||||
/**
|
||||
* Test the age of the Author's account (when it was created) against this comparison
|
||||
*
|
||||
@@ -228,7 +357,35 @@ export interface AuthorCriteria {
|
||||
* Is the author an approved user (contributor)?
|
||||
* */
|
||||
isContributor?: boolean
|
||||
} // properties calculated/derived by CM -- not provided as plain values by reddit
|
||||
}
|
||||
|
||||
/**
|
||||
* When testing AuthorCriteria test properties in order of likelihood to require an API call to complete
|
||||
* */
|
||||
export const orderedAuthorCriteriaProps: (keyof AuthorCriteria)[] = [
|
||||
'name', // never needs an api call, returned/cached with activity info
|
||||
// none of these normally need api calls unless activity is a skeleton generated by CM (not normal)
|
||||
// all are part of cached activity data
|
||||
'flairCssClass',
|
||||
'flairText',
|
||||
'flairTemplate',
|
||||
// usernotes are cached longer than author by default (5 min vs 60 seconds)
|
||||
'userNotes',
|
||||
// requires fetching/getting cached author.
|
||||
// If fetching and user is shadowbanned none of the individual author data below will be retrievable either so always do this first
|
||||
'shadowBanned',
|
||||
// individual props require fetching/getting cached
|
||||
'age',
|
||||
'linkKarma',
|
||||
'commentKarma',
|
||||
'totalKarma',
|
||||
'verified',
|
||||
'description',
|
||||
'isMod', // requires fetching mods for subreddit
|
||||
'isContributor', // requires fetching contributors for subreddit
|
||||
'modActions', // requires fetching mod notes/actions for author (shortest cache TTL)
|
||||
];
|
||||
|
||||
export interface ActivityState {
|
||||
/**
|
||||
* * true/false => test whether Activity is removed or not
|
||||
@@ -248,20 +405,40 @@ export interface ActivityState {
|
||||
approved?: boolean | ModeratorNames | ModeratorNames[] | ModeratorNameCriteria
|
||||
score?: CompareValue
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare against
|
||||
* A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>`
|
||||
* The syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`
|
||||
*
|
||||
* If only comparison and number is given then defaults to TOTAL reports on an Activity.
|
||||
*
|
||||
* * EX `> 2` => greater than 2 total reports
|
||||
*
|
||||
* Defaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:
|
||||
* Type (optional) determines which type of reports to look at:
|
||||
*
|
||||
* * EX `> 3 mod` => greater than 3 mod reports
|
||||
* * EX `>= 1 user` => greater than 1 user report
|
||||
* * `mod` -- mod reports
|
||||
* * EX `> 3 mod` => greater than 3 mod reports
|
||||
* * `user` -- user reports
|
||||
* * EX `> 3 user` => greater than 3 user reports
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* Report reason qualifiers can be:
|
||||
*
|
||||
* * enclosed double or single quotes -- report reason contains
|
||||
* * EX `> 1 "misinformation" => greater than 1 report with reason containing "misinformation"
|
||||
* * enclosed in backslashes -- match regex
|
||||
* * EX `> 1 \harassment towards .*\` => greater than 1 report with reason matching regex \harassment towards .*\
|
||||
*
|
||||
* Type and reason qualifiers can be used together:
|
||||
*
|
||||
* EX `> 2 user "misinformation" => greater than 2 user reports with reasons containing "misinformation"
|
||||
*
|
||||
* The time constraint filter reports created between NOW and [timeConstraint] in the past:
|
||||
*
|
||||
* * `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago
|
||||
* * `> 2 user "misinformation" in 2 hours` => more than 2 user reports containing "misinformation" created between NOW and 2 hours ago
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)(\s*%)?(\s+(?:mods?|users?))?(\s+(?:["'].*["']|\/.*\/))?.*(\d+)?\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\s*$
|
||||
* */
|
||||
reports?: CompareValue
|
||||
reports?: string
|
||||
age?: DurationComparor
|
||||
/**
|
||||
* Test whether the activity is present in dispatched/delayed activities
|
||||
@@ -277,7 +454,7 @@ export interface ActivityState {
|
||||
dispatched?: boolean | string | string[]
|
||||
|
||||
|
||||
// can use ActivitySource | ActivitySource[] here because of issues with generating json schema, see ActivitySource comments
|
||||
// cant use ActivitySource | ActivitySource[] here because of issues with generating json schema, see ActivitySource comments
|
||||
/**
|
||||
* Test where the current activity was sourced from.
|
||||
*
|
||||
@@ -330,6 +507,18 @@ export interface SubmissionState extends ActivityState {
|
||||
* Is the submission a reddit-hosted image or video?
|
||||
* */
|
||||
isRedditMediaDomain?: boolean
|
||||
|
||||
/**
|
||||
* Compare the upvote ratio for this Submission, expressed as a whole number
|
||||
*
|
||||
* Can be either a comparison string or a number. If a number then CM assumes upvote ratio must be greater than or equal to this.
|
||||
*
|
||||
* Example:
|
||||
*
|
||||
* * `< 90` => less than 90% upvoted
|
||||
* * 45 => greater than or equal to 45% upvoted
|
||||
* */
|
||||
upvoteRatio?: number | CompareValue
|
||||
}
|
||||
|
||||
export const cmActivityProperties = ['submissionState', 'score', 'reports', 'removed', 'deleted', 'filtered', 'age', 'title'];
|
||||
|
||||
45
src/Common/Infrastructure/Includes.ts
Normal file
45
src/Common/Infrastructure/Includes.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import {ConfigFormat} from "./Atomic";
|
||||
|
||||
export interface IncludesData {
|
||||
/**
|
||||
* The special-form path to the config fragment to retrieve.
|
||||
*
|
||||
* If value starts with `wiki:` then the proceeding value will be used to get a wiki page from the current subreddit
|
||||
*
|
||||
* * EX `wiki:botconfig/mybot` tries to get `https://reddit.com/r/currentSubreddit/wiki/botconfig/mybot`
|
||||
*
|
||||
* If the value starts with `wiki:` and ends with `|someValue` then `someValue` will be used as the base subreddit for the wiki page
|
||||
*
|
||||
* * EX `wiki:replytemplates/test|ContextModBot` tries to get `https://reddit.com/r/ContextModBot/wiki/replytemplates/test`
|
||||
*
|
||||
* If the value starts with `url:` then the value is fetched as an external url and expects raw text returned
|
||||
*
|
||||
* * EX `url:https://pastebin.com/raw/38qfL7mL` tries to get the text response of `https://pastebin.com/raw/38qfL7mL`
|
||||
* */
|
||||
path: string
|
||||
/**
|
||||
* An unused hint about the content type. Not implemented yet
|
||||
* */
|
||||
type?: ConfigFormat
|
||||
/**
|
||||
* Control caching for the config fragment.
|
||||
*
|
||||
* If not specified the value for `wikiTTL` will be used
|
||||
*
|
||||
* * If true then value is cached forever
|
||||
* * If false then value is never cached
|
||||
* * If a number then the number of seconds to cache value
|
||||
* * If 'response' then CM will attempt to use Cache-Control or Expires headers from the response to determine how long to cache the value
|
||||
* */
|
||||
ttl?: number | boolean | 'response'
|
||||
}
|
||||
|
||||
export type IncludesUrl = `url:${string}`;
|
||||
export type IncludesWiki = `wiki:${string}`;
|
||||
export type IncludesString = IncludesUrl | IncludesWiki;
|
||||
|
||||
export type IncludesType = string | IncludesData;
|
||||
|
||||
export const asIncludesData = (val: any): val is IncludesData => {
|
||||
return val !== null && typeof val === 'object' && 'path' in val;
|
||||
}
|
||||
@@ -30,3 +30,61 @@ export interface CachedFetchedActivitiesResult {
|
||||
export interface FetchedActivitiesResult extends CachedFetchedActivitiesResult {
|
||||
post: SnoowrapActivity[]
|
||||
}
|
||||
|
||||
export type ReportType = 'mod' | 'user';
|
||||
|
||||
export interface Report {
|
||||
reason: string
|
||||
type: ReportType
|
||||
author?: string
|
||||
snoozed: boolean
|
||||
canSnooze: boolean
|
||||
}
|
||||
|
||||
export type RawRedditUserReport = [
|
||||
string, // reason
|
||||
number, // number of reports with this reason
|
||||
boolean, // is report snoozed
|
||||
boolean // can the reports be snoozed
|
||||
];
|
||||
|
||||
export type RawRedditModReport = [string, string];
|
||||
|
||||
export const activityReports = (activity: SnoowrapActivity): Report[] => {
|
||||
const reports: Report[] = [];
|
||||
for(const r of (activity.user_reports as unknown as RawRedditUserReport[])) {
|
||||
const report = {
|
||||
reason: r[0],
|
||||
type: 'user' as ReportType,
|
||||
snoozed: r[2],
|
||||
canSnooze: r[3]
|
||||
};
|
||||
for(let i = 0; i < r[1]; i++) {
|
||||
reports.push(report);
|
||||
}
|
||||
}
|
||||
|
||||
for(const r of (activity.mod_reports as unknown as RawRedditModReport[])) {
|
||||
reports.push({
|
||||
reason: r[0],
|
||||
type: 'mod' as ReportType,
|
||||
author: r[1],
|
||||
snoozed: false,
|
||||
canSnooze: false
|
||||
})
|
||||
}
|
||||
return reports;
|
||||
}
|
||||
|
||||
export interface RawSubredditRemovalReasonData {
|
||||
data: {
|
||||
[key: string]: SubredditRemovalReason
|
||||
},
|
||||
order: [string]
|
||||
}
|
||||
|
||||
export interface SubredditRemovalReason {
|
||||
message: string
|
||||
id: string,
|
||||
title: string
|
||||
}
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
import {StructuredRunnableBase} from "./Runnable";
|
||||
import {RuleSetObjectJson} from "../../Rule/RuleSet";
|
||||
import {RuleSetConfigObject} from "../../Rule/RuleSet";
|
||||
import {RuleObjectJsonTypes} from "../types";
|
||||
import {IncludesData} from "./Includes";
|
||||
|
||||
export type RuleJson = RuleObjectJsonTypes | string;
|
||||
export type RuleObjectJson = Exclude<RuleJson, string>
|
||||
export type StructuredRuleObjectJson = Omit<RuleObjectJson, 'authorIs' | 'itemIs'> & StructuredRunnableBase
|
||||
export type StructuredRuleSetObjectJson = Omit<RuleSetObjectJson, 'rules'> & {
|
||||
rules: StructuredRuleObjectJson[]
|
||||
export type RuleConfigData = RuleObjectJsonTypes | string | IncludesData;
|
||||
export type RuleConfigHydratedData = Exclude<RuleConfigData, IncludesData>
|
||||
export type RuleConfigObject = Exclude<RuleConfigHydratedData, string>
|
||||
export type StructuredRuleConfigObject = Omit<RuleConfigObject, 'authorIs' | 'itemIs'> & StructuredRunnableBase
|
||||
export type StructuredRuleSetConfigObject = Omit<RuleSetConfigObject, 'rules'> & {
|
||||
rules: StructuredRuleConfigObject[]
|
||||
}
|
||||
|
||||
@@ -10,11 +10,16 @@ export interface RunnableBaseOptions extends Omit<RunnableBaseJson, 'itemIs' | '
|
||||
authorIs?: MinimalOrFullFilter<AuthorCriteria>
|
||||
}
|
||||
|
||||
export interface StructuredRunnableBase {
|
||||
export interface StructuredRunnableBase extends RunnableBaseJson {
|
||||
itemIs?: MinimalOrFullFilter<TypedActivityState>
|
||||
authorIs?: MinimalOrFullFilter<AuthorCriteria>
|
||||
}
|
||||
|
||||
export interface TypedStructuredRunnableBase<T> extends TypedRunnableBaseData<T> {
|
||||
itemIs?: MinimalOrFullFilter<T>
|
||||
authorIs?: MinimalOrFullFilter<AuthorCriteria>
|
||||
}
|
||||
|
||||
export interface RunnableBaseJson {
|
||||
/**
|
||||
* A list of criteria to test the state of the `Activity` against before running the check.
|
||||
@@ -31,3 +36,10 @@ export interface RunnableBaseJson {
|
||||
* */
|
||||
authorIs?: MinimalOrFullFilterJson<AuthorCriteria>
|
||||
}
|
||||
|
||||
export interface TypedRunnableBaseData<T extends TypedActivityState> extends RunnableBaseJson {
|
||||
/**
|
||||
* If present then these Author criteria are checked before running the Check. If criteria fails then the Check will fail.
|
||||
* */
|
||||
authorIs?: MinimalOrFullFilterJson<AuthorCriteria>
|
||||
}
|
||||
|
||||
493
src/Common/LangaugeProcessing.ts
Normal file
493
src/Common/LangaugeProcessing.ts
Normal file
@@ -0,0 +1,493 @@
|
||||
import {containerBootstrap} from '@nlpjs/core';
|
||||
import {Language, LanguageGuess, LanguageType} from '@nlpjs/language';
|
||||
import {Nlp} from '@nlpjs/nlp';
|
||||
import {SentimentIntensityAnalyzer} from 'vader-sentiment';
|
||||
import wink from 'wink-sentiment';
|
||||
import {SnoowrapActivity} from "./Infrastructure/Reddit";
|
||||
import {
|
||||
asGenericComparison, comparisonTextOp,
|
||||
GenericComparison,
|
||||
parseGenericValueComparison,
|
||||
RangedComparison
|
||||
} from "./Infrastructure/Comparisons";
|
||||
import {asSubmission, between, formatNumber} from "../util";
|
||||
import {CMError, MaybeSeriousErrorWithCause} from "../Utils/Errors";
|
||||
import InvalidRegexError from "../Utils/InvalidRegexError";
|
||||
import {StringOperator} from "./Infrastructure/Atomic";
|
||||
import {LangEs} from "@nlpjs/lang-es";
|
||||
import {LangDe} from "@nlpjs/lang-de";
|
||||
import {LangEn} from "@nlpjs/lang-en";
|
||||
import {LangFr} from "@nlpjs/lang-fr";
|
||||
|
||||
export type SentimentAnalysisType = 'vader' | 'afinn' | 'senticon' | 'pattern' | 'wink';
|
||||
|
||||
export const sentimentQuantifier = {
|
||||
'extremely negative': -0.6,
|
||||
'very negative': -0.3,
|
||||
'negative': -0.1,
|
||||
'neutral': 0,
|
||||
'positive': 0.1,
|
||||
'very positive': 0.3,
|
||||
'extremely positive': 0.6,
|
||||
}
|
||||
|
||||
export const sentimentQuantifierRanges = [
|
||||
{
|
||||
range: [Number.MIN_SAFE_INTEGER, -0.6],
|
||||
quant: 'extremely negative'
|
||||
},
|
||||
{
|
||||
range: [-0.6, -0.3],
|
||||
quant: 'very negative'
|
||||
},
|
||||
{
|
||||
range: [-0.3, -0.1],
|
||||
quant: 'negative'
|
||||
},
|
||||
{
|
||||
range: [-0.1, 0.1],
|
||||
quant: 'neutral'
|
||||
},
|
||||
{
|
||||
range: [0.1, 0.3],
|
||||
quant: 'positive'
|
||||
},
|
||||
{
|
||||
range: [0.3, 0.6],
|
||||
quant: 'very positive'
|
||||
},
|
||||
{
|
||||
range: [0.6, Number.MAX_SAFE_INTEGER],
|
||||
quant: 'extremely positive'
|
||||
}
|
||||
]
|
||||
|
||||
const scoreToSentimentText = (val: number) => {
|
||||
for (const segment of sentimentQuantifierRanges) {
|
||||
if (between(val, segment.range[0], segment.range[1], false, true)) {
|
||||
return segment.quant;
|
||||
}
|
||||
}
|
||||
throw new Error('should not hit this!');
|
||||
}
|
||||
|
||||
export interface SentimentResult {
|
||||
comparative: number
|
||||
type: SentimentAnalysisType
|
||||
sentiment: string
|
||||
weight: number
|
||||
tokens: number
|
||||
matchedTokens?: number,
|
||||
usableResult: true | string
|
||||
}
|
||||
|
||||
export interface StringSentiment {
|
||||
results: SentimentResult[]
|
||||
score: number
|
||||
scoreWeighted: number
|
||||
sentiment: string
|
||||
sentimentWeighted: string
|
||||
guessedLanguage: LanguageGuessResult
|
||||
usedLanguage: LanguageType
|
||||
usableScore: boolean
|
||||
reason?: string
|
||||
}
|
||||
|
||||
export interface ActivitySentiment extends StringSentiment {
|
||||
activity: SnoowrapActivity
|
||||
}
|
||||
|
||||
export interface StringSentimentTestResult extends StringSentiment {
|
||||
passes: boolean
|
||||
test: GenericComparison | RangedComparison
|
||||
}
|
||||
|
||||
export interface ActivitySentimentTestResult extends StringSentimentTestResult {
|
||||
activity: SnoowrapActivity
|
||||
}
|
||||
|
||||
export interface ActivitySentimentOptions {
|
||||
testOn?: ('title' | 'body')[]
|
||||
/**
|
||||
* Make the analyzer assume a language if it cannot determine one itself.
|
||||
*
|
||||
* This is very useful for the analyzer when it is parsing short pieces of content. For example, if you know your subreddit is majority english speakers this will make the analyzer return "neutral" sentiment instead of "not detected language".
|
||||
*
|
||||
* Defaults to 'en'
|
||||
*
|
||||
* @example ["en"]
|
||||
* @default en
|
||||
* */
|
||||
defaultLanguage?: string | null | false
|
||||
|
||||
/**
|
||||
* Helps the analyzer coerce a low confidence language guess into a known-used languages in two ways:
|
||||
*
|
||||
* If the analyzer's
|
||||
* * *best* guess is NOT one of these
|
||||
* * but it did guess one of these
|
||||
* * and its guess is above requiredLanguageConfidence score then use the hinted language instead of best guess
|
||||
* * OR text content is very short (4 words or less)
|
||||
* * and the best guess was below the requiredLanguageConfidence score
|
||||
* * and none of guesses was a hinted language then use the defaultLanguage
|
||||
*
|
||||
* Defaults to popular romance languages: ['en', 'es', 'de', 'fr']
|
||||
*
|
||||
* @example [["en", "es", "de", "fr"]]
|
||||
* @default ["en", "es", "de", "fr"]
|
||||
* */
|
||||
languageHints?: string[]
|
||||
|
||||
/**
|
||||
* Required confidence to use a guessed language as the best guess. Score from 0 to 1.
|
||||
*
|
||||
* Defaults to 0.9
|
||||
*
|
||||
* @example [0.9]
|
||||
* @default 0.9
|
||||
* */
|
||||
requiredLanguageConfidence?: number
|
||||
}
|
||||
|
||||
export type SentimentCriteriaTest = GenericComparison | RangedComparison;
|
||||
|
||||
export const availableSentimentLanguages = ['en', 'es', 'de', 'fr'];
|
||||
|
||||
export const textComparison = /(?<not>not)?\s*(?<modifier>very|extremely)?\s*(?<sentiment>positive|neutral|negative)/i;
|
||||
|
||||
export const parseTextToNumberComparison = (val: string): RangedComparison | GenericComparison => {
|
||||
|
||||
let genericError: Error | undefined;
|
||||
try {
|
||||
return parseGenericValueComparison(val);
|
||||
} catch (e) {
|
||||
genericError = e as Error;
|
||||
// now try text match
|
||||
}
|
||||
|
||||
const matches = val.match(textComparison);
|
||||
if (matches === null) {
|
||||
const textError = new InvalidRegexError(textComparison, val);
|
||||
throw new CMError(`Sentiment value did not match a valid numeric comparison or valid text: \n ${genericError.message} \n ${textError.message}`);
|
||||
}
|
||||
const groups = matches.groups as any;
|
||||
|
||||
const negate = groups.not !== undefined && groups.not !== '';
|
||||
|
||||
if (groups.sentiment === 'neutral') {
|
||||
if (negate) {
|
||||
return {
|
||||
displayText: 'not neutral (not -0.1 to 0.1)',
|
||||
range: [-0.1, 0.1],
|
||||
not: true,
|
||||
}
|
||||
}
|
||||
return {
|
||||
displayText: 'is neutral (-0.1 to 0.1)',
|
||||
range: [-0.1, 0.1],
|
||||
not: false
|
||||
}
|
||||
}
|
||||
|
||||
const compoundSentimentText = `${groups.modifier !== undefined && groups.modifier !== '' ? `${groups.modifier} ` : ''}${groups.sentiment}`.toLocaleLowerCase();
|
||||
// @ts-ignore
|
||||
const numericVal = sentimentQuantifier[compoundSentimentText] as number;
|
||||
if (numericVal === undefined) {
|
||||
throw new CMError(`Sentiment given did not match any known phrases: '${compoundSentimentText}'`);
|
||||
}
|
||||
|
||||
let operator: StringOperator;
|
||||
if (negate) {
|
||||
operator = numericVal > 0 ? '<' : '>';
|
||||
} else {
|
||||
operator = numericVal > 0 ? '>=' : '<=';
|
||||
}
|
||||
|
||||
return {
|
||||
operator,
|
||||
value: numericVal,
|
||||
isPercent: false,
|
||||
displayText: `is${negate ? ' not ' : ' '}${compoundSentimentText} (${operator} ${numericVal})`
|
||||
}
|
||||
}
|
||||
|
||||
let nlp: Nlp;
|
||||
let container: any;
|
||||
|
||||
const bootstrapNlp = async () => {
|
||||
|
||||
container = await containerBootstrap();
|
||||
container.use(Language);
|
||||
container.use(Nlp);
|
||||
container.use(LangEs);
|
||||
container.use(LangDe);
|
||||
container.use(LangEn);
|
||||
container.use(LangFr);
|
||||
nlp = container.get('nlp');
|
||||
nlp.settings.autoSave = false;
|
||||
nlp.addLanguage('en');
|
||||
nlp.addLanguage('es');
|
||||
nlp.addLanguage('de');
|
||||
nlp.addLanguage('fr');
|
||||
nlp.nluManager.guesser.processExtraSentences();
|
||||
await nlp.train();
|
||||
}
|
||||
|
||||
export const getNlp = async () => {
|
||||
if (nlp === undefined) {
|
||||
await bootstrapNlp();
|
||||
}
|
||||
|
||||
return nlp;
|
||||
}
|
||||
|
||||
export const getActivityContent = (item: SnoowrapActivity, options?: ActivitySentimentOptions): string => {
|
||||
const {
|
||||
testOn = ['body', 'title'],
|
||||
} = options || {};
|
||||
|
||||
// determine what content we are testing
|
||||
let contents: string[] = [];
|
||||
if (asSubmission(item)) {
|
||||
for (const l of testOn) {
|
||||
switch (l) {
|
||||
case 'title':
|
||||
contents.push(item.title);
|
||||
break;
|
||||
case 'body':
|
||||
if (item.is_self) {
|
||||
contents.push(item.selftext);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
contents.push(item.body)
|
||||
}
|
||||
|
||||
return contents.join(' ');
|
||||
}
|
||||
|
||||
export const getLanguageTypeFromValue = async (val: string): Promise<LanguageType> => {
|
||||
|
||||
if (nlp === undefined) {
|
||||
await bootstrapNlp();
|
||||
}
|
||||
|
||||
const langObj = container.get('Language') as Language;
|
||||
|
||||
const cleanVal = val.trim().toLocaleLowerCase();
|
||||
|
||||
const foundLang = Object.values(langObj.languagesAlpha2).find(x => x.alpha2 === cleanVal || x.alpha3 === cleanVal || x.name.toLocaleLowerCase() === cleanVal);
|
||||
if (foundLang === undefined) {
|
||||
throw new MaybeSeriousErrorWithCause(`Could not find Language with identifier '${val}'`, {isSerious: false});
|
||||
}
|
||||
const {alpha2, alpha3, name: language} = foundLang;
|
||||
return {
|
||||
alpha2,
|
||||
alpha3,
|
||||
language
|
||||
};
|
||||
}
|
||||
|
||||
export interface LanguageGuessResult {
|
||||
bestGuess: LanguageGuess
|
||||
guesses: LanguageGuess[]
|
||||
requiredConfidence: number
|
||||
sparse: boolean
|
||||
language: LanguageType
|
||||
usedDefault: boolean
|
||||
}
|
||||
|
||||
export const getContentLanguage = async (content: string, options?: ActivitySentimentOptions): Promise<LanguageGuessResult> => {
|
||||
|
||||
const {
|
||||
defaultLanguage = 'en',
|
||||
requiredLanguageConfidence = 0.9,
|
||||
languageHints = availableSentimentLanguages
|
||||
} = options || {};
|
||||
|
||||
if (nlp === undefined) {
|
||||
await bootstrapNlp();
|
||||
}
|
||||
|
||||
const spaceNormalizedTokens = content.trim().split(' ').filter(x => x !== ''.trim());
|
||||
|
||||
const lang = container.get('Language') as Language;
|
||||
// would like to improve this https://github.com/axa-group/nlp.js/issues/761
|
||||
const guesses = lang.guess(content, null, 4);
|
||||
let bestLang = guesses[0];
|
||||
const shortContent = spaceNormalizedTokens.length <= 4;
|
||||
|
||||
const altBest = languageHints.includes(bestLang.alpha2) ? undefined : guesses.find(x => x.score >= requiredLanguageConfidence && languageHints.includes(x.alpha2));
|
||||
|
||||
// coerce best guess into a supported language that has a good enough confidence
|
||||
if(!shortContent && altBest !== undefined) {
|
||||
bestLang = altBest;
|
||||
}
|
||||
|
||||
let usedLang: LanguageType = bestLang;
|
||||
let usedDefault = false;
|
||||
|
||||
if (typeof defaultLanguage === 'string' && (bestLang.score < requiredLanguageConfidence || (shortContent && !languageHints.includes(bestLang.alpha2)))) {
|
||||
usedLang = await getLanguageTypeFromValue(defaultLanguage);
|
||||
usedDefault = true;
|
||||
}
|
||||
|
||||
return {
|
||||
guesses,
|
||||
bestGuess: bestLang,
|
||||
requiredConfidence: requiredLanguageConfidence,
|
||||
sparse: shortContent,
|
||||
language: usedLang,
|
||||
usedDefault
|
||||
}
|
||||
}
|
||||
|
||||
export const getActivitySentiment = async (item: SnoowrapActivity, options?: ActivitySentimentOptions): Promise<ActivitySentiment> => {
|
||||
|
||||
const result = await getStringSentiment(getActivityContent(item, options), options);
|
||||
|
||||
return {
|
||||
...result,
|
||||
activity: item
|
||||
}
|
||||
}
|
||||
|
||||
export const getStringSentiment = async (contentStr: string, options?: ActivitySentimentOptions): Promise<StringSentiment> => {
|
||||
|
||||
const langResult = await getContentLanguage(contentStr, options);
|
||||
|
||||
let usedLanguage: LanguageType = langResult.language;
|
||||
|
||||
const spaceNormalizedTokens = contentStr.trim().split(' ').filter(x => x !== ''.trim());
|
||||
|
||||
const results: SentimentResult[] = [];
|
||||
|
||||
const nlpResult = await nlp.process(langResult.language.alpha2, contentStr);
|
||||
|
||||
results.push({
|
||||
comparative: nlpResult.sentiment.average,
|
||||
type: nlpResult.sentiment.type as SentimentAnalysisType,
|
||||
sentiment: scoreToSentimentText(nlpResult.sentiment.average),
|
||||
weight: 1,
|
||||
matchedTokens: nlpResult.sentiment.numHits,
|
||||
tokens: nlpResult.sentiment.numWords,
|
||||
usableResult: availableSentimentLanguages.includes(langResult.language.alpha2) ? true : (nlpResult.sentiment.numHits / nlpResult.sentiment.numWords) >= 0.5 ? true : `${langResult.sparse ? 'Content was too short to guess language' : 'Unsupported language'} and less than 50% of tokens matched`,
|
||||
});
|
||||
|
||||
// only run vader/wink if either
|
||||
//
|
||||
// * content was short which means we aren't confident on language guess
|
||||
// * OR language is english (guessed or explicitly set as language fallback by user due to low confidence)
|
||||
//
|
||||
if (langResult.sparse || langResult.language.alpha2 === 'en') {
|
||||
|
||||
// neg post neu are ratios of *recognized* tokens in the content
|
||||
// when neu is close to 1 its either extremely neutral or no tokens were recognized
|
||||
const vaderScore = SentimentIntensityAnalyzer.polarity_scores(contentStr);
|
||||
const vaderRes: SentimentResult = {
|
||||
comparative: vaderScore.compound,
|
||||
type: 'vader',
|
||||
sentiment: scoreToSentimentText(vaderScore.compound),
|
||||
// may want to weight higher in the future...
|
||||
weight: 1,
|
||||
tokens: spaceNormalizedTokens.length,
|
||||
usableResult: langResult.language.alpha2 === 'en' ? true : (vaderScore.neu < 0.5 ? true : `Unable to guess language and unable to determine if more than 50% of tokens are negative or not matched`)
|
||||
};
|
||||
results.push(vaderRes);
|
||||
|
||||
const winkScore = wink(contentStr);
|
||||
const matchedTokens = winkScore.tokenizedPhrase.filter(x => x.score !== undefined);
|
||||
const matchedMeaningfulTokens = winkScore.tokenizedPhrase.filter(x => x.tag === 'word' || x.tag === 'emoji');
|
||||
// normalizedScore is range of -5 to +5 -- convert to -1 to +1
|
||||
const winkAdjusted = (winkScore.normalizedScore * 2) / 10;
|
||||
const winkRes: SentimentResult = {
|
||||
comparative: winkAdjusted,
|
||||
type: 'wink',
|
||||
sentiment: scoreToSentimentText(winkAdjusted),
|
||||
weight: 1,
|
||||
matchedTokens: matchedTokens.length,
|
||||
tokens: winkScore.tokenizedPhrase.length,
|
||||
usableResult: langResult.language.alpha2 === 'en' ? true : ((matchedTokens.length / matchedMeaningfulTokens.length) > 0.5 ? true : 'Unable to guess language and less than 50% of tokens matched')
|
||||
};
|
||||
results.push(winkRes);
|
||||
|
||||
if ((vaderRes.usableResult == true || winkRes.usableResult === true) && usedLanguage.alpha2 !== 'en') {
|
||||
// since we are confident enough to use one of these then we are assuming language is mostly english
|
||||
usedLanguage = await getLanguageTypeFromValue('en');
|
||||
}
|
||||
}
|
||||
|
||||
const score = results.reduce((acc, curr) => acc + curr.comparative, 0) / results.length;
|
||||
const sentiment = scoreToSentimentText(score);
|
||||
|
||||
const weightSum = results.reduce((acc, curr) => acc + curr.weight, 0);
|
||||
const weightedScores = results.reduce((acc, curr) => acc + (curr.weight * curr.comparative), 0);
|
||||
const weightedScore = weightedScores / weightSum;
|
||||
const weightedSentiment = scoreToSentimentText(weightedScore);
|
||||
|
||||
const actSentResult: StringSentiment = {
|
||||
results,
|
||||
score,
|
||||
sentiment,
|
||||
scoreWeighted: weightedScore,
|
||||
sentimentWeighted: weightedSentiment,
|
||||
guessedLanguage: langResult,
|
||||
usedLanguage,
|
||||
usableScore: results.filter(x => x.usableResult === true).length > 0,
|
||||
}
|
||||
|
||||
if (!actSentResult.usableScore) {
|
||||
if (actSentResult.guessedLanguage.sparse) {
|
||||
actSentResult.reason = 'Content may be supported language but was too short to guess accurately and no algorithm matched enough tokens to be considered confident.';
|
||||
} else {
|
||||
actSentResult.reason = 'Unsupported language'
|
||||
}
|
||||
}
|
||||
|
||||
return actSentResult;
|
||||
}
|
||||
|
||||
export const testActivitySentiment = async (item: SnoowrapActivity, criteria: SentimentCriteriaTest, options?: ActivitySentimentOptions): Promise<ActivitySentimentTestResult> => {
|
||||
const sentimentResult = await getActivitySentiment(item, options);
|
||||
|
||||
const testResult = testSentiment(sentimentResult, criteria);
|
||||
|
||||
return {
|
||||
...testResult,
|
||||
activity: item
|
||||
}
|
||||
}
|
||||
|
||||
export const testSentiment = (sentimentResult: StringSentiment, criteria: SentimentCriteriaTest): StringSentimentTestResult => {
|
||||
|
||||
if (!sentimentResult.usableScore) {
|
||||
return {
|
||||
passes: false,
|
||||
test: criteria,
|
||||
...sentimentResult,
|
||||
}
|
||||
}
|
||||
|
||||
if (asGenericComparison(criteria)) {
|
||||
return {
|
||||
passes: comparisonTextOp(sentimentResult.scoreWeighted, criteria.operator, criteria.value),
|
||||
test: criteria,
|
||||
...sentimentResult,
|
||||
}
|
||||
} else {
|
||||
if (criteria.not) {
|
||||
return {
|
||||
passes: sentimentResult.scoreWeighted < criteria.range[0] || sentimentResult.scoreWeighted > criteria.range[1],
|
||||
test: criteria,
|
||||
...sentimentResult,
|
||||
}
|
||||
}
|
||||
return {
|
||||
passes: sentimentResult.scoreWeighted >= criteria.range[0] || sentimentResult.scoreWeighted <= criteria.range[1],
|
||||
test: criteria,
|
||||
...sentimentResult,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import {DatabaseMigrationOptions} from "./interfaces";
|
||||
import {copyFile} from "fs/promises";
|
||||
import {constants} from "fs";
|
||||
import {ErrorWithCause} from "pony-cause";
|
||||
import {CMError} from "../Utils/Errors";
|
||||
|
||||
export interface ExistingTable {
|
||||
table: Table
|
||||
@@ -118,9 +119,10 @@ export class MigrationService {
|
||||
try {
|
||||
await this.backupDatabase();
|
||||
continueBCBackedup = true;
|
||||
} catch (err) {
|
||||
// @ts-ignore
|
||||
this.dbLogger.error(err, {leaf: 'Backup'});
|
||||
} catch (err: any) {
|
||||
if(!(err instanceof CMError) || !err.logged) {
|
||||
this.dbLogger.error(err, {leaf: 'Backup'});
|
||||
}
|
||||
}
|
||||
} else {
|
||||
this.dbLogger.info('Configuration DID NOT specify migrations may be executed if automated backup is successful. Will not try to create a backup.');
|
||||
@@ -154,25 +156,34 @@ YOU SHOULD BACKUP YOUR EXISTING DATABASE BEFORE CONTINUING WITH MIGRATIONS.`);
|
||||
|
||||
async backupDatabase() {
|
||||
try {
|
||||
if (this.database.options.type === 'sqljs' && this.database.options.location !== undefined) {
|
||||
let location: string | undefined;
|
||||
const canBackup = ['sqljs','better-sqlite3'].includes(this.database.options.type);
|
||||
if(canBackup) {
|
||||
if(this.database.options.type === 'sqljs') {
|
||||
location = this.database.options.location === ':memory:' ? undefined : this.database.options.location;
|
||||
} else {
|
||||
location = this.database.options.database === ':memory:' || (typeof this.database.options.database !== 'string') ? undefined : this.database.options.database;
|
||||
}
|
||||
}
|
||||
if (canBackup && location !== undefined) {
|
||||
try {
|
||||
const ts = Date.now();
|
||||
const backupLocation = `${this.database.options.location}.${ts}.bak`
|
||||
const backupLocation = `${location}.${ts}.bak`
|
||||
this.dbLogger.info(`Detected sqljs (sqlite) database. Will try to make a backup at ${backupLocation}`, {leaf: 'Backup'});
|
||||
await copyFile(this.database.options.location, backupLocation, constants.COPYFILE_EXCL);
|
||||
await copyFile(location, backupLocation, constants.COPYFILE_EXCL);
|
||||
this.dbLogger.info('Successfully created backup!', {leaf: 'Backup'});
|
||||
} catch (err: any) {
|
||||
throw new ErrorWithCause('Cannot make an automated backup of your configured database.', {cause: err});
|
||||
}
|
||||
} else {
|
||||
let msg = 'Cannot make an automated backup of your configured database.';
|
||||
if (this.database.options.type !== 'sqljs') {
|
||||
msg += ' Only SQlite (sqljs database type) is implemented for automated backups right now, sorry :( You will need to manually backup your database.';
|
||||
if (!canBackup) {
|
||||
msg += ' Only SQlite (sqljs or better-sqlite3 database type) is implemented for automated backups right now, sorry :( You will need to manually backup your database.';
|
||||
} else {
|
||||
// TODO don't throw for this??
|
||||
msg += ' Database location is not defined (probably in-memory).';
|
||||
}
|
||||
throw new Error(msg);
|
||||
throw new CMError(msg, {logged: true});
|
||||
}
|
||||
} catch (e: any) {
|
||||
this.dbLogger.error(e, {leaf: 'Backup'});
|
||||
|
||||
92
src/Common/Migrations/Database/MigrationUtil.ts
Normal file
92
src/Common/Migrations/Database/MigrationUtil.ts
Normal file
@@ -0,0 +1,92 @@
|
||||
import {QueryRunner, TableIndex} from "typeorm";
|
||||
|
||||
/**
|
||||
* Boilerplate for creating generic index
|
||||
* */
|
||||
export const index = (prefix: string, columns: string[], unique = true) => new TableIndex({
|
||||
name: `IDX_${unique ? 'UN_' : ''}${prefix}_${columns.join('-')}`,
|
||||
columnNames: columns,
|
||||
isUnique: unique,
|
||||
});
|
||||
/**
|
||||
* Create index on id column
|
||||
* */
|
||||
export const idIndex = (prefix: string, unique: boolean) => index(prefix, ['id'], unique);
|
||||
|
||||
/**
|
||||
* Boilerplate primary key column for random ID
|
||||
* */
|
||||
export const randomIdColumn = () => ({
|
||||
name: 'id',
|
||||
type: 'varchar',
|
||||
length: '20',
|
||||
isPrimary: true,
|
||||
isUnique: true,
|
||||
});
|
||||
|
||||
/**
|
||||
* Create a time data column based on database type
|
||||
* */
|
||||
export const timeAtColumn = (columnName: string, dbType: string, nullable?: boolean) => {
|
||||
const dbSpecifics = dbType === 'postgres' ? {
|
||||
type: 'timestamptz'
|
||||
} : {
|
||||
type: 'datetime',
|
||||
// required to get millisecond precision on mysql/mariadb
|
||||
// https://mariadb.com/kb/en/datetime/
|
||||
// https://dev.mysql.com/doc/refman/8.0/en/fractional-seconds.html
|
||||
length: '3',
|
||||
}
|
||||
return {
|
||||
name: columnName,
|
||||
isNullable: nullable ?? false,
|
||||
...dbSpecifics
|
||||
}
|
||||
}
|
||||
export const createdAtColumn = (type: string) => timeAtColumn('createdAt', type);
|
||||
const updatedAtColumn = (type: string) => timeAtColumn('updatedAt', type);
|
||||
const createdUpdatedAtColumns = (type: string) => [
|
||||
timeAtColumn('createdAt', type),
|
||||
timeAtColumn('updatedAt', type)
|
||||
];
|
||||
export const createdAtIndex = (prefix: string) => index(prefix, ['createdAt'], false);
|
||||
|
||||
const updatedAtIndex = (prefix: string) => index(prefix, ['updatedAt'], false);
|
||||
const createdUpdatedAtIndices = (prefix: string) => {
|
||||
return [
|
||||
createdAtIndex(prefix),
|
||||
updatedAtIndex(prefix)
|
||||
]
|
||||
}
|
||||
/**
|
||||
* Boilerplate for filter (itemIs, authorIs) FK column -- uses FK is filter ID
|
||||
* */
|
||||
const filterColumn = (name: string) => ({
|
||||
name,
|
||||
type: 'varchar',
|
||||
length: '20',
|
||||
isNullable: true
|
||||
});
|
||||
const authorIsColumn = () => filterColumn('authorIs');
|
||||
const itemIsColumn = () => filterColumn('itemIs');
|
||||
export const filterColumns = () => ([authorIsColumn(), itemIsColumn()]);
|
||||
const authorIsIndex = (prefix: string) => index(prefix, ['authorIs']);
|
||||
const itemIsIndex = (prefix: string) => index(prefix, ['itemIs']);
|
||||
export const filterIndices = (prefix: string) => {
|
||||
return [
|
||||
authorIsIndex(prefix),
|
||||
itemIsIndex(prefix)
|
||||
]
|
||||
}
|
||||
|
||||
export const tableHasData = async (runner: QueryRunner, name: string): Promise<boolean | null> => {
|
||||
const countRes = await runner.query(`select count(*) from ${name}`);
|
||||
let hasRows = null;
|
||||
if (Array.isArray(countRes) && countRes[0] !== null) {
|
||||
const {
|
||||
'count(*)': count
|
||||
} = countRes[0] || {};
|
||||
hasRows = count !== 0;
|
||||
}
|
||||
return hasRows;
|
||||
}
|
||||
@@ -1,86 +1,12 @@
|
||||
import {MigrationInterface, QueryRunner, Table, TableIndex, TableColumn, TableForeignKey} from "typeorm";
|
||||
|
||||
const randomIdColumn = () => ({
|
||||
name: 'id',
|
||||
type: 'varchar',
|
||||
length: '20',
|
||||
isPrimary: true,
|
||||
isUnique: true,
|
||||
});
|
||||
|
||||
const timeAtColumn = (columnName: string, dbType: string, nullable?: boolean) => {
|
||||
const dbSpecifics = dbType === 'postgres' ? {
|
||||
type: 'timestamptz'
|
||||
} : {
|
||||
type: 'datetime',
|
||||
// required to get millisecond precision on mysql/mariadb
|
||||
// https://mariadb.com/kb/en/datetime/
|
||||
// https://dev.mysql.com/doc/refman/8.0/en/fractional-seconds.html
|
||||
length: '3',
|
||||
}
|
||||
return {
|
||||
name: columnName,
|
||||
isNullable: nullable ?? false,
|
||||
...dbSpecifics
|
||||
}
|
||||
}
|
||||
|
||||
const createdAtColumn = (type: string) => timeAtColumn('createdAt', type);
|
||||
const updatedAtColumn = (type: string) => timeAtColumn('updatedAt', type);
|
||||
|
||||
const createdUpdatedAtColumns = (type: string) => [
|
||||
timeAtColumn('createdAt', type),
|
||||
timeAtColumn('updatedAt', type)
|
||||
];
|
||||
|
||||
|
||||
const createdAtIndex = (prefix: string) => new TableIndex({
|
||||
name: `IDX_${prefix}_createdAt`,
|
||||
columnNames: ['createdAt']
|
||||
});
|
||||
|
||||
const updatedAtIndex = (prefix: string) => new TableIndex({
|
||||
name: `IDX_${prefix}_updatedAt`,
|
||||
columnNames: ['updatedAt']
|
||||
})
|
||||
|
||||
const createdUpdatedAtIndices = (prefix: string) => {
|
||||
return [
|
||||
createdAtIndex(prefix),
|
||||
updatedAtIndex(prefix)
|
||||
]
|
||||
}
|
||||
|
||||
const filterColumn = (name: string) => ({
|
||||
name,
|
||||
type: 'varchar',
|
||||
length: '20',
|
||||
isNullable: true
|
||||
});
|
||||
|
||||
const authorIsColumn = () => filterColumn('authorIs');
|
||||
const itemIsColumn = () => filterColumn('itemIs');
|
||||
|
||||
const filterColumns = () => ([authorIsColumn(), itemIsColumn()]);
|
||||
|
||||
const authorIsIndex = (prefix: string) => new TableIndex({
|
||||
name: `IDX_${prefix}_authorIs`,
|
||||
columnNames: ['authorIs'],
|
||||
isUnique: true,
|
||||
});
|
||||
|
||||
const itemIsIndex = (prefix: string) => new TableIndex({
|
||||
name: `IDX_${prefix}_itemIs`,
|
||||
columnNames: ['itemIs'],
|
||||
isUnique: true
|
||||
});
|
||||
|
||||
const filterIndices = (prefix: string) => {
|
||||
return [
|
||||
authorIsIndex(prefix),
|
||||
itemIsIndex(prefix)
|
||||
]
|
||||
}
|
||||
import {MigrationInterface, QueryRunner, Table, TableIndex} from "typeorm";
|
||||
import {
|
||||
createdAtColumn,
|
||||
createdAtIndex,
|
||||
filterColumns,
|
||||
filterIndices,
|
||||
randomIdColumn,
|
||||
timeAtColumn
|
||||
} from "../MigrationUtil";
|
||||
|
||||
export class initApi1642180264563 implements MigrationInterface {
|
||||
name = 'initApi1642180264563'
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
import {MigrationInterface, QueryRunner, Table} from "typeorm"
|
||||
import {idIndex, index} from "../MigrationUtil";
|
||||
|
||||
export class indexes1653586738904 implements MigrationInterface {
|
||||
|
||||
public async up(queryRunner: QueryRunner): Promise<void> {
|
||||
|
||||
queryRunner.connection.logger.logSchemaBuild('Starting Index Add/Update Migration');
|
||||
queryRunner.connection.logger.logSchemaBuild('IF YOU HAVE A LARGE DATABASE THIS MAY TAKE SEVERAL MINUTES! DO NOT STOP CONTEXTMOD WHILE MIGRATION IS IN PROGRESS!');
|
||||
|
||||
// unique ids due to random id
|
||||
const uniqueIdTableNames = [
|
||||
'Manager',
|
||||
'CMEvent',
|
||||
'FilterResult',
|
||||
'FilterCriteriaResult',
|
||||
'RunnableResult',
|
||||
'RulePremise',
|
||||
'RuleResult',
|
||||
'RuleSetResult',
|
||||
'ActionPremise',
|
||||
'ActionResult',
|
||||
'CheckResult',
|
||||
'RunResult'
|
||||
];
|
||||
|
||||
for (const tableName of uniqueIdTableNames) {
|
||||
const cmTable = await queryRunner.getTable(tableName);
|
||||
await queryRunner.createIndex(cmTable as Table, idIndex(tableName, true));
|
||||
}
|
||||
|
||||
// additional indexes
|
||||
|
||||
const actSource = await queryRunner.getTable('ActivitySource');
|
||||
await queryRunner.createIndex(actSource as Table, idIndex('ActivitySource', false));
|
||||
|
||||
const event = await queryRunner.getTable('CMEvent');
|
||||
await queryRunner.createIndices(event as Table, [index('CMEvent', ['activity_id'], false)]);
|
||||
|
||||
// FilterCriteriaResult criteriaId filterResultId
|
||||
|
||||
const fcrTable = await queryRunner.getTable('FilterCriteriaResult');
|
||||
await queryRunner.createIndices(fcrTable as Table, [
|
||||
index('FilterCriteriaResult', ['criteriaId'], false),
|
||||
index('FilterCriteriaResult', ['filterResultId'], false)
|
||||
]);
|
||||
|
||||
|
||||
// FilterCriteria id
|
||||
|
||||
const fcTable = await queryRunner.getTable('FilterCriteria');
|
||||
await queryRunner.createIndices(fcTable as Table, [
|
||||
idIndex('FilterCriteriaResult', false),
|
||||
]);
|
||||
|
||||
// RunnableResult resultId runnableId
|
||||
|
||||
const rrTable = await queryRunner.getTable('RunnableResult');
|
||||
await queryRunner.createIndices(rrTable as Table, [
|
||||
index('RunnableResult', ['resultId'], false),
|
||||
index('RunnableResult', ['runnableId'], false)
|
||||
]);
|
||||
|
||||
// ActionResult checkResultId premiseId
|
||||
|
||||
const arTable = await queryRunner.getTable('ActionResult');
|
||||
await queryRunner.createIndices(arTable as Table, [
|
||||
index('ActionResult', ['checkResultId'], false),
|
||||
index('ActionResult', ['premiseId'], false)
|
||||
]);
|
||||
|
||||
// CheckResult runId
|
||||
|
||||
const crTable = await queryRunner.getTable('CheckResult');
|
||||
await queryRunner.createIndices(crTable as Table, [
|
||||
index('CheckResult', ['runId'], false),
|
||||
]);
|
||||
|
||||
// RunResult eventId
|
||||
|
||||
const runResTable = await queryRunner.getTable('RunResult');
|
||||
await queryRunner.createIndices(runResTable as Table, [
|
||||
index('RunResult', ['eventId'], false),
|
||||
]);
|
||||
}
|
||||
|
||||
public async down(queryRunner: QueryRunner): Promise<void> {
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
import {MigrationInterface, QueryRunner, Table, TableIndex} from "typeorm"
|
||||
import {createdAtColumn, createdAtIndex, idIndex, index, randomIdColumn} from "../MigrationUtil";
|
||||
|
||||
export class reportTracking1657632517934 implements MigrationInterface {
|
||||
|
||||
public async up(queryRunner: QueryRunner): Promise<void> {
|
||||
|
||||
const dbType = queryRunner.connection.driver.options.type;
|
||||
|
||||
await queryRunner.createTable(
|
||||
new Table({
|
||||
name: 'ActivityReport',
|
||||
columns: [
|
||||
randomIdColumn(),
|
||||
{
|
||||
name: 'activityId',
|
||||
type: 'varchar',
|
||||
length: '20',
|
||||
isNullable: false
|
||||
},
|
||||
{
|
||||
name: 'reason',
|
||||
type: 'varchar',
|
||||
length: '500',
|
||||
isNullable: false
|
||||
},
|
||||
{
|
||||
name: 'type',
|
||||
type: 'varchar',
|
||||
length: '200',
|
||||
isNullable: false
|
||||
},
|
||||
{
|
||||
name: 'author',
|
||||
type: 'varchar',
|
||||
length: '100',
|
||||
isNullable: true
|
||||
},
|
||||
{
|
||||
name: 'granularity',
|
||||
type: 'int',
|
||||
isNullable: false
|
||||
},
|
||||
createdAtColumn(dbType),
|
||||
],
|
||||
indices: [
|
||||
idIndex('ActivityReport', true),
|
||||
index('ActivityReport', ['activityId'], false),
|
||||
index('ActivityReportReason', ['reason'], false),
|
||||
createdAtIndex('report'),
|
||||
]
|
||||
}),
|
||||
true,
|
||||
true,
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
public async down(queryRunner: QueryRunner): Promise<void> {
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,50 @@
|
||||
import {MigrationInterface, QueryRunner, Table} from "typeorm"
|
||||
import {createdAtColumn, createdAtIndex, idIndex, index, randomIdColumn, timeAtColumn} from "../MigrationUtil";
|
||||
|
||||
export class Guests1658930394548 implements MigrationInterface {
|
||||
|
||||
public async up(queryRunner: QueryRunner): Promise<void> {
|
||||
const dbType = queryRunner.connection.driver.options.type;
|
||||
|
||||
await queryRunner.createTable(
|
||||
new Table({
|
||||
name: 'Guests',
|
||||
columns: [
|
||||
randomIdColumn(),
|
||||
{
|
||||
name: 'authorName',
|
||||
type: 'varchar',
|
||||
length: '200',
|
||||
isNullable: false,
|
||||
},
|
||||
{
|
||||
name: 'type',
|
||||
type: 'varchar',
|
||||
isNullable: false,
|
||||
length: '50'
|
||||
},
|
||||
{
|
||||
name: 'guestOfId',
|
||||
type: 'varchar',
|
||||
length: '20',
|
||||
isNullable: true
|
||||
},
|
||||
timeAtColumn('expiresAt', dbType, true),
|
||||
createdAtColumn(dbType),
|
||||
],
|
||||
indices: [
|
||||
idIndex('Guests', true),
|
||||
createdAtIndex('guests'),
|
||||
index('guest', ['expiresAt'], false)
|
||||
]
|
||||
}),
|
||||
true,
|
||||
true,
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
public async down(queryRunner: QueryRunner): Promise<void> {
|
||||
}
|
||||
|
||||
}
|
||||
145
src/Common/Migrations/Database/Server/1660228987769-invites.ts
Normal file
145
src/Common/Migrations/Database/Server/1660228987769-invites.ts
Normal file
@@ -0,0 +1,145 @@
|
||||
import {MigrationInterface, QueryRunner, Table, TableColumn} from "typeorm"
|
||||
import {createdAtColumn, createdAtIndex, idIndex, index, randomIdColumn, tableHasData, timeAtColumn} from "../MigrationUtil";
|
||||
|
||||
export class invites1660228987769 implements MigrationInterface {
|
||||
|
||||
public async up(queryRunner: QueryRunner): Promise<void> {
|
||||
const dbType = queryRunner.connection.driver.options.type;
|
||||
|
||||
await queryRunner.createTable(
|
||||
new Table({
|
||||
name: 'SubredditInvite',
|
||||
columns: [
|
||||
{
|
||||
name: 'id',
|
||||
type: 'varchar',
|
||||
length: '255',
|
||||
isPrimary: true,
|
||||
},
|
||||
{
|
||||
name: 'botId',
|
||||
type: 'varchar',
|
||||
length: '20',
|
||||
isNullable: false
|
||||
},
|
||||
{
|
||||
name: 'subreddit',
|
||||
type: 'varchar',
|
||||
length: '255',
|
||||
isNullable: false
|
||||
},
|
||||
{
|
||||
name: 'guests',
|
||||
type: 'text',
|
||||
isNullable: true
|
||||
},
|
||||
{
|
||||
name: 'initialConfig',
|
||||
type: 'text',
|
||||
isNullable: true
|
||||
},
|
||||
createdAtColumn(dbType),
|
||||
timeAtColumn('expiresAt', dbType, true)
|
||||
],
|
||||
}),
|
||||
true,
|
||||
true,
|
||||
true
|
||||
);
|
||||
|
||||
if (await queryRunner.hasTable('Invite')) {
|
||||
|
||||
await queryRunner.renameTable('Invite', 'BotInvite');
|
||||
const table = await queryRunner.getTable('BotInvite') as Table;
|
||||
|
||||
await queryRunner.addColumns(table, [
|
||||
new TableColumn({
|
||||
name: 'initialConfig',
|
||||
type: 'text',
|
||||
isNullable: true
|
||||
}),
|
||||
new TableColumn({
|
||||
name: 'guests',
|
||||
type: 'text',
|
||||
isNullable: true
|
||||
})
|
||||
]);
|
||||
|
||||
queryRunner.connection.logger.logSchemaBuild(`Table 'Invite' has been renamed 'BotInvite'. If there are existing rows on this table they will need to be recreated.`);
|
||||
|
||||
} else {
|
||||
|
||||
await queryRunner.createTable(
|
||||
new Table({
|
||||
name: 'BotInvite',
|
||||
columns: [
|
||||
{
|
||||
name: 'id',
|
||||
type: 'varchar',
|
||||
length: '255',
|
||||
isPrimary: true,
|
||||
},
|
||||
{
|
||||
name: 'clientId',
|
||||
type: 'varchar',
|
||||
length: '255',
|
||||
},
|
||||
{
|
||||
name: 'clientSecret',
|
||||
type: 'varchar',
|
||||
length: '255',
|
||||
},
|
||||
{
|
||||
name: 'redirectUri',
|
||||
type: 'text',
|
||||
},
|
||||
{
|
||||
name: 'creator',
|
||||
type: 'varchar',
|
||||
length: '255',
|
||||
},
|
||||
{
|
||||
name: 'permissions',
|
||||
type: 'text'
|
||||
},
|
||||
{
|
||||
name: 'instance',
|
||||
type: 'varchar',
|
||||
length: '255',
|
||||
isNullable: true
|
||||
},
|
||||
{
|
||||
name: 'overwrite',
|
||||
type: 'boolean',
|
||||
isNullable: true,
|
||||
},
|
||||
{
|
||||
name: 'subreddits',
|
||||
type: 'text',
|
||||
isNullable: true
|
||||
},
|
||||
{
|
||||
name: 'guests',
|
||||
type: 'text',
|
||||
isNullable: true
|
||||
},
|
||||
{
|
||||
name: 'initialConfig',
|
||||
type: 'text',
|
||||
isNullable: true
|
||||
},
|
||||
createdAtColumn(dbType),
|
||||
timeAtColumn('expiresAt', dbType, true)
|
||||
],
|
||||
}),
|
||||
true,
|
||||
true,
|
||||
true
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
public async down(queryRunner: QueryRunner): Promise<void> {
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
import {MigrationInterface, QueryRunner} from "typeorm"
|
||||
import {tableHasData} from "../MigrationUtil";
|
||||
|
||||
export class removeInvites1660588028346 implements MigrationInterface {
|
||||
|
||||
public async up(queryRunner: QueryRunner): Promise<void> {
|
||||
const dbType = queryRunner.connection.driver.options.type;
|
||||
|
||||
if (dbType === 'sqljs' && await queryRunner.hasTable('Invite')) {
|
||||
// const countRes = await queryRunner.query('select count(*) from Invite');
|
||||
// let hasNoRows = null;
|
||||
// if (Array.isArray(countRes) && countRes[0] !== null) {
|
||||
// const {
|
||||
// 'count(*)': count
|
||||
// } = countRes[0] || {};
|
||||
// hasNoRows = count === 0;
|
||||
// }
|
||||
|
||||
const hasRows = await tableHasData(queryRunner, 'Invite');
|
||||
|
||||
if (hasRows === false) {
|
||||
await queryRunner.dropTable('Invite');
|
||||
} else {
|
||||
let prefix = hasRows === null ? `Could not determine if SQL.js 'web' database had the table 'Invite' --` : `SQL.js 'web' database had the table 'Invite' and it is not empty --`
|
||||
queryRunner.connection.logger.logSchemaBuild(`${prefix} This table is being replaced by 'BotInvite' table in 'app' database. If you have existing invites you will need to recreate them.`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public async down(queryRunner: QueryRunner): Promise<void> {
|
||||
}
|
||||
|
||||
}
|
||||
133
src/Common/Typings/support.d.ts
vendored
133
src/Common/Typings/support.d.ts
vendored
@@ -3,9 +3,11 @@ declare module 'snoowrap/dist/errors' {
|
||||
export interface InvalidUserError extends Error {
|
||||
|
||||
}
|
||||
|
||||
export interface NoCredentialsError extends Error {
|
||||
|
||||
}
|
||||
|
||||
export interface InvalidMethodCallError extends Error {
|
||||
|
||||
}
|
||||
@@ -26,9 +28,138 @@ declare module 'snoowrap/dist/errors' {
|
||||
}
|
||||
|
||||
declare module 'winston-null' {
|
||||
import TransportStream from "winston-transport";
|
||||
import TransportStream from "winston-transport";
|
||||
|
||||
export class NullTransport extends TransportStream {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
declare module '@nlpjs/*' {
|
||||
|
||||
declare interface SentimentResult {
|
||||
score: number,
|
||||
average: number,
|
||||
numWords: number,
|
||||
numHits: number,
|
||||
type: string,
|
||||
language: string
|
||||
}
|
||||
|
||||
declare interface NLPSentimentResult extends Omit<SentimentResult, 'language'> {
|
||||
vote: string
|
||||
locale: string
|
||||
}
|
||||
|
||||
|
||||
declare module '@nlpjs/language' {
|
||||
|
||||
export interface LanguageType {
|
||||
alpha3: string,
|
||||
alpha2: string,
|
||||
language: string,
|
||||
}
|
||||
|
||||
export interface LanguageObj {
|
||||
alpha3: string,
|
||||
alpha2: string,
|
||||
name: string,
|
||||
}
|
||||
|
||||
export interface LanguageGuess extends LanguageType {
|
||||
score: number
|
||||
}
|
||||
|
||||
export class Language {
|
||||
guess(val: string, allowedList?: string[] | null, limit?: number): LanguageGuess[];
|
||||
|
||||
guessBest(val: string, allowedList?: string[] | null): LanguageGuess;
|
||||
|
||||
/**
|
||||
* Key is alpha2 lang IE en es de fr
|
||||
* */
|
||||
languagesAlpha2: Record<string, LanguageObj>;
|
||||
/**
|
||||
* Key is alpha3 lang IE eng spa deu fra
|
||||
* */
|
||||
languagesAlpha3: Record<string, LanguageObj>;
|
||||
}
|
||||
}
|
||||
|
||||
declare module '@nlpjs/sentiment' {
|
||||
|
||||
declare interface SentimentPipelineResult {
|
||||
utterance: string
|
||||
locale: string
|
||||
settings: { tag: string }
|
||||
tokens: string[]
|
||||
sentiment: SentimentResult
|
||||
}
|
||||
|
||||
declare interface SentimentPipelineInput {
|
||||
utterance: string
|
||||
locale: string
|
||||
|
||||
[key: string]: any
|
||||
}
|
||||
|
||||
export class SentimentAnalyzer {
|
||||
constructor(settings?: { language?: string }, container?: any)
|
||||
|
||||
container: any
|
||||
|
||||
process(srcInput: SentimentPipelineInput, settings?: object): Promise<SentimentPipelineResult>
|
||||
}
|
||||
}
|
||||
|
||||
declare module '@nlpjs/nlp' {
|
||||
|
||||
declare interface NlpResult {
|
||||
locale: string
|
||||
language: string
|
||||
languageGuessed: boolean
|
||||
sentiment: NLPSentimentResult
|
||||
}
|
||||
|
||||
export class Nlp {
|
||||
settings: any;
|
||||
nluManager: any;
|
||||
|
||||
constructor(settings?: { language?: string }, container?: any)
|
||||
|
||||
// locale language languageGuessed sentiment
|
||||
process(locale: string, utterance?: string, srcContext?: object, settings?: object): Promise<NlpResult>
|
||||
addLanguage(locale: string)
|
||||
train(): Promise<any>;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
declare module '@nlpjs/lang-es' {
|
||||
export const LangEs: any
|
||||
}
|
||||
declare module '@nlpjs/lang-en' {
|
||||
export const LangEn: any
|
||||
}
|
||||
declare module '@nlpjs/lang-de' {
|
||||
export const LangDe: any
|
||||
}
|
||||
declare module '@nlpjs/lang-fr' {
|
||||
export const LangFr: any
|
||||
}
|
||||
declare module '@nlpjs/nlu' {
|
||||
export const Nlu: any
|
||||
}
|
||||
|
||||
declare module '@nlpjs/core' {
|
||||
export const Container: any
|
||||
export const containerBootstrap: any
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
declare module 'wink-sentiment' {
|
||||
function sentiment(phrase: string): { score: number, normalizedScore: number, tokenizedPhrase: any[] };
|
||||
|
||||
export default sentiment;
|
||||
}
|
||||
|
||||
50
src/Common/Typings/vader-sentiment.d.ts
vendored
Normal file
50
src/Common/Typings/vader-sentiment.d.ts
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
declare module 'vader-sentiment' {
|
||||
export const REGEX_REMOVE_PUNCTUATION: RegExp;
|
||||
export const B_INCR: number;
|
||||
export const B_DECR: number;
|
||||
export const C_INCR: number;
|
||||
export const N_SCALER: number;
|
||||
export const PUNC_LIST: string[];
|
||||
export const NEGATE: string[];
|
||||
export const BOOSTER_DICT: Record<string, number>;
|
||||
export const SPECIAL_CASE_IDIOMS: Record<string, number>;
|
||||
|
||||
export interface Scores {
|
||||
neg: number
|
||||
neu: number
|
||||
pos: number
|
||||
compound: number
|
||||
}
|
||||
|
||||
export function negated(input_words: string[], include_nt: boolean = true): boolean;
|
||||
export function normalize(score: number, alpha: number): number;
|
||||
export function allcap_differential(words: string[]): boolean;
|
||||
export function scalar_inc_dec(word: string, valence: number, is_cap_diff: boolean): number
|
||||
export function is_upper_function(word: string): boolean
|
||||
|
||||
export class SentiText {
|
||||
public text: string;
|
||||
public words_and_emoticons: string[];
|
||||
public is_cap_diff: boolean;
|
||||
|
||||
constructor(text: string);
|
||||
|
||||
get_words_plus_punc(): Record<string, string>;
|
||||
get_words_and_emoticons(): string[];
|
||||
}
|
||||
|
||||
export class SentimentIntensityAnalyzer {
|
||||
|
||||
static polarity_scores(text: string): Scores;
|
||||
static sentiment_valence(valence: number, sentiText: SentiText, item: string, index: number, sentiments: number[]);
|
||||
static least_check(valence: number, words_and_emoticons: string[], index: number): number;
|
||||
static but_check(words_and_emoticons: string[], sentiments: number[]): number[]
|
||||
static idioms_check(valence: number, words_and_emoticons: string[], index: number): number;
|
||||
static never_check(valence: number, words_and_emoticons: string[], start_i: number, index: number): number
|
||||
static punctuation_emphasis(sum_s: any, text: string);
|
||||
static amplify_ep(text: string): number;
|
||||
static amplify_qm(text: string): number;
|
||||
static sift_sentiment_scores(sentiments: number[]): number[];
|
||||
static score_valence(sentiments: number[], text: string): Scores;
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
import { ISession } from "connect-typeorm";
|
||||
import { Column, Entity, Index, PrimaryColumn } from "typeorm";
|
||||
import { Column, Entity, Index, PrimaryColumn, DeleteDateColumn } from "typeorm";
|
||||
@Entity()
|
||||
export class ClientSession implements ISession {
|
||||
@Index()
|
||||
@@ -12,6 +12,6 @@ export class ClientSession implements ISession {
|
||||
@Column("text")
|
||||
public json = "";
|
||||
|
||||
@Column({ name: 'destroyedAt', nullable: true })
|
||||
@DeleteDateColumn({ name: 'destroyedAt', nullable: true })
|
||||
destroyedAt?: Date;
|
||||
}
|
||||
|
||||
@@ -107,8 +107,7 @@ var bmvbhash_even = function(data: BlockImageData, bits: number) {
|
||||
return bits_to_hexhash(result);
|
||||
};
|
||||
|
||||
var bmvbhash = function(data: BlockImageData, bits: number) {
|
||||
var result = [];
|
||||
var bmvbhash = function(data: BlockImageData, bits: number, calculateFlipped: boolean = false): string | [string, string] {
|
||||
|
||||
var i, j, x, y;
|
||||
var block_width, block_height;
|
||||
@@ -198,30 +197,51 @@ var bmvbhash = function(data: BlockImageData, bits: number) {
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < bits; i++) {
|
||||
for (j = 0; j < bits; j++) {
|
||||
result.push(blocks[i][j]);
|
||||
const blocksFlipped: number[][] | undefined = calculateFlipped ? [] : undefined;
|
||||
if(blocksFlipped !== undefined) {
|
||||
for(const row of blocks) {
|
||||
const flippedRow = [...row];
|
||||
flippedRow.reverse();
|
||||
blocksFlipped.push(flippedRow);
|
||||
}
|
||||
}
|
||||
|
||||
translate_blocks_to_bits(result, block_width * block_height);
|
||||
return bits_to_hexhash(result);
|
||||
if(blocksFlipped !== undefined) {
|
||||
const result = [];
|
||||
const resultFlip = [];
|
||||
for (i = 0; i < bits; i++) {
|
||||
for (j = 0; j < bits; j++) {
|
||||
result.push(blocks[i][j]);
|
||||
resultFlip.push(blocksFlipped[i][j])
|
||||
}
|
||||
}
|
||||
|
||||
translate_blocks_to_bits(result, block_width * block_height);
|
||||
translate_blocks_to_bits(resultFlip, block_width * block_height);
|
||||
return [bits_to_hexhash(result), bits_to_hexhash(resultFlip)];
|
||||
} else {
|
||||
const result = [];
|
||||
for (i = 0; i < bits; i++) {
|
||||
for (j = 0; j < bits; j++) {
|
||||
result.push(blocks[i][j]);
|
||||
}
|
||||
}
|
||||
|
||||
translate_blocks_to_bits(result, block_width * block_height);
|
||||
return bits_to_hexhash(result);
|
||||
}
|
||||
};
|
||||
|
||||
var blockhashData = function(imgData: BlockImageData, bits: number, method: number) {
|
||||
var hash;
|
||||
var blockhashData = function(imgData: BlockImageData, bits: number, method: number, calculateFlipped: boolean): string | [string, string] {
|
||||
|
||||
if (method === 1) {
|
||||
hash = bmvbhash_even(imgData, bits);
|
||||
return bmvbhash_even(imgData, bits);
|
||||
}
|
||||
else if (method === 2) {
|
||||
hash = bmvbhash(imgData, bits);
|
||||
}
|
||||
else {
|
||||
throw new Error("Bad hashing method");
|
||||
return bmvbhash(imgData, bits, calculateFlipped);
|
||||
}
|
||||
|
||||
return hash;
|
||||
throw new Error("Bad hashing method");
|
||||
};
|
||||
|
||||
export const blockhash = async function(src: Sharp, bits: number, method: number = 2): Promise<string> {
|
||||
@@ -230,5 +250,14 @@ export const blockhash = async function(src: Sharp, bits: number, method: number
|
||||
width: info.width,
|
||||
height: info.height,
|
||||
data: buff,
|
||||
}, bits, method);
|
||||
}, bits, method, false) as string;
|
||||
};
|
||||
|
||||
export const blockhashAndFlipped = async function(src: Sharp, bits: number, method: number = 2): Promise<[string, string]> {
|
||||
const {data: buff, info} = await src.ensureAlpha().raw().toBuffer({resolveWithObject: true});
|
||||
return blockhashData({
|
||||
width: info.width,
|
||||
height: info.height,
|
||||
data: buff,
|
||||
}, bits, method, true) as [string, string];
|
||||
};
|
||||
|
||||
@@ -3,7 +3,17 @@ import path from "path";
|
||||
import {FilterCriteriaDefaults} from "./Infrastructure/Filters/FilterShapes";
|
||||
|
||||
export const cacheOptDefaults = {ttl: 60, max: 500, checkPeriod: 600};
|
||||
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600, selfTTL: 60};
|
||||
export const cacheTTLDefaults = {
|
||||
authorTTL: 60,
|
||||
userNotesTTL: 300,
|
||||
modNotesTTL: 60,
|
||||
wikiTTL: 300,
|
||||
submissionTTL: 60,
|
||||
commentTTL: 60,
|
||||
filterCriteriaTTL: 60,
|
||||
subredditTTL: 600,
|
||||
selfTTL: 60
|
||||
};
|
||||
|
||||
export const createHistoricalDisplayDefaults = (): HistoricalStatsDisplay => ({
|
||||
checksRunTotal: 0,
|
||||
@@ -32,4 +42,4 @@ export const filterCriteriaDefault: FilterCriteriaDefaults = {
|
||||
export const defaultDataDir = path.resolve(__dirname, '../..');
|
||||
export const defaultConfigFilenames = ['config.json', 'config.yaml'];
|
||||
|
||||
export const VERSION = '0.10.12';
|
||||
export const VERSION = '0.11.4';
|
||||
|
||||
@@ -6,7 +6,6 @@ import Comment from "snoowrap/dist/objects/Comment";
|
||||
import RedditUser from "snoowrap/dist/objects/RedditUser";
|
||||
import {DataSource} from "typeorm";
|
||||
import {JsonOperatorConfigDocument, YamlOperatorConfigDocument} from "./Config/Operator";
|
||||
import {CommentCheckJson, SubmissionCheckJson} from "../Check";
|
||||
import {SafeDictionary} from "ts-essentials";
|
||||
import {RuleResultEntity} from "./Entities/RuleResultEntity";
|
||||
import {Dayjs} from "dayjs";
|
||||
@@ -43,8 +42,16 @@ import {
|
||||
ItemOptions
|
||||
} from "./Infrastructure/Filters/FilterShapes";
|
||||
import {LoggingOptions, LogLevel, StrongLoggingOptions} from "./Infrastructure/Logging";
|
||||
import {DatabaseConfig, DatabaseDriver, DatabaseDriverConfig, DatabaseDriverType} from "./Infrastructure/Database";
|
||||
import {
|
||||
DatabaseConfig,
|
||||
DatabaseDriver,
|
||||
DatabaseDriverConfig,
|
||||
DatabaseDriverType
|
||||
} from "./Infrastructure/Database";
|
||||
import {ActivityType} from "./Infrastructure/Reddit";
|
||||
import {InfluxDB, WriteApi} from "@influxdata/influxdb-client";
|
||||
import {InfluxConfig} from "./Influx/interfaces";
|
||||
import {InfluxClient} from "./Influx/InfluxClient";
|
||||
|
||||
|
||||
export interface ReferenceSubmission {
|
||||
@@ -461,6 +468,17 @@ export interface TTLConfig {
|
||||
* @default 50
|
||||
* */
|
||||
selfTTL?: number | boolean
|
||||
|
||||
/**
|
||||
* Amount of time, in seconds, Mod Notes should be cached
|
||||
*
|
||||
* * If `0` or `true` will cache indefinitely (not recommended)
|
||||
* * If `false` will not cache
|
||||
*
|
||||
* @examples [60]
|
||||
* @default 60
|
||||
* */
|
||||
modNotesTTL?: number | boolean;
|
||||
}
|
||||
|
||||
export interface CacheConfig extends TTLConfig {
|
||||
@@ -714,19 +732,14 @@ export interface SearchAndReplaceRegExp {
|
||||
}
|
||||
|
||||
export interface NamedGroup {
|
||||
[name: string]: string
|
||||
}
|
||||
|
||||
export interface GlobalRegExResult {
|
||||
match: string,
|
||||
groups: string[],
|
||||
named: NamedGroup | undefined
|
||||
[name: string]: any
|
||||
}
|
||||
|
||||
export interface RegExResult {
|
||||
matched: boolean,
|
||||
matches: string[],
|
||||
global: GlobalRegExResult[]
|
||||
match: string,
|
||||
groups: string[],
|
||||
index: number
|
||||
named: NamedGroup
|
||||
}
|
||||
|
||||
export type StrongCache = {
|
||||
@@ -737,6 +750,7 @@ export type StrongCache = {
|
||||
commentTTL: number | boolean,
|
||||
subredditTTL: number | boolean,
|
||||
selfTTL: number | boolean,
|
||||
modNotesTTL: number | boolean,
|
||||
filterCriteriaTTL: number | boolean,
|
||||
provider: CacheOptions
|
||||
actionedEventsMax?: number,
|
||||
@@ -1107,6 +1121,8 @@ export interface BotInstanceJsonConfig {
|
||||
retention?: EventRetentionPolicyRange
|
||||
}
|
||||
|
||||
influxConfig?: InfluxConfig
|
||||
|
||||
/**
|
||||
* Settings related to bot behavior for subreddits it is managing
|
||||
* */
|
||||
@@ -1362,6 +1378,8 @@ export interface OperatorJsonConfig {
|
||||
retention?: EventRetentionPolicyRange
|
||||
}
|
||||
|
||||
influxConfig?: InfluxConfig
|
||||
|
||||
/**
|
||||
* Set global snoowrap options as well as default snoowrap config for all bots that don't specify their own
|
||||
* */
|
||||
@@ -1461,20 +1479,6 @@ export interface OperatorJsonConfig {
|
||||
storage?: 'database' | 'cache'
|
||||
}
|
||||
|
||||
/**
|
||||
* Settings related to oauth flow invites
|
||||
* */
|
||||
invites?: {
|
||||
/**
|
||||
* Number of seconds an invite should be valid for
|
||||
*
|
||||
* If `0` or not specified (default) invites do not expire
|
||||
*
|
||||
* @default 0
|
||||
* @examples [0]
|
||||
* */
|
||||
maxAge?: number
|
||||
}
|
||||
/**
|
||||
* The default log level to filter to in the web interface
|
||||
*
|
||||
@@ -1530,11 +1534,30 @@ export interface OperatorJsonConfig {
|
||||
secret?: string,
|
||||
/**
|
||||
* A friendly name for this server. This will override `friendly` in `BotConnection` if specified.
|
||||
*
|
||||
* If none is set one is randomly generated.
|
||||
* */
|
||||
friendly?: string,
|
||||
}
|
||||
|
||||
credentials?: ThirdPartyCredentialsJsonConfig
|
||||
|
||||
dev?: {
|
||||
/**
|
||||
* Invoke `process.memoryUsage()` on an interval and send metrics to Influx
|
||||
*
|
||||
* Only works if Influx config is provided
|
||||
* */
|
||||
monitorMemory?: boolean
|
||||
/**
|
||||
* Interval, in seconds, to invoke `process.memoryUsage()` at
|
||||
*
|
||||
* Defaults to 15 seconds
|
||||
*
|
||||
* @default 15
|
||||
* */
|
||||
monitorMemoryInterval?: number
|
||||
};
|
||||
}
|
||||
|
||||
export interface RequiredOperatorRedditCredentials extends RedditCredentials {
|
||||
@@ -1568,6 +1591,7 @@ export interface BotInstanceConfig extends BotInstanceJsonConfig {
|
||||
database: DataSource
|
||||
snoowrap: SnoowrapOptions
|
||||
databaseStatisticsDefaults: DatabaseStatisticsOperatorConfig
|
||||
opInflux?: InfluxClient,
|
||||
subreddits: {
|
||||
names?: string[],
|
||||
exclude?: string[],
|
||||
@@ -1608,6 +1632,7 @@ export interface OperatorConfig extends OperatorJsonConfig {
|
||||
retention?: EventRetentionPolicyRange
|
||||
}
|
||||
database: DataSource
|
||||
influx?: InfluxClient,
|
||||
web: {
|
||||
database: DataSource,
|
||||
databaseConfig: {
|
||||
@@ -1622,9 +1647,6 @@ export interface OperatorConfig extends OperatorJsonConfig {
|
||||
secret?: string,
|
||||
storage?: 'database' | 'cache'
|
||||
},
|
||||
invites: {
|
||||
maxAge: number
|
||||
},
|
||||
logLevel?: LogLevel,
|
||||
maxLogs: number,
|
||||
clients: BotConnection[]
|
||||
@@ -1639,6 +1661,10 @@ export interface OperatorConfig extends OperatorJsonConfig {
|
||||
databaseStatisticsDefaults: DatabaseStatisticsOperatorConfig
|
||||
bots: BotInstanceConfig[]
|
||||
credentials: ThirdPartyCredentialsJsonConfig
|
||||
dev: {
|
||||
monitorMemory: boolean
|
||||
monitorMemoryInterval: number
|
||||
}
|
||||
}
|
||||
|
||||
export interface OperatorFileConfig {
|
||||
@@ -1676,6 +1702,7 @@ export interface LogInfo {
|
||||
labels?: string[]
|
||||
bot?: string
|
||||
user?: string
|
||||
transport?: string[]
|
||||
}
|
||||
|
||||
export interface ActionResult extends ActionProcessResult {
|
||||
@@ -1888,8 +1915,6 @@ export interface TextMatchOptions {
|
||||
caseSensitive?: boolean
|
||||
}
|
||||
|
||||
export type ActivityCheckJson = SubmissionCheckJson | CommentCheckJson;
|
||||
|
||||
export interface PostBehaviorOptionConfig {
|
||||
recordTo?: RecordOutputOption
|
||||
behavior?: PostBehaviorType
|
||||
@@ -1942,7 +1967,6 @@ export interface ActivityDispatch extends Omit<ActivityDispatchConfig, 'delay'|
|
||||
author: string
|
||||
delay: Duration
|
||||
tardyTolerant?: boolean | Duration
|
||||
processing: boolean
|
||||
action?: string
|
||||
type: ActivitySourceTypes
|
||||
dryRun?: boolean
|
||||
|
||||
@@ -18,7 +18,10 @@ import {RepostRuleJSONConfig} from "../Rule/RepostRule";
|
||||
import {DispatchActionJson} from "../Action/DispatchAction";
|
||||
import {CancelDispatchActionJson} from "../Action/CancelDispatchAction";
|
||||
import {ContributorActionJson} from "../Action/ContributorAction";
|
||||
import {SentimentRuleJSONConfig} from "../Rule/SentimentRule";
|
||||
import {ModNoteActionJson} from "../Action/ModNoteAction";
|
||||
import {IncludesData} from "./Infrastructure/Includes";
|
||||
|
||||
export type RuleObjectJsonTypes = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | RepostRuleJSONConfig
|
||||
export type RuleObjectJsonTypes = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | RepostRuleJSONConfig | SentimentRuleJSONConfig
|
||||
|
||||
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | UserFlairActionJson | DispatchActionJson | CancelDispatchActionJson | ContributorActionJson | string;
|
||||
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | UserFlairActionJson | DispatchActionJson | CancelDispatchActionJson | ContributorActionJson | ModNoteActionJson | string | IncludesData;
|
||||
|
||||
@@ -1,24 +1,30 @@
|
||||
import winston, {Logger} from "winston";
|
||||
import {
|
||||
asNamedCriteria,
|
||||
buildCacheOptionsFromProvider, buildCachePrefix, buildFilter, castToBool,
|
||||
createAjvFactory, fileOrDirectoryIsWriteable,
|
||||
asNamedCriteria, asWikiContext,
|
||||
buildCachePrefix, buildFilter, castToBool,
|
||||
createAjvFactory, fileOrDirectoryIsWriteable, generateRandomName,
|
||||
mergeArr, mergeFilters,
|
||||
normalizeName,
|
||||
overwriteMerge,
|
||||
parseBool, randomId,
|
||||
readConfigFile, removeFromSourceIfKeysExistsInDestination,
|
||||
parseBool, parseExternalUrl, parseUrlContext, parseWikiContext, randomId,
|
||||
readConfigFile,
|
||||
removeUndefinedKeys, resolvePathFromEnvWithRelative
|
||||
} from "./util";
|
||||
import {CommentCheck} from "./Check/CommentCheck";
|
||||
import {SubmissionCheck} from "./Check/SubmissionCheck";
|
||||
|
||||
import Ajv, {Schema} from 'ajv';
|
||||
import * as appSchema from './Schema/App.json';
|
||||
import * as runSchema from './Schema/Run.json';
|
||||
import * as checkSchema from './Schema/Check.json';
|
||||
import * as operatorSchema from './Schema/OperatorConfig.json';
|
||||
import {JSONConfig} from "./JsonConfig";
|
||||
//import * as rulesetSchema from './Schema/RuleSet.json';
|
||||
import {SubredditConfigHydratedData, SubredditConfigData} from "./SubredditConfigData";
|
||||
import LoggedError from "./Utils/LoggedError";
|
||||
import {CheckStructuredJson} from "./Check";
|
||||
import {
|
||||
ActivityCheckConfigData,
|
||||
ActivityCheckConfigHydratedData,
|
||||
CheckConfigHydratedData,
|
||||
CheckConfigObject
|
||||
} from "./Check";
|
||||
import {
|
||||
DEFAULT_POLLING_INTERVAL,
|
||||
DEFAULT_POLLING_LIMIT,
|
||||
@@ -37,15 +43,11 @@ import {
|
||||
OperatorFileConfig,
|
||||
PostBehavior
|
||||
} from "./Common/interfaces";
|
||||
import {isRuleSetJSON, RuleSetJson, RuleSetObjectJson} from "./Rule/RuleSet";
|
||||
import {isRuleSetJSON, RuleSetConfigData, RuleSetConfigHydratedData, RuleSetConfigObject} from "./Rule/RuleSet";
|
||||
import deepEqual from "fast-deep-equal";
|
||||
import {
|
||||
ActionJson
|
||||
} from "./Common/types";
|
||||
import {isActionJson} from "./Action";
|
||||
import {getLogger, resolveLogDir} from "./Utils/loggerFactory";
|
||||
import {getLogger} from "./Utils/loggerFactory";
|
||||
import {GetEnvVars} from 'env-cmd';
|
||||
import {operatorConfig} from "./Utils/CommandConfig";
|
||||
import merge from 'deepmerge';
|
||||
import * as process from "process";
|
||||
import {
|
||||
@@ -59,57 +61,54 @@ import objectHash from "object-hash";
|
||||
import {
|
||||
createAppDatabaseConnection,
|
||||
createDatabaseConfig,
|
||||
createDatabaseConnection,
|
||||
createWebDatabaseConnection
|
||||
} from "./Utils/databaseUtils";
|
||||
import path from 'path';
|
||||
import {
|
||||
JsonOperatorConfigDocument,
|
||||
OperatorConfigDocumentInterface,
|
||||
YamlOperatorConfigDocument
|
||||
} from "./Common/Config/Operator";
|
||||
import {
|
||||
ConfigDocumentInterface
|
||||
} from "./Common/Config/AbstractConfigDocument";
|
||||
import {Document as YamlDocument} from "yaml";
|
||||
import {SimpleError} from "./Utils/Errors";
|
||||
import {CMError, SimpleError} from "./Utils/Errors";
|
||||
import {ErrorWithCause} from "pony-cause";
|
||||
import {RunJson, RunStructuredJson} from "./Run";
|
||||
import {RunConfigHydratedData, RunConfigData, RunConfigObject} from "./Run";
|
||||
import {AuthorRuleConfig} from "./Rule/AuthorRule";
|
||||
import {
|
||||
CacheProvider, ConfigFormat,
|
||||
CacheProvider, ConfigFormat, ConfigFragmentValidationFunc,
|
||||
PollOn
|
||||
} from "./Common/Infrastructure/Atomic";
|
||||
import {
|
||||
AuthorOptions,
|
||||
FilterCriteriaDefaults,
|
||||
FilterCriteriaDefaultsJson,
|
||||
FilterOptionsJson,
|
||||
MaybeAnonymousCriteria,
|
||||
MaybeAnonymousOrStringCriteria, MinimalOrFullFilter, MinimalOrFullFilterJson, NamedCriteria
|
||||
} from "./Common/Infrastructure/Filters/FilterShapes";
|
||||
import {AuthorCriteria, TypedActivityState, TypedActivityStates} from "./Common/Infrastructure/Filters/FilterCriteria";
|
||||
import {AuthorCriteria, TypedActivityState} from "./Common/Infrastructure/Filters/FilterCriteria";
|
||||
import {StrongLoggingOptions} from "./Common/Infrastructure/Logging";
|
||||
import {DatabaseDriver, DatabaseDriverConfig, DatabaseDriverType} from "./Common/Infrastructure/Database";
|
||||
import {DatabaseDriver, DatabaseDriverType} from "./Common/Infrastructure/Database";
|
||||
import {parseFromJsonOrYamlToObject} from "./Common/Config/ConfigUtil";
|
||||
import {RunnableBaseJson, RunnableBaseOptions, StructuredRunnableBase} from "./Common/Infrastructure/Runnable";
|
||||
import {RunnableBaseJson, StructuredRunnableBase} from "./Common/Infrastructure/Runnable";
|
||||
import {
|
||||
RuleJson,
|
||||
RuleObjectJson,
|
||||
StructuredRuleObjectJson,
|
||||
StructuredRuleSetObjectJson
|
||||
RuleConfigData, RuleConfigHydratedData,
|
||||
RuleConfigObject,
|
||||
} from "./Common/Infrastructure/RuleShapes";
|
||||
import {ActionObjectJson, StructuredActionObjectJson} from "./Common/Infrastructure/ActionShapes";
|
||||
import {
|
||||
ActionConfigHydratedData, ActionConfigObject,
|
||||
} from "./Common/Infrastructure/ActionShapes";
|
||||
import {SubredditResources} from "./Subreddit/SubredditResources";
|
||||
import {asIncludesData, IncludesData, IncludesString} from "./Common/Infrastructure/Includes";
|
||||
import ConfigParseError from "./Utils/ConfigParseError";
|
||||
import {InfluxClient} from "./Common/Influx/InfluxClient";
|
||||
import {BotInvite} from "./Common/Entities/BotInvite";
|
||||
|
||||
export interface ConfigBuilderOptions {
|
||||
logger: Logger,
|
||||
}
|
||||
|
||||
export const validateJson = (config: object, schema: Schema, logger: Logger): any => {
|
||||
export const validateJson = <T>(config: object, schema: Schema, logger: Logger): T => {
|
||||
const ajv = createAjvFactory(logger);
|
||||
const valid = ajv.validate(schema, config);
|
||||
if (valid) {
|
||||
return config;
|
||||
return config as unknown as T;
|
||||
} else {
|
||||
logger.error('Json config was not valid. Please use schema to check validity.', {leaf: 'Config'});
|
||||
if (Array.isArray(ajv.errors)) {
|
||||
@@ -162,30 +161,217 @@ export class ConfigBuilder {
|
||||
|
||||
constructor(options: ConfigBuilderOptions) {
|
||||
|
||||
this.configLogger = options.logger.child({leaf: 'Config'}, mergeArr);
|
||||
this.configLogger = options.logger.child({labels: ['Config']}, mergeArr);
|
||||
this.logger = options.logger;
|
||||
}
|
||||
|
||||
validateJson(config: object): JSONConfig {
|
||||
const validConfig = validateJson(config, appSchema, this.logger);
|
||||
return validConfig as JSONConfig;
|
||||
validateJson(config: object): SubredditConfigData {
|
||||
return validateJson<SubredditConfigData>(config, appSchema, this.logger);
|
||||
}
|
||||
|
||||
parseToStructured(config: JSONConfig, filterCriteriaDefaultsFromBot?: FilterCriteriaDefaults, postCheckBehaviorDefaultsFromBot: PostBehavior = {}): RunStructuredJson[] {
|
||||
let namedRules: Map<string, RuleObjectJson> = new Map();
|
||||
let namedActions: Map<string, ActionObjectJson> = new Map();
|
||||
const {checks = [], runs = [], filterCriteriaDefaults, postCheckBehaviorDefaults} = config;
|
||||
async hydrateConfigFragment<T>(val: IncludesData | string | object, resource: SubredditResources, validateFunc?: ConfigFragmentValidationFunc): Promise<T[]> {
|
||||
let includes: IncludesData | undefined = undefined;
|
||||
if(typeof val === 'string') {
|
||||
const strContextResult = parseUrlContext(val);
|
||||
if(strContextResult !== undefined) {
|
||||
this.configLogger.debug(`Detected ${asWikiContext(strContextResult) !== undefined ? 'REDDIT WIKI' : 'URL'} type Config Fragment from string: ${val}`);
|
||||
includes = {
|
||||
path: val as IncludesString
|
||||
};
|
||||
} else {
|
||||
this.configLogger.debug(`Did not detect Config Fragment as a URL resource: ${val}`);
|
||||
}
|
||||
} else if (asIncludesData(val)) {
|
||||
includes = val;
|
||||
const strContextResult = parseUrlContext(val.path);
|
||||
if(strContextResult === undefined) {
|
||||
throw new ConfigParseError(`Could not detect Config Fragment path as a valid URL Resource. Resource must be prefixed with either 'url:' or 'wiki:' -- ${val.path}`);
|
||||
}
|
||||
}
|
||||
|
||||
if(includes === undefined) {
|
||||
if(Array.isArray(val)) {
|
||||
return val as unknown as T[];
|
||||
} else {
|
||||
return [val as unknown as T];
|
||||
}
|
||||
}
|
||||
|
||||
const resolvedFragment = await resource.getConfigFragment(includes, validateFunc);
|
||||
if(Array.isArray(resolvedFragment)) {
|
||||
return resolvedFragment
|
||||
}
|
||||
return [resolvedFragment as T];
|
||||
}
|
||||
|
||||
async hydrateConfig(config: SubredditConfigData, resource: SubredditResources): Promise<SubredditConfigHydratedData> {
|
||||
const {
|
||||
runs = [],
|
||||
checks = [],
|
||||
...restConfig
|
||||
} = config;
|
||||
|
||||
if(checks.length > 0 && runs.length > 0) {
|
||||
// cannot have both checks and runs at top-level
|
||||
throw new Error(`Subreddit configuration cannot contain both 'checks' and 'runs' at top-level.`);
|
||||
}
|
||||
|
||||
// TODO consolidate this with parseToStructured
|
||||
const realRuns = runs;
|
||||
if(checks.length > 0) {
|
||||
realRuns.push({name: 'Run1', checks: checks});
|
||||
}
|
||||
|
||||
const hydratedRuns: RunConfigHydratedData[] = [];
|
||||
|
||||
let runIndex = 1;
|
||||
for(const r of realRuns) {
|
||||
|
||||
let hydratedRunArr: RunConfigData | RunConfigData[];
|
||||
|
||||
try {
|
||||
hydratedRunArr = await this.hydrateConfigFragment<RunConfigData>(r, resource, <RunConfigData>(data: object, fetched: boolean) => {
|
||||
if (fetched) {
|
||||
if (Array.isArray(data)) {
|
||||
for (const runData of data) {
|
||||
validateJson<RunConfigData>(runData, runSchema, this.logger);
|
||||
}
|
||||
} else {
|
||||
validateJson<RunConfigData>(data, runSchema, this.logger);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
} catch (e: any) {
|
||||
throw new CMError(`Could not fetch or validate Run #${runIndex}`, {cause: e});
|
||||
}
|
||||
|
||||
for(const hydratedRunVal of hydratedRunArr) {
|
||||
if (typeof hydratedRunVal === 'string') {
|
||||
throw new ConfigParseError(`Run Config Fragment #${runIndex} was not in a recognized Config Fragment format. Given: ${hydratedRunVal}`);
|
||||
}
|
||||
|
||||
// validate run with unhydrated checks
|
||||
const preValidatedRun = hydratedRunVal as RunConfigData;
|
||||
|
||||
const {checks, ...rest} = preValidatedRun;
|
||||
|
||||
const hydratedChecks: CheckConfigHydratedData[] = [];
|
||||
let checkIndex = 1;
|
||||
for (const c of preValidatedRun.checks) {
|
||||
let hydratedCheckDataArr: ActivityCheckConfigHydratedData[];
|
||||
|
||||
try {
|
||||
hydratedCheckDataArr = await this.hydrateConfigFragment<ActivityCheckConfigHydratedData>(c, resource, (data: object, fetched: boolean) => {
|
||||
if (fetched) {
|
||||
if (Array.isArray(data)) {
|
||||
for (const checkObj of data) {
|
||||
validateJson<ActivityCheckConfigHydratedData>(checkObj, checkSchema, this.logger);
|
||||
}
|
||||
} else {
|
||||
validateJson<ActivityCheckConfigHydratedData>(data, checkSchema, this.logger);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
} catch (e: any) {
|
||||
throw new CMError(`Could not fetch or validate Check Config Fragment #${checkIndex} in Run #${runIndex}`, {cause: e});
|
||||
}
|
||||
|
||||
for (const hydratedCheckData of hydratedCheckDataArr) {
|
||||
if (typeof hydratedCheckData === 'string') {
|
||||
throw new ConfigParseError(`Check #${checkIndex} in Run #${runIndex} was not in a recognized include format. Given: ${hydratedCheckData}`);
|
||||
}
|
||||
|
||||
const preValidatedCheck = hydratedCheckData as ActivityCheckConfigHydratedData;
|
||||
|
||||
const {rules, actions, ...rest} = preValidatedCheck;
|
||||
const hydratedCheckConfigData: CheckConfigHydratedData = rest;
|
||||
|
||||
if (rules !== undefined) {
|
||||
const hydratedRulesOrSets: (RuleSetConfigHydratedData | RuleConfigHydratedData)[] = [];
|
||||
|
||||
let ruleIndex = 1;
|
||||
for (const r of rules) {
|
||||
let hydratedRuleOrSetArr: (RuleConfigHydratedData | RuleSetConfigHydratedData)[];
|
||||
try {
|
||||
hydratedRuleOrSetArr = await this.hydrateConfigFragment<(RuleSetConfigHydratedData | RuleConfigHydratedData)>(r, resource);
|
||||
} catch (e: any) {
|
||||
throw new CMError(`Rule Config Fragment #${ruleIndex} in Check #${checkIndex} could not be fetched`, {cause: e});
|
||||
}
|
||||
for (const hydratedRuleOrSet of hydratedRuleOrSetArr) {
|
||||
if (typeof hydratedRuleOrSet === 'string') {
|
||||
hydratedRulesOrSets.push(hydratedRuleOrSet);
|
||||
} else if (isRuleSetJSON(hydratedRuleOrSet)) {
|
||||
const hydratedRulesetRules: RuleConfigHydratedData[] = [];
|
||||
for (const rsr of hydratedRuleOrSet.rules) {
|
||||
const hydratedRuleSetRuleArr = await this.hydrateConfigFragment<RuleConfigHydratedData>(rsr, resource);
|
||||
for(const rsrData of hydratedRuleSetRuleArr) {
|
||||
// either a string or rule data at this point
|
||||
// we will validate the whole check again so this rule will be validated eventually
|
||||
hydratedRulesetRules.push(rsrData)
|
||||
}
|
||||
}
|
||||
hydratedRuleOrSet.rules = hydratedRulesetRules;
|
||||
hydratedRulesOrSets.push(hydratedRuleOrSet);
|
||||
} else {
|
||||
hydratedRulesOrSets.push(hydratedRuleOrSet);
|
||||
}
|
||||
ruleIndex++;
|
||||
}
|
||||
}
|
||||
hydratedCheckConfigData.rules = hydratedRulesOrSets;
|
||||
}
|
||||
|
||||
if (actions !== undefined) {
|
||||
const hydratedActions: ActionConfigHydratedData[] = [];
|
||||
|
||||
let actionIndex = 1;
|
||||
for (const a of actions) {
|
||||
let hydratedActionArr: ActionConfigHydratedData[];
|
||||
try {
|
||||
hydratedActionArr = await this.hydrateConfigFragment<ActionConfigHydratedData>(a, resource);
|
||||
} catch (e: any) {
|
||||
throw new CMError(`Action Config Fragment #${actionIndex} in Check #${checkIndex} could not be fetched`, {cause: e});
|
||||
}
|
||||
for (const hydratedAction of hydratedActionArr) {
|
||||
hydratedActions.push(hydratedAction);
|
||||
actionIndex++;
|
||||
}
|
||||
}
|
||||
hydratedCheckConfigData.actions = hydratedActions;
|
||||
}
|
||||
|
||||
hydratedChecks.push(hydratedCheckConfigData);
|
||||
checkIndex++;
|
||||
}
|
||||
}
|
||||
|
||||
const hydratedRun: RunConfigHydratedData = {...rest, checks: hydratedChecks};
|
||||
|
||||
hydratedRuns.push(hydratedRun);
|
||||
runIndex++;
|
||||
}
|
||||
}
|
||||
|
||||
const hydratedConfig: SubredditConfigHydratedData = {...restConfig, runs: hydratedRuns};
|
||||
|
||||
const validatedHydratedConfig = validateJson<SubredditConfigHydratedData>(hydratedConfig, appSchema, this.logger);
|
||||
|
||||
return validatedHydratedConfig;
|
||||
}
|
||||
|
||||
async parseToStructured(config: SubredditConfigData, resource: SubredditResources, filterCriteriaDefaultsFromBot?: FilterCriteriaDefaults, postCheckBehaviorDefaultsFromBot: PostBehavior = {}): Promise<RunConfigObject[]> {
|
||||
let namedRules: Map<string, RuleConfigObject> = new Map();
|
||||
let namedActions: Map<string, ActionConfigObject> = new Map();
|
||||
const {filterCriteriaDefaults, postCheckBehaviorDefaults} = config;
|
||||
|
||||
const hydratedConfig = await this.hydrateConfig(config, resource);
|
||||
|
||||
const {runs: realRuns = []} = hydratedConfig;
|
||||
|
||||
for(const r of realRuns) {
|
||||
for (const c of r.checks) {
|
||||
const {rules = [], actions = []} = c;
|
||||
@@ -194,11 +380,11 @@ export class ConfigBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
const [namedAuthorFilters, namedItemFilters] = extractNamedFilters({...config, runs: realRuns});
|
||||
const [namedAuthorFilters, namedItemFilters] = extractNamedFilters({...hydratedConfig, runs: realRuns});
|
||||
|
||||
const configFilterDefaults = filterCriteriaDefaults === undefined ? undefined : buildDefaultFilterCriteriaFromJson(filterCriteriaDefaults, namedAuthorFilters, namedItemFilters);
|
||||
|
||||
const structuredRuns: RunStructuredJson[] = [];
|
||||
const structuredRuns: RunConfigObject[] = [];
|
||||
|
||||
const namedFilters = insertNameFilters(namedAuthorFilters, namedItemFilters);
|
||||
|
||||
@@ -210,7 +396,7 @@ export class ConfigBuilder {
|
||||
|
||||
const configFilterDefaultsFromRun = filterCriteriaDefaultsFromRun === undefined ? undefined : buildDefaultFilterCriteriaFromJson(filterCriteriaDefaultsFromRun, namedAuthorFilters, namedItemFilters);
|
||||
|
||||
const structuredChecks: CheckStructuredJson[] = [];
|
||||
const structuredChecks: CheckConfigObject[] = [];
|
||||
for (const c of r.checks) {
|
||||
const {rules = [], actions = [], authorIs = {}, itemIs = []} = c;
|
||||
const strongRules = insertNamedRules(rules, namedRules, namedAuthorFilters, namedItemFilters);
|
||||
@@ -227,7 +413,7 @@ export class ConfigBuilder {
|
||||
rules: strongRules,
|
||||
actions: strongActions,
|
||||
...postCheckBehaviors
|
||||
} as CheckStructuredJson;
|
||||
} as CheckConfigObject;
|
||||
structuredChecks.push(strongCheck);
|
||||
}
|
||||
structuredRuns.push({
|
||||
@@ -283,19 +469,19 @@ export const buildDefaultFilterCriteriaFromJson = (val: FilterCriteriaDefaultsJs
|
||||
return def;
|
||||
}
|
||||
|
||||
export const extractNamedRules = (rules: Array<RuleSetJson | RuleJson>, namedRules: Map<string, RuleObjectJson> = new Map()): Map<string, RuleObjectJson> => {
|
||||
export const extractNamedRules = (rules: Array<RuleSetConfigData | RuleConfigData>, namedRules: Map<string, RuleConfigObject> = new Map()): Map<string, RuleConfigObject> => {
|
||||
//const namedRules = new Map();
|
||||
for (const r of rules) {
|
||||
let rulesToAdd: RuleObjectJson[] = [];
|
||||
let rulesToAdd: RuleConfigObject[] = [];
|
||||
if ((typeof r === 'object')) {
|
||||
if ((r as RuleObjectJson).kind !== undefined) {
|
||||
if ((r as RuleConfigObject).kind !== undefined) {
|
||||
// itsa rule
|
||||
const rule = r as RuleObjectJson;
|
||||
const rule = r as RuleConfigObject;
|
||||
if (rule.name !== undefined) {
|
||||
rulesToAdd.push(rule);
|
||||
}
|
||||
} else {
|
||||
const ruleSet = r as RuleSetJson;
|
||||
const ruleSet = r as RuleSetConfigData;
|
||||
const nestedNamed = extractNamedRules(ruleSet.rules);
|
||||
rulesToAdd = [...nestedNamed.values()];
|
||||
}
|
||||
@@ -306,7 +492,7 @@ export const extractNamedRules = (rules: Array<RuleSetJson | RuleJson>, namedRul
|
||||
const ruleNoName = {...rest};
|
||||
|
||||
if (namedRules.has(normalName)) {
|
||||
const {name: nn, ...ruleRest} = namedRules.get(normalName) as RuleObjectJson;
|
||||
const {name: nn, ...ruleRest} = namedRules.get(normalName) as RuleConfigObject;
|
||||
if (!deepEqual(ruleRest, ruleNoName)) {
|
||||
throw new Error(`Rule names must be unique (case-insensitive). Conflicting name: ${name}`);
|
||||
}
|
||||
@@ -352,7 +538,7 @@ const parseFilterJson = <T>(addToFilter: FilterJsonFuncArg<T>) => (val: MinimalO
|
||||
}
|
||||
}
|
||||
|
||||
export const extractNamedFilters = (config: JSONConfig, namedAuthorFilters: Map<string, NamedCriteria<AuthorCriteria>> = new Map(), namedItemFilters: Map<string, NamedCriteria<TypedActivityState>> = new Map()): [Map<string, NamedCriteria<AuthorCriteria>>, Map<string, NamedCriteria<TypedActivityState>>] => {
|
||||
export const extractNamedFilters = (config: SubredditConfigHydratedData, namedAuthorFilters: Map<string, NamedCriteria<AuthorCriteria>> = new Map(), namedItemFilters: Map<string, NamedCriteria<TypedActivityState>> = new Map()): [Map<string, NamedCriteria<AuthorCriteria>>, Map<string, NamedCriteria<TypedActivityState>>] => {
|
||||
const addToAuthors = addToNamedFilter(namedAuthorFilters, 'authorIs');
|
||||
const addToItems = addToNamedFilter(namedItemFilters, 'itemIs');
|
||||
|
||||
@@ -407,7 +593,7 @@ export const extractNamedFilters = (config: JSONConfig, namedAuthorFilters: Map<
|
||||
parseAuthorIs(filterCriteriaDefaults?.authorIs);
|
||||
parseItemIs(filterCriteriaDefaults?.itemIs);
|
||||
|
||||
for (const r of runs as RunJson[]) {
|
||||
for (const r of runs) {
|
||||
|
||||
const {
|
||||
filterCriteriaDefaults: filterCriteriaDefaultsFromRun
|
||||
@@ -508,13 +694,13 @@ export const insertNameFilters = (namedAuthorFilters: Map<string, NamedCriteria<
|
||||
return runnableOpts;
|
||||
}
|
||||
|
||||
export const insertNamedRules = (rules: Array<RuleSetJson | RuleJson>, namedRules: Map<string, RuleObjectJson> = new Map(), namedAuthorFilters: Map<string, NamedCriteria<AuthorCriteria>> = new Map(), namedItemFilters: Map<string, NamedCriteria<TypedActivityState>> = new Map()): (StructuredRuleSetObjectJson | StructuredRuleObjectJson)[] => {
|
||||
export const insertNamedRules = (rules: Array<RuleSetConfigHydratedData | RuleConfigHydratedData>, namedRules: Map<string, RuleConfigObject> = new Map(), namedAuthorFilters: Map<string, NamedCriteria<AuthorCriteria>> = new Map(), namedItemFilters: Map<string, NamedCriteria<TypedActivityState>> = new Map()): (RuleSetConfigObject | RuleConfigObject)[] => {
|
||||
|
||||
const namedFilters = insertNameFilters(namedAuthorFilters, namedItemFilters);
|
||||
|
||||
const strongRules: (StructuredRuleSetObjectJson | StructuredRuleObjectJson)[] = [];
|
||||
const strongRules: (RuleSetConfigObject | RuleConfigObject)[] = [];
|
||||
for (const r of rules) {
|
||||
let rule: StructuredRuleObjectJson | undefined;
|
||||
let rule: RuleConfigObject | undefined;
|
||||
if (typeof r === 'string') {
|
||||
const foundRule = namedRules.get(r.toLowerCase());
|
||||
if (foundRule === undefined) {
|
||||
@@ -523,20 +709,20 @@ export const insertNamedRules = (rules: Array<RuleSetJson | RuleJson>, namedRule
|
||||
rule = {
|
||||
...foundRule,
|
||||
...namedFilters(foundRule)
|
||||
} as StructuredRuleObjectJson
|
||||
} as RuleConfigObject
|
||||
//strongRules.push(foundRule);
|
||||
} else if (isRuleSetJSON(r)) {
|
||||
const {rules: sr, ...rest} = r;
|
||||
const setRules = insertNamedRules(sr, namedRules, namedAuthorFilters, namedItemFilters);
|
||||
const strongSet = {rules: setRules, ...rest} as StructuredRuleSetObjectJson;
|
||||
const strongSet = {rules: setRules, ...rest} as RuleSetConfigObject;
|
||||
strongRules.push(strongSet);
|
||||
} else {
|
||||
rule = {...r, ...namedFilters(r)} as StructuredRuleObjectJson;
|
||||
rule = {...r, ...namedFilters(r)} as RuleConfigObject;
|
||||
}
|
||||
|
||||
if(rule !== undefined) {
|
||||
if(rule.kind === 'author') {
|
||||
const authorRuleConfig = rule as (StructuredRuleObjectJson & AuthorRuleConfig);
|
||||
const authorRuleConfig = rule as (RuleConfigObject & AuthorRuleConfig);
|
||||
const filters = namedFilters({authorIs: {include: authorRuleConfig.include, exclude: authorRuleConfig.exclude}});
|
||||
const builtFilter = buildFilter(filters.authorIs as MinimalOrFullFilter<AuthorCriteria>);
|
||||
rule = {
|
||||
@@ -546,14 +732,14 @@ export const insertNamedRules = (rules: Array<RuleSetJson | RuleJson>, namedRule
|
||||
exclude: builtFilter.exclude
|
||||
}
|
||||
}
|
||||
strongRules.push(rule as StructuredRuleObjectJson);
|
||||
strongRules.push(rule as RuleConfigObject);
|
||||
}
|
||||
}
|
||||
|
||||
return strongRules;
|
||||
}
|
||||
|
||||
export const extractNamedActions = (actions: Array<ActionJson>, namedActions: Map<string, ActionObjectJson> = new Map()): Map<string, ActionObjectJson> => {
|
||||
export const extractNamedActions = (actions: Array<ActionConfigHydratedData>, namedActions: Map<string, ActionConfigObject> = new Map()): Map<string, ActionConfigObject> => {
|
||||
for (const a of actions) {
|
||||
if (!(typeof a === 'string')) {
|
||||
if (isActionJson(a) && a.name !== undefined) {
|
||||
@@ -562,7 +748,7 @@ export const extractNamedActions = (actions: Array<ActionJson>, namedActions: Ma
|
||||
const actionNoName = {...rest};
|
||||
if (namedActions.has(normalName)) {
|
||||
// @ts-ignore
|
||||
const {name: nn, ...aRest} = namedActions.get(normalName) as ActionObjectJson;
|
||||
const {name: nn, ...aRest} = namedActions.get(normalName) as ActionConfigObject;
|
||||
if (!deepEqual(aRest, actionNoName)) {
|
||||
throw new Error(`Actions names must be unique (case-insensitive). Conflicting name: ${a.name}`);
|
||||
}
|
||||
@@ -575,20 +761,20 @@ export const extractNamedActions = (actions: Array<ActionJson>, namedActions: Ma
|
||||
return namedActions;
|
||||
}
|
||||
|
||||
export const insertNamedActions = (actions: Array<ActionJson>, namedActions: Map<string, ActionObjectJson> = new Map(), namedAuthorFilters: Map<string, NamedCriteria<AuthorCriteria>> = new Map(), namedItemFilters: Map<string, NamedCriteria<TypedActivityState>> = new Map()): Array<StructuredActionObjectJson> => {
|
||||
export const insertNamedActions = (actions: Array<ActionConfigHydratedData>, namedActions: Map<string, ActionConfigObject> = new Map(), namedAuthorFilters: Map<string, NamedCriteria<AuthorCriteria>> = new Map(), namedItemFilters: Map<string, NamedCriteria<TypedActivityState>> = new Map()): Array<ActionConfigObject> => {
|
||||
|
||||
const namedFilters = insertNameFilters(namedAuthorFilters, namedItemFilters);
|
||||
|
||||
const strongActions: Array<StructuredActionObjectJson> = [];
|
||||
const strongActions: Array<ActionConfigObject> = [];
|
||||
for (const a of actions) {
|
||||
if (typeof a === 'string') {
|
||||
const foundAction = namedActions.get(a.toLowerCase());
|
||||
if (foundAction === undefined) {
|
||||
throw new Error(`No named Action with the name ${a} was found`);
|
||||
}
|
||||
strongActions.push({...foundAction, ...namedFilters(foundAction)} as StructuredActionObjectJson);
|
||||
strongActions.push({...foundAction, ...namedFilters(foundAction)} as ActionConfigObject);
|
||||
} else {
|
||||
strongActions.push({...a, ...namedFilters(a)} as StructuredActionObjectJson);
|
||||
strongActions.push({...a, ...namedFilters(a)} as ActionConfigObject);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1024,6 +1210,7 @@ export const buildOperatorConfigWithDefaults = async (data: OperatorJsonConfig):
|
||||
migrations = {},
|
||||
retention,
|
||||
} = {},
|
||||
influxConfig,
|
||||
web: {
|
||||
port = 8085,
|
||||
maxLogs = 200,
|
||||
@@ -1038,9 +1225,6 @@ export const buildOperatorConfigWithDefaults = async (data: OperatorJsonConfig):
|
||||
maxAge: sessionMaxAge = 86400,
|
||||
storage: sessionStorage = undefined,
|
||||
} = {},
|
||||
invites: {
|
||||
maxAge: inviteMaxAge = 0,
|
||||
} = {},
|
||||
clients,
|
||||
credentials: webCredentials,
|
||||
operators,
|
||||
@@ -1053,6 +1237,10 @@ export const buildOperatorConfigWithDefaults = async (data: OperatorJsonConfig):
|
||||
} = {},
|
||||
credentials = {},
|
||||
bots = [],
|
||||
dev: {
|
||||
monitorMemory = false,
|
||||
monitorMemoryInterval = 15
|
||||
} = {},
|
||||
} = data;
|
||||
|
||||
let cache: StrongCache;
|
||||
@@ -1141,6 +1329,37 @@ export const buildOperatorConfigWithDefaults = async (data: OperatorJsonConfig):
|
||||
}
|
||||
const webDbConfig = createDatabaseConfig(realdbConnectionWeb);
|
||||
|
||||
const appDataSource = await createAppDatabaseConnection(dbConfig, appLogger);
|
||||
|
||||
let influx: InfluxClient | undefined = undefined;
|
||||
if(influxConfig !== undefined) {
|
||||
const tags = friendly !== undefined ? {server: friendly} : undefined;
|
||||
influx = new InfluxClient(influxConfig, appLogger, tags);
|
||||
await influx.isReady();
|
||||
}
|
||||
|
||||
/* let friendlyId: string;
|
||||
if (friendly === undefined) {
|
||||
let randFriendly: string = generateRandomName();
|
||||
// see if we can get invites to check for unique name
|
||||
// if this is a new instance will not be able to get it but try anyway
|
||||
try {
|
||||
const inviteRepo = appDataSource.getRepository(BotInvite);
|
||||
const exists = async (name: string) => {
|
||||
const existing = await inviteRepo.findBy({instance: name});
|
||||
return existing.length > 0;
|
||||
}
|
||||
while (await exists(randFriendly)) {
|
||||
randFriendly = generateRandomName();
|
||||
}
|
||||
} catch (e: any) {
|
||||
// something went wrong, just ignore this
|
||||
}
|
||||
friendlyId = randFriendly;
|
||||
} else {
|
||||
friendlyId = friendly;
|
||||
}*/
|
||||
|
||||
const config: OperatorConfig = {
|
||||
mode,
|
||||
operator: {
|
||||
@@ -1154,12 +1373,13 @@ export const buildOperatorConfigWithDefaults = async (data: OperatorJsonConfig):
|
||||
frequency,
|
||||
minFrequency
|
||||
},
|
||||
database: await createAppDatabaseConnection(dbConfig, appLogger),
|
||||
database: appDataSource,
|
||||
databaseConfig: {
|
||||
connection: dbConfig,
|
||||
migrations,
|
||||
retention,
|
||||
},
|
||||
influx,
|
||||
userAgent,
|
||||
web: {
|
||||
database: await createWebDatabaseConnection(webDbConfig, appLogger),
|
||||
@@ -1173,9 +1393,6 @@ export const buildOperatorConfigWithDefaults = async (data: OperatorJsonConfig):
|
||||
},
|
||||
port,
|
||||
storage: webStorage,
|
||||
invites: {
|
||||
maxAge: inviteMaxAge,
|
||||
},
|
||||
session: {
|
||||
secret: sessionSecretFromConfig,
|
||||
maxAge: sessionMaxAge,
|
||||
@@ -1189,10 +1406,14 @@ export const buildOperatorConfigWithDefaults = async (data: OperatorJsonConfig):
|
||||
api: {
|
||||
port: apiPort,
|
||||
secret: apiSecret,
|
||||
friendly
|
||||
friendly,
|
||||
},
|
||||
bots: [],
|
||||
credentials,
|
||||
dev: {
|
||||
monitorMemory,
|
||||
monitorMemoryInterval
|
||||
}
|
||||
};
|
||||
|
||||
config.bots = bots.map(x => buildBotConfig(x, config));
|
||||
@@ -1214,6 +1435,7 @@ export const buildBotConfig = (data: BotInstanceJsonConfig, opConfig: OperatorCo
|
||||
databaseConfig: {
|
||||
retention: retentionFromOp,
|
||||
} = {},
|
||||
influx: opInflux
|
||||
} = opConfig;
|
||||
const {
|
||||
name: botName,
|
||||
@@ -1239,6 +1461,7 @@ export const buildBotConfig = (data: BotInstanceJsonConfig, opConfig: OperatorCo
|
||||
databaseConfig: {
|
||||
retention,
|
||||
} = {},
|
||||
influxConfig,
|
||||
flowControlDefaults,
|
||||
credentials = {},
|
||||
subreddits: {
|
||||
@@ -1370,6 +1593,7 @@ export const buildBotConfig = (data: BotInstanceJsonConfig, opConfig: OperatorCo
|
||||
databaseConfig: {
|
||||
retention: retention ?? retentionFromOp
|
||||
},
|
||||
opInflux,
|
||||
subreddits: {
|
||||
names,
|
||||
exclude,
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
import {CheckJson, CommentCheckJson, SubmissionCheckJson} from "./Check";
|
||||
import {ActivityCheckJson, ManagerOptions} from "./Common/interfaces";
|
||||
import {RunJson} from "./Run";
|
||||
|
||||
export interface JSONConfig extends ManagerOptions {
|
||||
/**
|
||||
* A list of all the checks that should be run for a subreddit.
|
||||
*
|
||||
* Checks are split into two lists -- submission or comment -- based on kind and run independently.
|
||||
*
|
||||
* Checks in each list are run in the order found in the configuration.
|
||||
*
|
||||
* When a check "passes", and actions are performed, then all subsequent checks are skipped.
|
||||
* @minItems 1
|
||||
* */
|
||||
checks?: ActivityCheckJson[]
|
||||
|
||||
/**
|
||||
* A list of sets of Checks to run
|
||||
* @minItems 1
|
||||
* */
|
||||
runs?: RunJson[]
|
||||
}
|
||||
@@ -10,10 +10,9 @@ import {getAttributionIdentifier} from "../Utils/SnoowrapUtils";
|
||||
import dayjs from "dayjs";
|
||||
import {
|
||||
asSubmission, buildFilter, buildSubredditFilter,
|
||||
comparisonTextOp, convertSubredditsRawToStrong,
|
||||
convertSubredditsRawToStrong,
|
||||
FAIL,
|
||||
formatNumber, getActivitySubredditName, isActivityWindowConfig, isSubmission,
|
||||
parseGenericValueOrPercentComparison,
|
||||
parseSubredditName,
|
||||
PASS, windowConfigToWindowCriteria
|
||||
} from "../util";
|
||||
@@ -27,6 +26,7 @@ import {
|
||||
HistoryFiltersOptions
|
||||
} from "../Common/Infrastructure/ActivityWindow";
|
||||
import {FilterOptions} from "../Common/Infrastructure/Filters/FilterShapes";
|
||||
import {comparisonTextOp, parseGenericValueOrPercentComparison} from "../Common/Infrastructure/Comparisons";
|
||||
|
||||
|
||||
export interface AttributionCriteria {
|
||||
|
||||
@@ -8,10 +8,9 @@ import Submission from "snoowrap/dist/objects/Submission";
|
||||
import dayjs from "dayjs";
|
||||
import {
|
||||
asSubmission,
|
||||
comparisonTextOp,
|
||||
FAIL,
|
||||
formatNumber, getActivitySubredditName, historyFilterConfigToOptions, isSubmission,
|
||||
parseGenericValueOrPercentComparison, parseSubredditName,
|
||||
parseSubredditName,
|
||||
PASS,
|
||||
percentFromString, removeUndefinedKeys, toStrongSubredditState, windowConfigToWindowCriteria
|
||||
} from "../util";
|
||||
@@ -20,6 +19,7 @@ import {SubredditCriteria} from "../Common/Infrastructure/Filters/FilterCriteria
|
||||
import {CompareValueOrPercent} from "../Common/Infrastructure/Atomic";
|
||||
import {ActivityWindowConfig, ActivityWindowCriteria} from "../Common/Infrastructure/ActivityWindow";
|
||||
import {ErrorWithCause} from "pony-cause";
|
||||
import {comparisonTextOp, parseGenericValueOrPercentComparison} from "../Common/Infrastructure/Comparisons";
|
||||
|
||||
export interface CommentThresholdCriteria extends ThresholdCriteria {
|
||||
/**
|
||||
|
||||
@@ -11,7 +11,7 @@ import {
|
||||
asSubmission, bitsToHexLength,
|
||||
// blockHashImage,
|
||||
compareImages,
|
||||
comparisonTextOp, convertSubredditsRawToStrong,
|
||||
convertSubredditsRawToStrong,
|
||||
FAIL,
|
||||
formatNumber,
|
||||
getActivitySubredditName, imageCompareMaxConcurrencyGuess,
|
||||
@@ -19,7 +19,7 @@ import {
|
||||
isSubmission,
|
||||
isValidImageURL,
|
||||
objectToStringSummary,
|
||||
parseGenericValueOrPercentComparison, parseRedditEntity,
|
||||
parseRedditEntity,
|
||||
parseStringToRegex,
|
||||
parseSubredditName,
|
||||
parseUsableLinkIdentifier,
|
||||
@@ -41,6 +41,8 @@ import {
|
||||
SubredditCriteria
|
||||
} from "../Common/Infrastructure/Filters/FilterCriteria";
|
||||
import {ActivityWindow, ActivityWindowConfig} from "../Common/Infrastructure/ActivityWindow";
|
||||
import {comparisonTextOp, parseGenericValueOrPercentComparison} from "../Common/Infrastructure/Comparisons";
|
||||
import {ImageHashCacheData} from "../Common/Infrastructure/Atomic";
|
||||
|
||||
const parseLink = parseUsableLinkIdentifier();
|
||||
|
||||
@@ -194,21 +196,21 @@ export class RecentActivityRule extends Rule {
|
||||
let filteredActivity: (Submission|Comment)[] = [];
|
||||
let analysisTimes: number[] = [];
|
||||
let referenceImage: ImageData | undefined;
|
||||
let refHash: Required<ImageHashCacheData> | undefined;
|
||||
if (this.imageDetection.enable) {
|
||||
try {
|
||||
referenceImage = ImageData.fromSubmission(item);
|
||||
referenceImage.setPreferredResolutionByWidth(800);
|
||||
if(this.imageDetection.hash.enable) {
|
||||
let refHash: string | undefined;
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
refHash = await this.resources.getImageHash(referenceImage);
|
||||
if(refHash === undefined) {
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
await this.resources.setImageHash(referenceImage, refHash, this.imageDetection.hash.ttl);
|
||||
} else if(refHash.length !== bitsToHexLength(this.imageDetection.hash.bits)) {
|
||||
await this.resources.setImageHash(referenceImage, this.imageDetection.hash.ttl);
|
||||
} else if(refHash.original.length !== bitsToHexLength(this.imageDetection.hash.bits)) {
|
||||
this.logger.warn('Reference image hash length did not correspond to bits specified in config. Recomputing...');
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
await this.resources.setImageHash(referenceImage, refHash, this.imageDetection.hash.ttl);
|
||||
await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
await this.resources.setImageHash(referenceImage, this.imageDetection.hash.ttl);
|
||||
}
|
||||
} else {
|
||||
refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
@@ -243,29 +245,38 @@ export class RecentActivityRule extends Rule {
|
||||
}
|
||||
// only do image detection if regular URL comparison and other conditions fail first
|
||||
// to reduce CPU/bandwidth usage
|
||||
if (referenceImage !== undefined) {
|
||||
if (referenceImage !== undefined && refHash !== undefined) {
|
||||
try {
|
||||
let imgData = ImageData.fromSubmission(x);
|
||||
imgData.setPreferredResolutionByWidth(800);
|
||||
if(this.imageDetection.hash.enable) {
|
||||
let compareHash: string | undefined;
|
||||
let compareHash: Required<ImageHashCacheData> | undefined;
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
compareHash = await this.resources.getImageHash(imgData);
|
||||
}
|
||||
if(compareHash === undefined)
|
||||
if(compareHash === undefined || compareHash.original.length !== refHash.original.length)
|
||||
{
|
||||
if(compareHash !== undefined) {
|
||||
this.logger.debug(`Hash lengths were not the same! Will need to recompute compare hash to match reference.\n\nReference: ${referenceImage.basePath} has is ${refHash.original.length} char long | Comparing: ${imgData.basePath} has is ${compareHash} ${compareHash.original.length} long`);
|
||||
}
|
||||
compareHash = await imgData.hash(this.imageDetection.hash.bits);
|
||||
if(this.imageDetection.hash.ttl !== undefined) {
|
||||
await this.resources.setImageHash(imgData, compareHash, this.imageDetection.hash.ttl);
|
||||
await this.resources.setImageHash(imgData, this.imageDetection.hash.ttl);
|
||||
}
|
||||
}
|
||||
const refHash = await referenceImage.hash(this.imageDetection.hash.bits);
|
||||
if(refHash.length !== compareHash.length) {
|
||||
this.logger.debug(`Hash lengths were not the same! Will need to recompute compare hash to match reference.\n\nReference: ${referenceImage.baseUrl} has is ${refHash.length} char long | Comparing: ${imgData.baseUrl} has is ${compareHash} ${compareHash.length} long`);
|
||||
compareHash = await imgData.hash(this.imageDetection.hash.bits)
|
||||
let diff: number;
|
||||
const odistance = leven(refHash.original, compareHash.original);
|
||||
diff = (odistance/refHash.original.length)*100;
|
||||
|
||||
// compare flipped hash if it exists
|
||||
// if it has less difference than normal comparison then the image is probably flipped (or so different it doesn't matter)
|
||||
if(compareHash.flipped !== undefined) {
|
||||
const fdistance = leven(refHash.original, compareHash.flipped);
|
||||
const fdiff = (fdistance/refHash.original.length)*100;
|
||||
if(fdiff < diff) {
|
||||
diff = fdiff;
|
||||
}
|
||||
}
|
||||
const distance = leven(refHash, compareHash);
|
||||
const diff = (distance/refHash.length)*100;
|
||||
|
||||
|
||||
// return image if hard is defined and diff is less
|
||||
|
||||
@@ -3,8 +3,7 @@ import {Comment} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {
|
||||
asSubmission,
|
||||
comparisonTextOp, FAIL, isExternalUrlSubmission, isSubmission, parseGenericValueComparison,
|
||||
parseGenericValueOrPercentComparison, parseRegex, parseStringToRegex,
|
||||
FAIL, isExternalUrlSubmission, isSubmission, parseRegex, parseStringToRegex,
|
||||
PASS, triggeredIndicator, windowConfigToWindowCriteria
|
||||
} from "../util";
|
||||
import {
|
||||
@@ -14,6 +13,11 @@ import dayjs from 'dayjs';
|
||||
import {SimpleError} from "../Utils/Errors";
|
||||
import {JoinOperands} from "../Common/Infrastructure/Atomic";
|
||||
import {ActivityWindowConfig} from "../Common/Infrastructure/ActivityWindow";
|
||||
import {
|
||||
comparisonTextOp,
|
||||
parseGenericValueComparison,
|
||||
parseGenericValueOrPercentComparison
|
||||
} from "../Common/Infrastructure/Comparisons";
|
||||
|
||||
export interface RegexCriteria {
|
||||
/**
|
||||
@@ -376,8 +380,10 @@ export class RegexRule extends Rule {
|
||||
|
||||
for (const c of contents) {
|
||||
const results = parseRegex(reg, c);
|
||||
if (results.matched) {
|
||||
m = m.concat(results.matches);
|
||||
if(results !== undefined) {
|
||||
for(const r of results) {
|
||||
m.push(r.match);
|
||||
}
|
||||
}
|
||||
}
|
||||
return m;
|
||||
|
||||
@@ -3,12 +3,10 @@ import {Comment, RedditUser} from "snoowrap";
|
||||
import {
|
||||
activityWindowText,
|
||||
asSubmission,
|
||||
comparisonTextOp,
|
||||
FAIL,
|
||||
getActivitySubredditName, isActivityWindowConfig,
|
||||
isExternalUrlSubmission,
|
||||
isRedditMedia,
|
||||
parseGenericValueComparison,
|
||||
parseSubredditName,
|
||||
parseUsableLinkIdentifier as linkParser,
|
||||
PASS,
|
||||
@@ -23,7 +21,6 @@ import {
|
||||
} from "../Common/interfaces";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import dayjs from "dayjs";
|
||||
import Fuse from 'fuse.js'
|
||||
import {StrongSubredditCriteria, SubredditCriteria} from "../Common/Infrastructure/Filters/FilterCriteria";
|
||||
import {
|
||||
ActivityWindow,
|
||||
@@ -31,6 +28,7 @@ import {
|
||||
ActivityWindowCriteria,
|
||||
HistoryFiltersOptions
|
||||
} from "../Common/Infrastructure/ActivityWindow";
|
||||
import {comparisonTextOp, parseGenericValueComparison} from "../Common/Infrastructure/Comparisons";
|
||||
|
||||
const parseUsableLinkIdentifier = linkParser();
|
||||
|
||||
|
||||
@@ -3,11 +3,8 @@ import {Listing, SearchOptions} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import Comment from "snoowrap/dist/objects/Comment";
|
||||
import {
|
||||
compareDurationValue,
|
||||
comparisonTextOp,
|
||||
FAIL, formatNumber,
|
||||
isRepostItemResult, parseDurationComparison, parseGenericValueComparison,
|
||||
parseUsableLinkIdentifier,
|
||||
isRepostItemResult, parseUsableLinkIdentifier,
|
||||
PASS, searchAndReplace, stringSameness, triggeredIndicator, windowConfigToWindowCriteria, wordCount
|
||||
} from "../util";
|
||||
import {
|
||||
@@ -18,13 +15,16 @@ import {
|
||||
} from "../Common/interfaces";
|
||||
import objectHash from "object-hash";
|
||||
import {getAttributionIdentifier} from "../Utils/SnoowrapUtils";
|
||||
import Fuse from "fuse.js";
|
||||
import leven from "leven";
|
||||
import {YoutubeClient, commentsAsRepostItems} from "../Utils/ThirdParty/YoutubeClient";
|
||||
import dayjs from "dayjs";
|
||||
import {rest} from "lodash";
|
||||
import {CompareValue, DurationComparor, JoinOperands, SearchFacetType} from "../Common/Infrastructure/Atomic";
|
||||
import {ActivityWindow, ActivityWindowConfig} from "../Common/Infrastructure/ActivityWindow";
|
||||
import {
|
||||
compareDurationValue, comparisonTextOp,
|
||||
parseDurationComparison,
|
||||
parseGenericValueComparison
|
||||
} from "../Common/Infrastructure/Comparisons";
|
||||
|
||||
const parseYtIdentifier = parseUsableLinkIdentifier();
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import {RecentActivityRule, RecentActivityRuleJSONConfig} from "./RecentActivityRule";
|
||||
import RepeatActivityRule, {RepeatActivityJSONConfig} from "./RepeatActivityRule";
|
||||
import {Rule, RuleJSONConfig, StructuredRuleJson} from "./index";
|
||||
import {Rule} from "./index";
|
||||
import AuthorRule, {AuthorRuleJSONConfig} from "./AuthorRule";
|
||||
import {AttributionJSONConfig, AttributionRule} from "./AttributionRule";
|
||||
import {Logger} from "winston";
|
||||
@@ -10,9 +10,11 @@ import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {RepostRule, RepostRuleJSONConfig} from "./RepostRule";
|
||||
import {StructuredFilter} from "../Common/Infrastructure/Filters/FilterShapes";
|
||||
import {SentimentRule, SentimentRuleJSONConfig} from "./SentimentRule";
|
||||
import {StructuredRuleConfigObject} from "../Common/Infrastructure/RuleShapes";
|
||||
|
||||
export function ruleFactory
|
||||
(config: StructuredRuleJson, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Rule {
|
||||
(config: StructuredRuleConfigObject, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Rule {
|
||||
let cfg;
|
||||
switch (config.kind) {
|
||||
case 'recentActivity':
|
||||
@@ -37,7 +39,10 @@ export function ruleFactory
|
||||
case 'repost':
|
||||
cfg = config as StructuredFilter<RepostRuleJSONConfig>;
|
||||
return new RepostRule({...cfg, logger, subredditName, resources, client});
|
||||
case 'sentiment':
|
||||
cfg = config as StructuredFilter<SentimentRuleJSONConfig>;
|
||||
return new SentimentRule({...cfg, logger, subredditName, resources, client});
|
||||
default:
|
||||
throw new Error('rule "kind" was not recognized.');
|
||||
throw new Error(`Rule with kind '${config.kind}' was not recognized.`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,20 @@
|
||||
import {IRule, Triggerable, Rule, RuleJSONConfig, StructuredRuleJson} from "./index";
|
||||
import {IRule, Rule} from "./index";
|
||||
import Snoowrap, {Comment, Submission} from "snoowrap";
|
||||
import {ruleFactory} from "./RuleFactory";
|
||||
import {createAjvFactory, mergeArr} from "../util";
|
||||
import {Logger} from "winston";
|
||||
import {JoinCondition, RuleResult, RuleSetResult} from "../Common/interfaces";
|
||||
import {JoinCondition, RuleSetResult} from "../Common/interfaces";
|
||||
import * as RuleSchema from '../Schema/Rule.json';
|
||||
import Ajv from 'ajv';
|
||||
import {SubredditResources} from "../Subreddit/SubredditResources";
|
||||
import {runCheckOptions} from "../Subreddit/Manager";
|
||||
import {RuleResultEntity} from "../Common/Entities/RuleResultEntity";
|
||||
import {RuleSetResultEntity} from "../Common/Entities/RuleSetResultEntity";
|
||||
import {JoinOperands} from "../Common/Infrastructure/Atomic";
|
||||
import {RuleJson, RuleObjectJson, StructuredRuleObjectJson} from "../Common/Infrastructure/RuleShapes";
|
||||
import {
|
||||
RuleConfigData,
|
||||
RuleConfigHydratedData,
|
||||
RuleConfigObject,
|
||||
StructuredRuleConfigObject
|
||||
} from "../Common/Infrastructure/RuleShapes";
|
||||
|
||||
export class RuleSet implements IRuleSet {
|
||||
rules: Rule[] = [];
|
||||
@@ -99,7 +102,7 @@ export interface IRuleSet extends JoinCondition {
|
||||
}
|
||||
|
||||
export interface RuleSetOptions extends IRuleSet {
|
||||
rules: Array<StructuredRuleObjectJson>,
|
||||
rules: Array<StructuredRuleConfigObject>,
|
||||
logger: Logger
|
||||
subredditName: string
|
||||
resources: SubredditResources
|
||||
@@ -109,20 +112,24 @@ export interface RuleSetOptions extends IRuleSet {
|
||||
/**
|
||||
* A RuleSet is a "nested" set of `Rule` objects that can be used to create more complex AND/OR behavior. Think of the outcome of a `RuleSet` as the result of all of its run `Rule` objects (based on `condition`)
|
||||
* */
|
||||
export interface RuleSetJson extends JoinCondition {
|
||||
export interface RuleSetConfigData extends JoinCondition {
|
||||
/**
|
||||
* Can be `Rule` or the `name` of any **named** `Rule` in your subreddit's configuration
|
||||
* @minItems 1
|
||||
* */
|
||||
rules: Array<RuleJson>
|
||||
rules: RuleConfigData[]
|
||||
}
|
||||
|
||||
export interface RuleSetObjectJson extends RuleSetJson {
|
||||
rules: Array<RuleObjectJson>
|
||||
export interface RuleSetConfigHydratedData extends RuleSetConfigData {
|
||||
rules: RuleConfigHydratedData[]
|
||||
}
|
||||
|
||||
export const isRuleSetJSON = (obj: object): obj is RuleSetJson => {
|
||||
return (obj as RuleSetJson).rules !== undefined;
|
||||
export interface RuleSetConfigObject extends RuleSetConfigHydratedData {
|
||||
rules: RuleConfigObject[]
|
||||
}
|
||||
|
||||
export const isRuleSetJSON = (obj: object): obj is RuleSetConfigData => {
|
||||
return (obj as RuleSetConfigData).rules !== undefined;
|
||||
}
|
||||
|
||||
export const isRuleSet = (obj: object): obj is RuleSet => {
|
||||
|
||||
248
src/Rule/SentimentRule.ts
Normal file
248
src/Rule/SentimentRule.ts
Normal file
@@ -0,0 +1,248 @@
|
||||
import {Rule, RuleJSONConfig, RuleOptions} from "./index";
|
||||
import {Comment} from "snoowrap";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {
|
||||
formatNumber,
|
||||
triggeredIndicator, windowConfigToWindowCriteria
|
||||
} from "../util";
|
||||
|
||||
import dayjs from 'dayjs';
|
||||
import {map as mapAsync} from 'async';
|
||||
import {
|
||||
comparisonTextOp,
|
||||
GenericComparison,
|
||||
parseGenericValueOrPercentComparison,
|
||||
RangedComparison
|
||||
} from "../Common/Infrastructure/Comparisons";
|
||||
import {ActivityWindowConfig, ActivityWindowCriteria} from "../Common/Infrastructure/ActivityWindow";
|
||||
import {VaderSentimentComparison} from "../Common/Infrastructure/Atomic";
|
||||
import {RuleResult} from "../Common/interfaces";
|
||||
import {SnoowrapActivity} from "../Common/Infrastructure/Reddit";
|
||||
import {
|
||||
ActivitySentimentOptions,
|
||||
ActivitySentimentTestResult,
|
||||
parseTextToNumberComparison,
|
||||
testActivitySentiment
|
||||
} from "../Common/LangaugeProcessing";
|
||||
|
||||
export class SentimentRule extends Rule {
|
||||
|
||||
sentimentVal: string;
|
||||
sentiment: GenericComparison | RangedComparison;
|
||||
|
||||
historical?: HistoricalSentiment;
|
||||
|
||||
testOn: ('title' | 'body')[]
|
||||
|
||||
constructor(options: SentimentRuleOptions) {
|
||||
super(options);
|
||||
|
||||
this.sentimentVal = options.sentiment;
|
||||
this.sentiment = parseTextToNumberComparison(options.sentiment);
|
||||
this.testOn = options.testOn ?? ['title', 'body'];
|
||||
|
||||
if(options.historical !== undefined) {
|
||||
const {
|
||||
window,
|
||||
sentiment: historicalSentiment = this.sentimentVal,
|
||||
mustMatchCurrent = false,
|
||||
totalMatching = '> 0',
|
||||
} = options.historical
|
||||
|
||||
this.historical = {
|
||||
sentiment: parseTextToNumberComparison(historicalSentiment),
|
||||
sentimentVal: historicalSentiment,
|
||||
window: windowConfigToWindowCriteria(window),
|
||||
mustMatchCurrent,
|
||||
totalMatching: parseGenericValueOrPercentComparison(totalMatching),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
getKind(): string {
|
||||
return 'sentiment';
|
||||
}
|
||||
|
||||
getSpecificPremise(): object {
|
||||
return {
|
||||
sentiment: this.sentiment,
|
||||
}
|
||||
}
|
||||
|
||||
protected async process(item: Submission | Comment): Promise<[boolean, RuleResult]> {
|
||||
|
||||
let ogResult = await this.testActivity(item, this.sentiment);
|
||||
let historicResults: ActivitySentimentTestResult[] | undefined;
|
||||
|
||||
if(this.historical !== undefined && (!this.historical.mustMatchCurrent || ogResult.passes)) {
|
||||
const {
|
||||
sentiment = this.sentiment,
|
||||
window,
|
||||
} = this.historical;
|
||||
const history = await this.resources.getAuthorActivities(item.author, window);
|
||||
|
||||
historicResults = await mapAsync(history, async (x: SnoowrapActivity) => await this.testActivity(x, sentiment)); // history.map(x => this.testActivity(x, sentiment));
|
||||
}
|
||||
|
||||
|
||||
|
||||
const logSummary: string[] = [];
|
||||
|
||||
const sentimentTest = this.sentiment.displayText;
|
||||
const historicalSentimentTest = this.historical !== undefined ? this.historical.sentiment.displayText : undefined;
|
||||
|
||||
let triggered = false;
|
||||
let averageScore: number;
|
||||
let averageWindowScore: number | undefined;
|
||||
let humanWindow: string | undefined;
|
||||
let historicalPassed: string | undefined;
|
||||
let totalMatchingText: string | undefined;
|
||||
|
||||
if(historicResults === undefined) {
|
||||
triggered = ogResult.passes;
|
||||
averageScore = ogResult.scoreWeighted;
|
||||
logSummary.push(`${triggeredIndicator(triggered)} Current Activity Sentiment '${ogResult.sentiment} (${ogResult.scoreWeighted})' ${triggered ? 'PASSED' : 'DID NOT PASS'} sentiment test '${sentimentTest}'`);
|
||||
if(!triggered && this.historical !== undefined && this.historical.mustMatchCurrent) {
|
||||
logSummary.push(`Did not check Historical because 'mustMatchCurrent' is true`);
|
||||
}
|
||||
} else {
|
||||
|
||||
const {
|
||||
totalMatching,
|
||||
sentiment,
|
||||
} = this.historical as HistoricalSentiment;
|
||||
|
||||
totalMatchingText = totalMatching.displayText;
|
||||
const allResults = historicResults
|
||||
const passed = allResults.filter(x => x.passes);
|
||||
averageScore = passed.reduce((acc, curr) => acc + curr.scoreWeighted,0) / passed.length;
|
||||
averageWindowScore = allResults.reduce((acc, curr) => acc + curr.scoreWeighted,0) / allResults.length;
|
||||
|
||||
const firstActivity = allResults[0].activity;
|
||||
const lastActivity = allResults[allResults.length - 1].activity;
|
||||
|
||||
const humanRange = dayjs.duration(dayjs(firstActivity.created_utc * 1000).diff(dayjs(lastActivity.created_utc * 1000))).humanize();
|
||||
|
||||
humanWindow = `${allResults.length} Activities (${humanRange})`;
|
||||
|
||||
const {operator, value, isPercent} = totalMatching;
|
||||
if(isPercent) {
|
||||
const passPercentVal = passed.length/allResults.length
|
||||
triggered = comparisonTextOp(passPercentVal, operator, (value/100));
|
||||
historicalPassed = `${passed.length} (${formatNumber(passPercentVal)}%)`;
|
||||
} else {
|
||||
triggered = comparisonTextOp(passed.length, operator, value);
|
||||
historicalPassed = `${passed.length}`;
|
||||
}
|
||||
logSummary.push(`${triggeredIndicator(triggered)} ${historicalPassed} historical activities of ${humanWindow} passed sentiment test '${sentiment.displayText}' which ${triggered ? 'MET' : 'DID NOT MEET'} threshold '${totalMatching.displayText}'`);
|
||||
}
|
||||
|
||||
const result = logSummary.join(' || ');
|
||||
this.logger.verbose(result);
|
||||
|
||||
return Promise.resolve([triggered, this.getResult(triggered, {
|
||||
result,
|
||||
data: {
|
||||
results: {
|
||||
triggered,
|
||||
sentimentTest,
|
||||
historicalSentimentTest,
|
||||
averageScore,
|
||||
averageWindowScore,
|
||||
window: humanWindow,
|
||||
totalMatching: totalMatchingText
|
||||
}
|
||||
}
|
||||
})]);
|
||||
}
|
||||
|
||||
protected async testActivity(a: (Submission | Comment), criteria: GenericComparison | RangedComparison): Promise<ActivitySentimentTestResult> {
|
||||
return await testActivitySentiment(a, criteria, {testOn: this.testOn});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the Sentiment of Activities from the Author history
|
||||
*
|
||||
* If this is defined then the `totalMatching` threshold must pass for the Rule to trigger
|
||||
*
|
||||
* If `sentiment` is defined here it overrides the top-level `sentiment` value
|
||||
*
|
||||
* */
|
||||
interface HistoricalSentimentConfig {
|
||||
window: ActivityWindowConfig
|
||||
|
||||
sentiment?: VaderSentimentComparison
|
||||
|
||||
/**
|
||||
* When `true` the original Activity being checked MUST match desired sentiment before the Rule considers any history
|
||||
*
|
||||
* @default false
|
||||
* */
|
||||
mustMatchCurrent?: boolean
|
||||
|
||||
/**
|
||||
* A string containing a comparison operator and a value to compare Activities from history that pass the given `sentiment` comparison
|
||||
*
|
||||
* The syntax is `(< OR > OR <= OR >=) <number>[percent sign]`
|
||||
*
|
||||
* * EX `> 12` => greater than 12 activities passed given `sentiment` comparison
|
||||
* * EX `<= 10%` => less than 10% of all Activities from history passed given `sentiment` comparison
|
||||
*
|
||||
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
|
||||
* @default "> 0"
|
||||
* @examples ["> 0","> 10%"]
|
||||
* */
|
||||
totalMatching: string
|
||||
}
|
||||
|
||||
interface HistoricalSentiment extends Omit<HistoricalSentimentConfig, 'sentiment' | 'window' | 'totalMatching'> {
|
||||
sentiment: GenericComparison | RangedComparison,
|
||||
sentimentVal: string
|
||||
window: ActivityWindowCriteria
|
||||
totalMatching: GenericComparison
|
||||
}
|
||||
|
||||
interface SentimentConfig extends ActivitySentimentOptions {
|
||||
|
||||
sentiment: VaderSentimentComparison
|
||||
|
||||
/**
|
||||
* Test the Sentiment of Activities from the Author history
|
||||
*
|
||||
* If this is defined then the `totalMatching` threshold must pass for the Rule to trigger
|
||||
*
|
||||
* If `sentiment` is defined here it overrides the top-level `sentiment` value
|
||||
*
|
||||
* */
|
||||
historical?: HistoricalSentimentConfig
|
||||
|
||||
/**
|
||||
* Which content from an Activity to test for `sentiment` against
|
||||
*
|
||||
* Only used if the Activity being tested is a Submission -- Comments are only tested against their body
|
||||
*
|
||||
* If more than one type of content is specified then all text is tested together as one string
|
||||
*
|
||||
* @default ["title", "body"]
|
||||
* */
|
||||
testOn?: ('title' | 'body')[]
|
||||
}
|
||||
|
||||
export interface SentimentRuleOptions extends SentimentConfig, RuleOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the calculated VADER sentiment for an Activity to determine if the text context is negative, neutral, or positive in tone.
|
||||
*
|
||||
* More about VADER Sentiment: https://github.com/cjhutto/vaderSentiment
|
||||
*
|
||||
* */
|
||||
export interface SentimentRuleJSONConfig extends SentimentConfig, RuleJSONConfig {
|
||||
/**
|
||||
* @examples ["sentiment"]
|
||||
* */
|
||||
kind: 'sentiment'
|
||||
}
|
||||
|
||||
export default SentimentRule;
|
||||
@@ -185,10 +185,5 @@ export interface RuleJSONConfig extends IRule {
|
||||
* The kind of rule to run
|
||||
* @examples ["recentActivity", "repeatActivity", "author", "attribution", "history"]
|
||||
*/
|
||||
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex' | 'repost'
|
||||
}
|
||||
|
||||
|
||||
export interface StructuredRuleJson extends Omit<RuleJSONConfig, 'authorIs' | 'itemIs'>, StructuredRunnableBase {
|
||||
|
||||
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex' | 'repost' | 'sentiment'
|
||||
}
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
import {asStructuredCommentCheckJson, asStructuredSubmissionCheckJson, Check, CheckStructuredJson} from "../Check";
|
||||
import {
|
||||
ActivityCheckJson,
|
||||
ActivityCheckConfigHydratedData,
|
||||
ActivityCheckConfigValue, ActivityCheckObject,
|
||||
asStructuredCommentCheckJson,
|
||||
asStructuredSubmissionCheckJson,
|
||||
Check,
|
||||
} from "../Check";
|
||||
import {
|
||||
PostBehavior, PostBehaviorOption,
|
||||
RunResult
|
||||
} from "../Common/interfaces";
|
||||
@@ -20,6 +25,7 @@ import {RuleResultEntity} from "../Common/Entities/RuleResultEntity";
|
||||
import {RunnableBase} from "../Common/RunnableBase";
|
||||
import {RunnableBaseJson, RunnableBaseOptions, StructuredRunnableBase} from "../Common/Infrastructure/Runnable";
|
||||
import {FilterCriteriaDefaults} from "../Common/Infrastructure/Filters/FilterShapes";
|
||||
import {IncludesData} from "../Common/Infrastructure/Includes";
|
||||
|
||||
export class Run extends RunnableBase {
|
||||
name: string;
|
||||
@@ -296,7 +302,7 @@ export interface IRun extends PostBehavior, RunnableBaseJson {
|
||||
enable?: boolean,
|
||||
}
|
||||
|
||||
export interface RunOptions extends RunStructuredJson, RunnableBaseOptions {
|
||||
export interface RunOptions extends RunConfigObject, RunnableBaseOptions {
|
||||
// submissionChecks?: SubmissionCheck[]
|
||||
// commentChecks?: CommentCheck[]
|
||||
//checks: CheckStructuredJson[]
|
||||
@@ -308,10 +314,16 @@ export interface RunOptions extends RunStructuredJson, RunnableBaseOptions {
|
||||
emitter: EventEmitter;
|
||||
}
|
||||
|
||||
export interface RunJson extends IRun {
|
||||
checks: ActivityCheckJson[]
|
||||
export interface RunConfigData extends IRun {
|
||||
checks: ActivityCheckConfigValue[]
|
||||
}
|
||||
|
||||
export interface RunStructuredJson extends Omit<RunJson, 'authorIs' | 'itemIs' | 'checks'>, StructuredRunnableBase {
|
||||
checks: CheckStructuredJson[]
|
||||
export type RunConfigValue = string | IncludesData | RunConfigData;
|
||||
|
||||
export interface RunConfigHydratedData extends IRun {
|
||||
checks: ActivityCheckConfigHydratedData[]
|
||||
}
|
||||
|
||||
export interface RunConfigObject extends Omit<RunConfigHydratedData, 'authorIs' | 'itemIs'>, StructuredRunnableBase {
|
||||
checks: ActivityCheckObject[]
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
5774
src/Schema/Check.json
Normal file
5774
src/Schema/Check.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -119,6 +119,19 @@
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"modActions": {
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ModNoteCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/ModLogCriteria"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"name": {
|
||||
"description": "A list of reddit usernames (case-insensitive) to match against. Do not include the \"u/\" prefix\n\n EX to match against /u/FoxxMD and /u/AnotherUser use [\"FoxxMD\",\"AnotherUser\"]",
|
||||
"examples": [
|
||||
@@ -245,6 +258,9 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"influxConfig": {
|
||||
"$ref": "#/definitions/InfluxConfig"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -565,8 +581,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -853,6 +869,43 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"InfluxConfig": {
|
||||
"properties": {
|
||||
"credentials": {
|
||||
"$ref": "#/definitions/InfluxCredentials"
|
||||
},
|
||||
"defaultTags": {
|
||||
"$ref": "#/definitions/Record<string,string>"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"credentials"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"InfluxCredentials": {
|
||||
"properties": {
|
||||
"bucket": {
|
||||
"type": "string"
|
||||
},
|
||||
"org": {
|
||||
"type": "string"
|
||||
},
|
||||
"token": {
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"bucket",
|
||||
"org",
|
||||
"token",
|
||||
"url"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"LoggingOptions": {
|
||||
"properties": {
|
||||
"console": {
|
||||
@@ -965,6 +1018,241 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ModLogCriteria": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"activityType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"details": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
"total"
|
||||
],
|
||||
"examples": [
|
||||
"current"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ModNoteCriteria": {
|
||||
"properties": {
|
||||
"activityType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"note": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"noteType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"ABUSE_WARNING",
|
||||
"BAN",
|
||||
"BOT_BAN",
|
||||
"HELPFUL_USER",
|
||||
"PERMA_BAN",
|
||||
"SOLID_CONTRIBUTOR",
|
||||
"SPAM_WARNING",
|
||||
"SPAM_WATCH"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"ABUSE_WARNING",
|
||||
"BAN",
|
||||
"BOT_BAN",
|
||||
"HELPFUL_USER",
|
||||
"PERMA_BAN",
|
||||
"SOLID_CONTRIBUTOR",
|
||||
"SPAM_WARNING",
|
||||
"SPAM_WATCH"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
"total"
|
||||
],
|
||||
"examples": [
|
||||
"current"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ModeratorNameCriteria": {
|
||||
"properties": {
|
||||
"behavior": {
|
||||
@@ -1226,6 +1514,17 @@
|
||||
"boolean"
|
||||
]
|
||||
},
|
||||
"modNotesTTL": {
|
||||
"default": 60,
|
||||
"description": "Amount of time, in seconds, Mod Notes should be cached\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
|
||||
"examples": [
|
||||
60
|
||||
],
|
||||
"type": [
|
||||
"number",
|
||||
"boolean"
|
||||
]
|
||||
},
|
||||
"provider": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -1410,6 +1709,9 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"Record<string,string>": {
|
||||
"type": "object"
|
||||
},
|
||||
"RedditCredentials": {
|
||||
"description": "Credentials required for the bot to interact with Reddit's API\n\nThese credentials will provided to both the API and Web interface unless otherwise specified with the `web.credentials` property\n\nRefer to the [required credentials table](https://github.com/FoxxMD/context-mod/blob/master/docs/operatorConfiguration.md#minimum-required-configuration) to see what is necessary to run the bot.",
|
||||
"examples": [
|
||||
@@ -1647,8 +1949,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -1682,6 +1984,13 @@
|
||||
"title": {
|
||||
"description": "A valid regular expression to match against the title of the submission",
|
||||
"type": "string"
|
||||
},
|
||||
"upvoteRatio": {
|
||||
"description": "Compare the upvote ratio for this Submission, expressed as a whole number\n\nCan be either a comparison string or a number. If a number then CM assumes upvote ratio must be greater than or equal to this.\n\nExample:\n\n* `< 90` => less than 90% upvoted\n* 45 => greater than or equal to 45% upvoted",
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1749,16 +2058,16 @@
|
||||
"properties": {
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [ascending|descending]`",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the notes for this Author:\n\n### current\n\nOnly the most recent note is checked for `type`\n\n### total\n\nThe `count` comparison of `type` must be found within all notes\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n\n### consecutive\n\nThe `count` **number** of `type` notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
@@ -1823,7 +2132,7 @@
|
||||
"description": "Configuration for the **Server** application. See [Architecture Documentation](https://github.com/FoxxMD/context-mod/blob/master/docs/serverClientArchitecture.md) for more info",
|
||||
"properties": {
|
||||
"friendly": {
|
||||
"description": "A friendly name for this server. This will override `friendly` in `BotConnection` if specified.",
|
||||
"description": "A friendly name for this server. This will override `friendly` in `BotConnection` if specified.\n\nIf none is set one is randomly generated.",
|
||||
"type": "string"
|
||||
},
|
||||
"port": {
|
||||
@@ -1934,6 +2243,23 @@
|
||||
"$ref": "#/definitions/DatabaseStatisticsOperatorJsonConfig",
|
||||
"description": "Set defaults for the frequency time series stats are collected"
|
||||
},
|
||||
"dev": {
|
||||
"properties": {
|
||||
"monitorMemory": {
|
||||
"description": "Invoke `process.memoryUsage()` on an interval and send metrics to Influx\n\nOnly works if Influx config is provided",
|
||||
"type": "boolean"
|
||||
},
|
||||
"monitorMemoryInterval": {
|
||||
"default": 15,
|
||||
"description": "Interval, in seconds, to invoke `process.memoryUsage()` at\n\nDefaults to 15 seconds",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"influxConfig": {
|
||||
"$ref": "#/definitions/InfluxConfig"
|
||||
},
|
||||
"logging": {
|
||||
"$ref": "#/definitions/LoggingOptions",
|
||||
"description": "Settings to configure global logging defaults"
|
||||
@@ -2099,20 +2425,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"invites": {
|
||||
"description": "Settings related to oauth flow invites",
|
||||
"properties": {
|
||||
"maxAge": {
|
||||
"default": 0,
|
||||
"description": "Number of seconds an invite should be valid for\n\n If `0` or not specified (default) invites do not expire",
|
||||
"examples": [
|
||||
0
|
||||
],
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"logLevel": {
|
||||
"description": "The default log level to filter to in the web interface\n\nIf not specified or `null` will be same as global `logLevel`",
|
||||
"enum": [
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/IncludesData"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RecentActivityRuleJSONConfig"
|
||||
},
|
||||
@@ -22,6 +25,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/SentimentRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
@@ -104,8 +110,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -574,6 +580,19 @@
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"modActions": {
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ModNoteCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/ModLogCriteria"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"name": {
|
||||
"description": "A list of reddit usernames (case-insensitive) to match against. Do not include the \"u/\" prefix\n\n EX to match against /u/FoxxMD and /u/AnotherUser use [\"FoxxMD\",\"AnotherUser\"]",
|
||||
"examples": [
|
||||
@@ -803,8 +822,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -1297,6 +1316,60 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"HistoricalSentimentConfig": {
|
||||
"description": "Test the Sentiment of Activities from the Author history\n\nIf this is defined then the `totalMatching` threshold must pass for the Rule to trigger\n\nIf `sentiment` is defined here it overrides the top-level `sentiment` value",
|
||||
"properties": {
|
||||
"mustMatchCurrent": {
|
||||
"default": false,
|
||||
"description": "When `true` the original Activity being checked MUST match desired sentiment before the Rule considers any history",
|
||||
"type": "boolean"
|
||||
},
|
||||
"sentiment": {
|
||||
"description": "Test the calculated VADER sentiment (compound) score for an Activity using this comparison. Can be either a numerical or natural language\n\nSentiment values range from extremely negative to extremely positive in a numerical range of -1 to +1:\n\n* -0.6 => extremely negative\n* -0.3 => very negative\n* -0.1 => negative\n* 0 => neutral\n* 0.1 => positive\n* 0.3 => very positive\n* 0.6 => extremely positive\n\nThe below examples are all equivocal. You can use either set of values as the value for `sentiment` (numerical comparisons or natural langauge)\n\n* `>= 0.1` = `is positive`\n* `<= 0.3` = `is very negative`\n* `< 0.1` = `is not positive`\n* `> -0.3` = `is not very negative`\n\nSpecial case:\n\n* `is neutral` equates to `> -0.1 and < 0.1`\n* `is not neutral` equates to `< -0.1 or > 0.1`\n\nContextMod uses a normalized, weighted average from these sentiment tools:\n\n* NLP.js (english, french, german, and spanish) https://github.com/axa-group/nlp.js/blob/master/docs/v3/sentiment-analysis.md\n* (english only) vaderSentiment-js https://github.com/vaderSentiment/vaderSentiment-js/\n* (english only) wink-sentiment https://github.com/winkjs/wink-sentiment\n\nMore about the sentiment algorithms used:\n* VADER https://github.com/cjhutto/vaderSentiment\n* AFINN http://corpustext.com/reference/sentiment_afinn.html\n* Senticon https://ieeexplore.ieee.org/document/8721408\n* Pattern https://github.com/clips/pattern\n* wink https://github.com/winkjs/wink-sentiment",
|
||||
"examples": [
|
||||
"is negative",
|
||||
"> 0.2"
|
||||
],
|
||||
"pattern": "((>|>=|<|<=)\\s*(-?\\d?\\.?\\d+))|((not)?\\s*(very|extremely)?\\s*(positive|neutral|negative))",
|
||||
"type": "string"
|
||||
},
|
||||
"totalMatching": {
|
||||
"default": "> 0",
|
||||
"description": "A string containing a comparison operator and a value to compare Activities from history that pass the given `sentiment` comparison\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign]`\n\n* EX `> 12` => greater than 12 activities passed given `sentiment` comparison\n* EX `<= 10%` => less than 10% of all Activities from history passed given `sentiment` comparison",
|
||||
"examples": [
|
||||
"> 0",
|
||||
"> 10%"
|
||||
],
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/FullActivityWindowConfig"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"totalMatching",
|
||||
"window"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"HistoryCriteria": {
|
||||
"description": "Criteria will only trigger if ALL present thresholds (comment, submission, total) are met",
|
||||
"properties": {
|
||||
@@ -1585,6 +1658,276 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"IncludesData": {
|
||||
"properties": {
|
||||
"path": {
|
||||
"description": "The special-form path to the config fragment to retrieve.\n\nIf value starts with `wiki:` then the proceeding value will be used to get a wiki page from the current subreddit\n\n * EX `wiki:botconfig/mybot` tries to get `https://reddit.com/r/currentSubreddit/wiki/botconfig/mybot`\n\nIf the value starts with `wiki:` and ends with `|someValue` then `someValue` will be used as the base subreddit for the wiki page\n\n* EX `wiki:replytemplates/test|ContextModBot` tries to get `https://reddit.com/r/ContextModBot/wiki/replytemplates/test`\n\nIf the value starts with `url:` then the value is fetched as an external url and expects raw text returned\n\n* EX `url:https://pastebin.com/raw/38qfL7mL` tries to get the text response of `https://pastebin.com/raw/38qfL7mL`",
|
||||
"type": "string"
|
||||
},
|
||||
"ttl": {
|
||||
"anyOf": [
|
||||
{
|
||||
"enum": [
|
||||
false,
|
||||
"response",
|
||||
true
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "number"
|
||||
}
|
||||
],
|
||||
"description": "Control caching for the config fragment.\n\nIf not specified the value for `wikiTTL` will be used\n\n* If true then value is cached forever\n* If false then value is never cached\n* If a number then the number of seconds to cache value\n* If 'response' then CM will attempt to use Cache-Control or Expires headers from the response to determine how long to cache the value"
|
||||
},
|
||||
"type": {
|
||||
"description": "An unused hint about the content type. Not implemented yet",
|
||||
"enum": [
|
||||
"json",
|
||||
"yaml"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"path"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ModLogCriteria": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"activityType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"details": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
"total"
|
||||
],
|
||||
"examples": [
|
||||
"current"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ModNoteCriteria": {
|
||||
"properties": {
|
||||
"activityType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"note": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"noteType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"ABUSE_WARNING",
|
||||
"BAN",
|
||||
"BOT_BAN",
|
||||
"HELPFUL_USER",
|
||||
"PERMA_BAN",
|
||||
"SOLID_CONTRIBUTOR",
|
||||
"SPAM_WARNING",
|
||||
"SPAM_WATCH"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"ABUSE_WARNING",
|
||||
"BAN",
|
||||
"BOT_BAN",
|
||||
"HELPFUL_USER",
|
||||
"PERMA_BAN",
|
||||
"SOLID_CONTRIBUTOR",
|
||||
"SPAM_WARNING",
|
||||
"SPAM_WATCH"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
"total"
|
||||
],
|
||||
"examples": [
|
||||
"current"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ModeratorNameCriteria": {
|
||||
"properties": {
|
||||
"behavior": {
|
||||
@@ -2721,6 +3064,149 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SentimentRuleJSONConfig": {
|
||||
"description": "Test the calculated VADER sentiment for an Activity to determine if the text context is negative, neutral, or positive in tone.\n\nMore about VADER Sentiment: https://github.com/cjhutto/vaderSentiment",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AuthorCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/NamedCriteria<AuthorCriteria>"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/FilterOptionsJson<AuthorCriteria>"
|
||||
}
|
||||
],
|
||||
"description": "If present then these Author criteria are checked before running the Check. If criteria fails then the Check will fail."
|
||||
},
|
||||
"defaultLanguage": {
|
||||
"anyOf": [
|
||||
{
|
||||
"enum": [
|
||||
false
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"null",
|
||||
"string"
|
||||
]
|
||||
}
|
||||
],
|
||||
"default": "en",
|
||||
"description": "Make the analyzer assume a language if it cannot determine one itself.\n\nThis is very useful for the analyzer when it is parsing short pieces of content. For example, if you know your subreddit is majority english speakers this will make the analyzer return \"neutral\" sentiment instead of \"not detected language\".\n\nDefaults to 'en'"
|
||||
},
|
||||
"historical": {
|
||||
"$ref": "#/definitions/HistoricalSentimentConfig",
|
||||
"description": "Test the Sentiment of Activities from the Author history\n\nIf this is defined then the `totalMatching` threshold must pass for the Rule to trigger\n\nIf `sentiment` is defined here it overrides the top-level `sentiment` value"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/NamedCriteria<TypedActivityState>"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/FilterOptionsJson<TypedActivityState>"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the check.\n\nIf any set of criteria passes the Check will be run. If the criteria fails then the Check will fail.\n\n* @examples [[{\"over_18\": true, \"removed': false}]]"
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"sentiment"
|
||||
],
|
||||
"examples": [
|
||||
"sentiment"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"languageHints": {
|
||||
"default": [
|
||||
"en",
|
||||
"es",
|
||||
"de",
|
||||
"fr"
|
||||
],
|
||||
"description": "Helps the analyzer coerce a low confidence language guess into a known-used languages in two ways:\n\nIf the analyzer's\n * *best* guess is NOT one of these\n * but it did guess one of these\n * and its guess is above requiredLanguageConfidence score then use the hinted language instead of best guess\n * OR text content is very short (4 words or less)\n * and the best guess was below the requiredLanguageConfidence score\n * and none of guesses was a hinted language then use the defaultLanguage\n\nDefaults to popular romance languages: ['en', 'es', 'de', 'fr']",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
},
|
||||
"requiredLanguageConfidence": {
|
||||
"default": 0.9,
|
||||
"description": "Required confidence to use a guessed language as the best guess. Score from 0 to 1.\n\nDefaults to 0.9",
|
||||
"type": "number"
|
||||
},
|
||||
"sentiment": {
|
||||
"description": "Test the calculated VADER sentiment (compound) score for an Activity using this comparison. Can be either a numerical or natural language\n\nSentiment values range from extremely negative to extremely positive in a numerical range of -1 to +1:\n\n* -0.6 => extremely negative\n* -0.3 => very negative\n* -0.1 => negative\n* 0 => neutral\n* 0.1 => positive\n* 0.3 => very positive\n* 0.6 => extremely positive\n\nThe below examples are all equivocal. You can use either set of values as the value for `sentiment` (numerical comparisons or natural langauge)\n\n* `>= 0.1` = `is positive`\n* `<= 0.3` = `is very negative`\n* `< 0.1` = `is not positive`\n* `> -0.3` = `is not very negative`\n\nSpecial case:\n\n* `is neutral` equates to `> -0.1 and < 0.1`\n* `is not neutral` equates to `< -0.1 or > 0.1`\n\nContextMod uses a normalized, weighted average from these sentiment tools:\n\n* NLP.js (english, french, german, and spanish) https://github.com/axa-group/nlp.js/blob/master/docs/v3/sentiment-analysis.md\n* (english only) vaderSentiment-js https://github.com/vaderSentiment/vaderSentiment-js/\n* (english only) wink-sentiment https://github.com/winkjs/wink-sentiment\n\nMore about the sentiment algorithms used:\n* VADER https://github.com/cjhutto/vaderSentiment\n* AFINN http://corpustext.com/reference/sentiment_afinn.html\n* Senticon https://ieeexplore.ieee.org/document/8721408\n* Pattern https://github.com/clips/pattern\n* wink https://github.com/winkjs/wink-sentiment",
|
||||
"examples": [
|
||||
"is negative",
|
||||
"> 0.2"
|
||||
],
|
||||
"pattern": "((>|>=|<|<=)\\s*(-?\\d?\\.?\\d+))|((not)?\\s*(very|extremely)?\\s*(positive|neutral|negative))",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"default": [
|
||||
"title",
|
||||
"body"
|
||||
],
|
||||
"description": "Which content from an Activity to test for `sentiment` against\n\nOnly used if the Activity being tested is a Submission -- Comments are only tested against their body\n\nIf more than one type of content is specified then all text is tested together as one string",
|
||||
"items": {
|
||||
"enum": [
|
||||
"body",
|
||||
"title"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind",
|
||||
"sentiment"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SubmissionState": {
|
||||
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
|
||||
"examples": [
|
||||
@@ -2870,8 +3356,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -2905,6 +3391,13 @@
|
||||
"title": {
|
||||
"description": "A valid regular expression to match against the title of the submission",
|
||||
"type": "string"
|
||||
},
|
||||
"upvoteRatio": {
|
||||
"description": "Compare the upvote ratio for this Submission, expressed as a whole number\n\nCan be either a comparison string or a number. If a number then CM assumes upvote ratio must be greater than or equal to this.\n\nExample:\n\n* `< 90` => less than 90% upvoted\n* 45 => greater than or equal to 45% upvoted",
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2969,16 +3462,16 @@
|
||||
"properties": {
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [ascending|descending]`",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the notes for this Author:\n\n### current\n\nOnly the most recent note is checked for `type`\n\n### total\n\nThe `count` comparison of `type` must be found within all notes\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n\n### consecutive\n\nThe `count` **number** of `type` notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
|
||||
@@ -78,8 +78,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -548,6 +548,19 @@
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"modActions": {
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ModNoteCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/ModLogCriteria"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"name": {
|
||||
"description": "A list of reddit usernames (case-insensitive) to match against. Do not include the \"u/\" prefix\n\n EX to match against /u/FoxxMD and /u/AnotherUser use [\"FoxxMD\",\"AnotherUser\"]",
|
||||
"examples": [
|
||||
@@ -777,8 +790,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -1271,6 +1284,60 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"HistoricalSentimentConfig": {
|
||||
"description": "Test the Sentiment of Activities from the Author history\n\nIf this is defined then the `totalMatching` threshold must pass for the Rule to trigger\n\nIf `sentiment` is defined here it overrides the top-level `sentiment` value",
|
||||
"properties": {
|
||||
"mustMatchCurrent": {
|
||||
"default": false,
|
||||
"description": "When `true` the original Activity being checked MUST match desired sentiment before the Rule considers any history",
|
||||
"type": "boolean"
|
||||
},
|
||||
"sentiment": {
|
||||
"description": "Test the calculated VADER sentiment (compound) score for an Activity using this comparison. Can be either a numerical or natural language\n\nSentiment values range from extremely negative to extremely positive in a numerical range of -1 to +1:\n\n* -0.6 => extremely negative\n* -0.3 => very negative\n* -0.1 => negative\n* 0 => neutral\n* 0.1 => positive\n* 0.3 => very positive\n* 0.6 => extremely positive\n\nThe below examples are all equivocal. You can use either set of values as the value for `sentiment` (numerical comparisons or natural langauge)\n\n* `>= 0.1` = `is positive`\n* `<= 0.3` = `is very negative`\n* `< 0.1` = `is not positive`\n* `> -0.3` = `is not very negative`\n\nSpecial case:\n\n* `is neutral` equates to `> -0.1 and < 0.1`\n* `is not neutral` equates to `< -0.1 or > 0.1`\n\nContextMod uses a normalized, weighted average from these sentiment tools:\n\n* NLP.js (english, french, german, and spanish) https://github.com/axa-group/nlp.js/blob/master/docs/v3/sentiment-analysis.md\n* (english only) vaderSentiment-js https://github.com/vaderSentiment/vaderSentiment-js/\n* (english only) wink-sentiment https://github.com/winkjs/wink-sentiment\n\nMore about the sentiment algorithms used:\n* VADER https://github.com/cjhutto/vaderSentiment\n* AFINN http://corpustext.com/reference/sentiment_afinn.html\n* Senticon https://ieeexplore.ieee.org/document/8721408\n* Pattern https://github.com/clips/pattern\n* wink https://github.com/winkjs/wink-sentiment",
|
||||
"examples": [
|
||||
"is negative",
|
||||
"> 0.2"
|
||||
],
|
||||
"pattern": "((>|>=|<|<=)\\s*(-?\\d?\\.?\\d+))|((not)?\\s*(very|extremely)?\\s*(positive|neutral|negative))",
|
||||
"type": "string"
|
||||
},
|
||||
"totalMatching": {
|
||||
"default": "> 0",
|
||||
"description": "A string containing a comparison operator and a value to compare Activities from history that pass the given `sentiment` comparison\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign]`\n\n* EX `> 12` => greater than 12 activities passed given `sentiment` comparison\n* EX `<= 10%` => less than 10% of all Activities from history passed given `sentiment` comparison",
|
||||
"examples": [
|
||||
"> 0",
|
||||
"> 10%"
|
||||
],
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"type": "string"
|
||||
},
|
||||
"window": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/DurationObject"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/FullActivityWindowConfig"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
],
|
||||
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
|
||||
"examples": [
|
||||
"90 days"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"totalMatching",
|
||||
"window"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"HistoryCriteria": {
|
||||
"description": "Criteria will only trigger if ALL present thresholds (comment, submission, total) are met",
|
||||
"properties": {
|
||||
@@ -1559,6 +1626,276 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"IncludesData": {
|
||||
"properties": {
|
||||
"path": {
|
||||
"description": "The special-form path to the config fragment to retrieve.\n\nIf value starts with `wiki:` then the proceeding value will be used to get a wiki page from the current subreddit\n\n * EX `wiki:botconfig/mybot` tries to get `https://reddit.com/r/currentSubreddit/wiki/botconfig/mybot`\n\nIf the value starts with `wiki:` and ends with `|someValue` then `someValue` will be used as the base subreddit for the wiki page\n\n* EX `wiki:replytemplates/test|ContextModBot` tries to get `https://reddit.com/r/ContextModBot/wiki/replytemplates/test`\n\nIf the value starts with `url:` then the value is fetched as an external url and expects raw text returned\n\n* EX `url:https://pastebin.com/raw/38qfL7mL` tries to get the text response of `https://pastebin.com/raw/38qfL7mL`",
|
||||
"type": "string"
|
||||
},
|
||||
"ttl": {
|
||||
"anyOf": [
|
||||
{
|
||||
"enum": [
|
||||
false,
|
||||
"response",
|
||||
true
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "number"
|
||||
}
|
||||
],
|
||||
"description": "Control caching for the config fragment.\n\nIf not specified the value for `wikiTTL` will be used\n\n* If true then value is cached forever\n* If false then value is never cached\n* If a number then the number of seconds to cache value\n* If 'response' then CM will attempt to use Cache-Control or Expires headers from the response to determine how long to cache the value"
|
||||
},
|
||||
"type": {
|
||||
"description": "An unused hint about the content type. Not implemented yet",
|
||||
"enum": [
|
||||
"json",
|
||||
"yaml"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"path"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ModLogCriteria": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"activityType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"details": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
"total"
|
||||
],
|
||||
"examples": [
|
||||
"current"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ModNoteCriteria": {
|
||||
"properties": {
|
||||
"activityType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"comment",
|
||||
"submission"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"note": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"noteType": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"ABUSE_WARNING",
|
||||
"BAN",
|
||||
"BOT_BAN",
|
||||
"HELPFUL_USER",
|
||||
"PERMA_BAN",
|
||||
"SOLID_CONTRIBUTOR",
|
||||
"SPAM_WARNING",
|
||||
"SPAM_WATCH"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"ABUSE_WARNING",
|
||||
"BAN",
|
||||
"BOT_BAN",
|
||||
"HELPFUL_USER",
|
||||
"PERMA_BAN",
|
||||
"SOLID_CONTRIBUTOR",
|
||||
"SPAM_WARNING",
|
||||
"SPAM_WATCH"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
"total"
|
||||
],
|
||||
"examples": [
|
||||
"current"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"APPROVAL",
|
||||
"INVITE",
|
||||
"NOTE",
|
||||
"REMOVAL",
|
||||
"SPAM"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ModeratorNameCriteria": {
|
||||
"properties": {
|
||||
"behavior": {
|
||||
@@ -2695,6 +3032,149 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SentimentRuleJSONConfig": {
|
||||
"description": "Test the calculated VADER sentiment for an Activity to determine if the text context is negative, neutral, or positive in tone.\n\nMore about VADER Sentiment: https://github.com/cjhutto/vaderSentiment",
|
||||
"properties": {
|
||||
"authorIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AuthorCriteria"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/NamedCriteria<AuthorCriteria>"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/FilterOptionsJson<AuthorCriteria>"
|
||||
}
|
||||
],
|
||||
"description": "If present then these Author criteria are checked before running the Check. If criteria fails then the Check will fail."
|
||||
},
|
||||
"defaultLanguage": {
|
||||
"anyOf": [
|
||||
{
|
||||
"enum": [
|
||||
false
|
||||
],
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"type": [
|
||||
"null",
|
||||
"string"
|
||||
]
|
||||
}
|
||||
],
|
||||
"default": "en",
|
||||
"description": "Make the analyzer assume a language if it cannot determine one itself.\n\nThis is very useful for the analyzer when it is parsing short pieces of content. For example, if you know your subreddit is majority english speakers this will make the analyzer return \"neutral\" sentiment instead of \"not detected language\".\n\nDefaults to 'en'"
|
||||
},
|
||||
"historical": {
|
||||
"$ref": "#/definitions/HistoricalSentimentConfig",
|
||||
"description": "Test the Sentiment of Activities from the Author history\n\nIf this is defined then the `totalMatching` threshold must pass for the Rule to trigger\n\nIf `sentiment` is defined here it overrides the top-level `sentiment` value"
|
||||
},
|
||||
"itemIs": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SubmissionState"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/CommentState"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/NamedCriteria<TypedActivityState>"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/FilterOptionsJson<TypedActivityState>"
|
||||
}
|
||||
],
|
||||
"description": "A list of criteria to test the state of the `Activity` against before running the check.\n\nIf any set of criteria passes the Check will be run. If the criteria fails then the Check will fail.\n\n* @examples [[{\"over_18\": true, \"removed': false}]]"
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind of rule to run",
|
||||
"enum": [
|
||||
"sentiment"
|
||||
],
|
||||
"examples": [
|
||||
"sentiment"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"languageHints": {
|
||||
"default": [
|
||||
"en",
|
||||
"es",
|
||||
"de",
|
||||
"fr"
|
||||
],
|
||||
"description": "Helps the analyzer coerce a low confidence language guess into a known-used languages in two ways:\n\nIf the analyzer's\n * *best* guess is NOT one of these\n * but it did guess one of these\n * and its guess is above requiredLanguageConfidence score then use the hinted language instead of best guess\n * OR text content is very short (4 words or less)\n * and the best guess was below the requiredLanguageConfidence score\n * and none of guesses was a hinted language then use the defaultLanguage\n\nDefaults to popular romance languages: ['en', 'es', 'de', 'fr']",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"name": {
|
||||
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
|
||||
"examples": [
|
||||
"myNewRule"
|
||||
],
|
||||
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
|
||||
"type": "string"
|
||||
},
|
||||
"requiredLanguageConfidence": {
|
||||
"default": 0.9,
|
||||
"description": "Required confidence to use a guessed language as the best guess. Score from 0 to 1.\n\nDefaults to 0.9",
|
||||
"type": "number"
|
||||
},
|
||||
"sentiment": {
|
||||
"description": "Test the calculated VADER sentiment (compound) score for an Activity using this comparison. Can be either a numerical or natural language\n\nSentiment values range from extremely negative to extremely positive in a numerical range of -1 to +1:\n\n* -0.6 => extremely negative\n* -0.3 => very negative\n* -0.1 => negative\n* 0 => neutral\n* 0.1 => positive\n* 0.3 => very positive\n* 0.6 => extremely positive\n\nThe below examples are all equivocal. You can use either set of values as the value for `sentiment` (numerical comparisons or natural langauge)\n\n* `>= 0.1` = `is positive`\n* `<= 0.3` = `is very negative`\n* `< 0.1` = `is not positive`\n* `> -0.3` = `is not very negative`\n\nSpecial case:\n\n* `is neutral` equates to `> -0.1 and < 0.1`\n* `is not neutral` equates to `< -0.1 or > 0.1`\n\nContextMod uses a normalized, weighted average from these sentiment tools:\n\n* NLP.js (english, french, german, and spanish) https://github.com/axa-group/nlp.js/blob/master/docs/v3/sentiment-analysis.md\n* (english only) vaderSentiment-js https://github.com/vaderSentiment/vaderSentiment-js/\n* (english only) wink-sentiment https://github.com/winkjs/wink-sentiment\n\nMore about the sentiment algorithms used:\n* VADER https://github.com/cjhutto/vaderSentiment\n* AFINN http://corpustext.com/reference/sentiment_afinn.html\n* Senticon https://ieeexplore.ieee.org/document/8721408\n* Pattern https://github.com/clips/pattern\n* wink https://github.com/winkjs/wink-sentiment",
|
||||
"examples": [
|
||||
"is negative",
|
||||
"> 0.2"
|
||||
],
|
||||
"pattern": "((>|>=|<|<=)\\s*(-?\\d?\\.?\\d+))|((not)?\\s*(very|extremely)?\\s*(positive|neutral|negative))",
|
||||
"type": "string"
|
||||
},
|
||||
"testOn": {
|
||||
"default": [
|
||||
"title",
|
||||
"body"
|
||||
],
|
||||
"description": "Which content from an Activity to test for `sentiment` against\n\nOnly used if the Activity being tested is a Submission -- Comments are only tested against their body\n\nIf more than one type of content is specified then all text is tested together as one string",
|
||||
"items": {
|
||||
"enum": [
|
||||
"body",
|
||||
"title"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"kind",
|
||||
"sentiment"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SubmissionState": {
|
||||
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
|
||||
"examples": [
|
||||
@@ -2844,8 +3324,8 @@
|
||||
"description": "* true/false => test whether Activity is removed or not\n* string or list of strings => test which moderator removed this Activity"
|
||||
},
|
||||
"reports": {
|
||||
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
|
||||
"description": "A string containing a comparison operator, a value to compare against, an (optional) report type filter, an (optional) qualifier for report reason, and an (optional) time constraint\n\nThe syntax is `(< OR > OR <= OR >=) number[%] [type] [reasonQualifier] [timeValue] [timeUnit]`\n\nIf only comparison and number is given then defaults to TOTAL reports on an Activity.\n\n* EX `> 2` => greater than 2 total reports\n\nType (optional) determines which type of reports to look at:\n\n* `mod` -- mod reports\n * EX `> 3 mod` => greater than 3 mod reports\n* `user` -- user reports\n * EX `> 3 user` => greater than 3 user reports\n\nReport reason qualifiers can be:\n\n* enclosed double or single quotes -- report reason contains\n * EX `> 1 \"misinformation\" => greater than 1 report with reason containing \"misinformation\"\n* enclosed in backslashes -- match regex\n * EX `> 1 \\harassment towards .*\\` => greater than 1 report with reason matching regex \\harassment towards .*\\\n\nType and reason qualifiers can be used together:\n\nEX `> 2 user \"misinformation\" => greater than 2 user reports with reasons containing \"misinformation\"\n\nThe time constraint filter reports created between NOW and [timeConstraint] in the past:\n\n* `> 3 in 30 minutes` => more than 3 reports created between NOW and 30 minutes ago\n* `> 2 user \"misinformation\" in 2 hours` => more than 2 user reports containing \"misinformation\" created between NOW and 2 hours ago",
|
||||
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)(\\s*%)?(\\s+(?:mods?|users?))?(\\s+(?:[\"'].*[\"']|\\/.*\\/))?.*(\\d+)?\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)?\\s*$",
|
||||
"type": "string"
|
||||
},
|
||||
"score": {
|
||||
@@ -2879,6 +3359,13 @@
|
||||
"title": {
|
||||
"description": "A valid regular expression to match against the title of the submission",
|
||||
"type": "string"
|
||||
},
|
||||
"upvoteRatio": {
|
||||
"description": "Compare the upvote ratio for this Submission, expressed as a whole number\n\nCan be either a comparison string or a number. If a number then CM assumes upvote ratio must be greater than or equal to this.\n\nExample:\n\n* `< 90` => less than 90% upvoted\n* 45 => greater than or equal to 45% upvoted",
|
||||
"type": [
|
||||
"string",
|
||||
"number"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2943,16 +3430,16 @@
|
||||
"properties": {
|
||||
"count": {
|
||||
"default": ">= 1",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [ascending|descending]`",
|
||||
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [in timeRange] [ascending|descending]`\n\nIf `timeRange` is given then only notes/mod actions that occur between timeRange and NOW will be returned. `timeRange` is ignored if search is `current`",
|
||||
"examples": [
|
||||
">= 1"
|
||||
],
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<duration>in\\s+\\d+\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?))?\\s*(?<extra>asc.*|desc.*)*$",
|
||||
"type": "string"
|
||||
},
|
||||
"search": {
|
||||
"default": "current",
|
||||
"description": "How to test the notes for this Author:\n\n### current\n\nOnly the most recent note is checked for `type`\n\n### total\n\nThe `count` comparison of `type` must be found within all notes\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n\n### consecutive\n\nThe `count` **number** of `type` notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"description": "How to test the Toolbox Notes or Mod Actions for this Author:\n\n### current\n\nOnly the most recent note is checked for criteria\n\n### total\n\n`count` comparison of mod actions/notes must be found within all history\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n* EX: `count: > 3 in 1 week` => Must have more than 3 notes within the last week\n\n### consecutive\n\nThe `count` **number** of mod actions/notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
|
||||
"enum": [
|
||||
"consecutive",
|
||||
"current",
|
||||
@@ -2995,6 +3482,9 @@
|
||||
"description": "Can be `Rule` or the `name` of any **named** `Rule` in your subreddit's configuration",
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/IncludesData"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/RecentActivityRuleJSONConfig"
|
||||
},
|
||||
@@ -3016,6 +3506,9 @@
|
||||
{
|
||||
"$ref": "#/definitions/RepostRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/SentimentRuleJSONConfig"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
|
||||
6020
src/Schema/Run.json
Normal file
6020
src/Schema/Run.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,4 @@
|
||||
import Snoowrap, {Comment, Subreddit, WikiPage} from "snoowrap";
|
||||
import Snoowrap, {WikiPage} from "snoowrap";
|
||||
import {Logger} from "winston";
|
||||
import {SubmissionCheck} from "../Check/SubmissionCheck";
|
||||
import {CommentCheck} from "../Check/CommentCheck";
|
||||
@@ -42,7 +42,7 @@ import {
|
||||
SYSTEM,
|
||||
USER, RuleResult, DatabaseStatisticsOperatorConfig
|
||||
} from "../Common/interfaces";
|
||||
import Submission from "snoowrap/dist/objects/Submission";
|
||||
import {Submission, Comment, Subreddit} from 'snoowrap/dist/objects';
|
||||
import {activityIsRemoved, ItemContent, itemContentPeek} from "../Utils/SnoowrapUtils";
|
||||
import LoggedError from "../Utils/LoggedError";
|
||||
import {
|
||||
@@ -57,8 +57,7 @@ import ConfigParseError from "../Utils/ConfigParseError";
|
||||
import dayjs, {Dayjs as DayjsObj} from "dayjs";
|
||||
import Action from "../Action";
|
||||
import {queue, QueueObject} from 'async';
|
||||
import {JSONConfig} from "../JsonConfig";
|
||||
import {Check, CheckStructuredJson} from "../Check";
|
||||
import {SubredditConfigHydratedData, SubredditConfigData} from "../SubredditConfigData";
|
||||
import NotificationManager from "../Notification/NotificationManager";
|
||||
import {createHistoricalDisplayDefaults} from "../Common/defaults";
|
||||
import {ExtendedSnoowrap} from "../Utils/SnoowrapClients";
|
||||
@@ -98,6 +97,10 @@ import {
|
||||
} from "../Common/Infrastructure/Atomic";
|
||||
import {parseFromJsonOrYamlToObject} from "../Common/Config/ConfigUtil";
|
||||
import {FilterCriteriaDefaults} from "../Common/Infrastructure/Filters/FilterShapes";
|
||||
import {InfluxClient} from "../Common/Influx/InfluxClient";
|
||||
import { Point } from "@influxdata/influxdb-client";
|
||||
import {NormalizedManagerResponse} from "../Web/Common/interfaces";
|
||||
import {guestEntityToApiGuest} from "../Common/Entities/Guest/GuestEntity";
|
||||
|
||||
export interface RunningState {
|
||||
state: RunState,
|
||||
@@ -139,6 +142,7 @@ export interface RuntimeManagerOptions extends Omit<ManagerOptions, 'filterCrite
|
||||
managerEntity: ManagerEntity
|
||||
filterCriteriaDefaults?: FilterCriteriaDefaults
|
||||
statDefaults: DatabaseStatisticsOperatorConfig
|
||||
influxClients: InfluxClient[]
|
||||
}
|
||||
|
||||
interface QueuedIdentifier {
|
||||
@@ -227,7 +231,10 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
rulesUniqueSample: number[] = [];
|
||||
rulesUniqueSampleInterval: any;
|
||||
rulesUniqueRollingAvg: number = 0;
|
||||
actionedEvents: ActionedEvent[] = [];
|
||||
|
||||
modqueueInterval: number = 0;
|
||||
|
||||
delayedQueueInterval: any;
|
||||
|
||||
processEmitter: EventEmitter = new EventEmitter();
|
||||
|
||||
@@ -235,6 +242,8 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
authorRepo!: Repository<AuthorEntity>
|
||||
eventRepo!: Repository<CMEvent>;
|
||||
|
||||
influxClients: InfluxClient[] = [];
|
||||
|
||||
getStats = async (): Promise<ManagerStats> => {
|
||||
const data: any = {
|
||||
eventsAvg: formatNumber(this.eventsRollingAvg),
|
||||
@@ -272,12 +281,11 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
return {
|
||||
id: x.id,
|
||||
activityId: x.activity.name,
|
||||
permalink: x.activity.permalink,
|
||||
permalink: x.activity.permalink, // TODO construct this without having to fetch activity
|
||||
submissionId: asComment(x.activity) ? x.activity.link_id : undefined,
|
||||
author: x.author,
|
||||
queuedAt: x.queuedAt.unix(),
|
||||
durationMilli: x.delay.asSeconds(),
|
||||
duration: x.delay.humanize(),
|
||||
duration: x.delay.asSeconds(),
|
||||
source: `${x.action}${x.identifier !== undefined ? ` (${x.identifier})` : ''}`,
|
||||
subreddit: this.subreddit.display_name_prefixed
|
||||
}
|
||||
@@ -307,7 +315,8 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
botEntity,
|
||||
managerEntity,
|
||||
statDefaults,
|
||||
retention
|
||||
retention,
|
||||
influxClients,
|
||||
} = opts || {};
|
||||
this.displayLabel = opts.nickname || `${sub.display_name_prefixed}`;
|
||||
const getLabels = this.getCurrentLabels;
|
||||
@@ -324,7 +333,11 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
}, mergeArr);
|
||||
this.logger.stream().on('log', (log: LogInfo) => {
|
||||
if(log.subreddit !== undefined && log.subreddit === this.getDisplay()) {
|
||||
this.logs = [log, ...this.logs].slice(0, 301);
|
||||
this.logs.unshift(log);
|
||||
if(this.logs.length > 300) {
|
||||
// remove all elements starting from the 300th index (301st item)
|
||||
this.logs.splice(300);
|
||||
}
|
||||
}
|
||||
});
|
||||
this.globalDryRun = dryRun;
|
||||
@@ -337,6 +350,9 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
this.pollingRetryHandler = createRetryHandler({maxRequestRetry: 3, maxOtherRetry: 2}, this.logger);
|
||||
this.subreddit = sub;
|
||||
this.botEntity = botEntity;
|
||||
for(const client of influxClients) {
|
||||
this.influxClients.push(client.childClient(this.logger, {manager: this.displayLabel, subreddit: sub.display_name_prefixed}));
|
||||
}
|
||||
|
||||
this.managerEntity = managerEntity;
|
||||
// always init in stopped state but use last invokee to determine if we should start the manager automatically afterwards
|
||||
@@ -399,6 +415,45 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
}
|
||||
})(this), 10000);
|
||||
|
||||
this.delayedQueueInterval = setInterval((function(self) {
|
||||
return function() {
|
||||
if(!self.queue.paused && self.resources !== undefined) {
|
||||
let index = 0;
|
||||
let anyQueued = false;
|
||||
for(const ar of self.resources.delayedItems) {
|
||||
if(ar.queuedAt.add(ar.delay).isSameOrBefore(dayjs())) {
|
||||
anyQueued = true;
|
||||
self.logger.info(`Activity ${ar.activity.name} dispatched at ${ar.queuedAt.format('HH:mm:ss z')} (delayed for ${ar.delay.humanize()}) is now being queued.`, {leaf: 'Delayed Activities'});
|
||||
self.firehose.push({
|
||||
activity: ar.activity,
|
||||
options: {
|
||||
refresh: true,
|
||||
// @ts-ignore
|
||||
source: ar.identifier === undefined ? ar.type : `${ar.type}:${ar.identifier}`,
|
||||
initialGoto: ar.goto,
|
||||
activitySource: {
|
||||
id: ar.id,
|
||||
queuedAt: ar.queuedAt,
|
||||
delay: ar.delay,
|
||||
action: ar.action,
|
||||
goto: ar.goto,
|
||||
identifier: ar.identifier,
|
||||
type: ar.type
|
||||
},
|
||||
dryRun: ar.dryRun,
|
||||
}
|
||||
});
|
||||
self.resources.removeDelayedActivity(ar.id);
|
||||
}
|
||||
index++;
|
||||
}
|
||||
if(!anyQueued) {
|
||||
self.logger.debug('No Activities ready to queue', {leaf: 'Delayed Activities'});
|
||||
}
|
||||
}
|
||||
}
|
||||
})(this), 5000); // every 5 seconds
|
||||
|
||||
this.processEmitter.on('notify', (payload: NotificationEventPayload) => {
|
||||
this.notificationManager.handle(payload.type, payload.title, payload.body, payload.causedBy, payload.logLevel);
|
||||
});
|
||||
@@ -449,7 +504,7 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
//
|
||||
// if we insert the same item again because it is currently being processed AND THEN we get the item AGAIN we only want to update the newest meta
|
||||
// so search the array backwards to get the neweset only
|
||||
const queuedItemIndex = findLastIndex(this.queuedItemsMeta, x => x.id === task.activity.id);
|
||||
const queuedItemIndex = findLastIndex(this.queuedItemsMeta, x => x.id === task.activity.name);
|
||||
if(queuedItemIndex !== -1) {
|
||||
const itemMeta = this.queuedItemsMeta[queuedItemIndex];
|
||||
let msg = `Item ${itemMeta.id} is already ${itemMeta.state}.`;
|
||||
@@ -458,11 +513,11 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
this.queuedItemsMeta.splice(queuedItemIndex, 1, {...itemMeta, shouldRefresh: true});
|
||||
} else {
|
||||
this.logger.debug(`${msg} Re-queuing item but will also refresh data before processing.`);
|
||||
this.queuedItemsMeta.push({id: task.activity.id, shouldRefresh: true, state: 'queued'});
|
||||
this.queuedItemsMeta.push({id: task.activity.name, shouldRefresh: true, state: 'queued'});
|
||||
this.queue.push(task);
|
||||
}
|
||||
} else {
|
||||
this.queuedItemsMeta.push({id: task.activity.id, shouldRefresh: false, state: 'queued'});
|
||||
this.queuedItemsMeta.push({id: task.activity.name, shouldRefresh: false, state: 'queued'});
|
||||
this.queue.push(task);
|
||||
}
|
||||
|
||||
@@ -493,40 +548,6 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
, 1);
|
||||
}
|
||||
|
||||
protected async startDelayQueue() {
|
||||
while(this.queueState.state === RUNNING) {
|
||||
let index = 0;
|
||||
for(const ar of this.resources.delayedItems) {
|
||||
if(!ar.processing && ar.queuedAt.add(ar.delay).isSameOrBefore(dayjs())) {
|
||||
this.logger.info(`Delayed Activity ${ar.activity.name} is being queued.`);
|
||||
await this.firehose.push({
|
||||
activity: ar.activity,
|
||||
options: {
|
||||
refresh: true,
|
||||
// @ts-ignore
|
||||
source: ar.identifier === undefined ? ar.type : `${ar.type}:${ar.identifier}`,
|
||||
initialGoto: ar.goto,
|
||||
activitySource: {
|
||||
id: ar.id,
|
||||
queuedAt: ar.queuedAt,
|
||||
delay: ar.delay,
|
||||
action: ar.action,
|
||||
goto: ar.goto,
|
||||
identifier: ar.identifier,
|
||||
type: ar.type
|
||||
},
|
||||
dryRun: ar.dryRun,
|
||||
}
|
||||
});
|
||||
this.resources.delayedItems.splice(index, 1, {...ar, processing: true});
|
||||
}
|
||||
index++;
|
||||
}
|
||||
// sleep for 5 seconds
|
||||
await sleep(5000);
|
||||
}
|
||||
}
|
||||
|
||||
protected generateQueue(maxWorkers: number) {
|
||||
if (maxWorkers > 1) {
|
||||
this.logger.warn(`Setting max queue workers above 1 (specified: ${maxWorkers}) may have detrimental effects to log readability and api usage. Consult the documentation before using this advanced/experimental feature.`);
|
||||
@@ -538,7 +559,7 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
await sleep(this.delayBy * 1000);
|
||||
}
|
||||
|
||||
const queuedItemIndex = this.queuedItemsMeta.findIndex(x => x.id === task.activity.id);
|
||||
const queuedItemIndex = this.queuedItemsMeta.findIndex(x => x.id === task.activity.name);
|
||||
try {
|
||||
const itemMeta = this.queuedItemsMeta[queuedItemIndex];
|
||||
this.queuedItemsMeta.splice(queuedItemIndex, 1, {...itemMeta, state: 'processing'});
|
||||
@@ -551,9 +572,6 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
} finally {
|
||||
// always remove item meta regardless of success or failure since we are done with it meow
|
||||
this.queuedItemsMeta.splice(queuedItemIndex, 1);
|
||||
if(task.options.activitySource?.id !== undefined) {
|
||||
await this.resources.removeDelayedActivity(task.options.activitySource?.id);
|
||||
}
|
||||
}
|
||||
}
|
||||
, maxWorkers);
|
||||
@@ -648,7 +666,7 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
this.logger.info('Subreddit-specific options updated');
|
||||
this.logger.info('Building Runs and Checks...');
|
||||
|
||||
const structuredRuns = configBuilder.parseToStructured(validJson, this.filterCriteriaDefaults, this.postCheckBehaviorDefaults);
|
||||
const structuredRuns = await configBuilder.parseToStructured(validJson, this.resources, this.filterCriteriaDefaults, this.postCheckBehaviorDefaults);
|
||||
|
||||
let runs: Run[] = [];
|
||||
|
||||
@@ -875,7 +893,6 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
const checkType = isSubmission(activity) ? 'Submission' : 'Comment';
|
||||
let item = activity,
|
||||
runtimeShouldRefresh = false;
|
||||
const itemId = await item.id;
|
||||
|
||||
const {
|
||||
delayUntil,
|
||||
@@ -885,6 +902,69 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
force = false,
|
||||
} = options;
|
||||
|
||||
const event = new CMEvent();
|
||||
|
||||
if(refresh) {
|
||||
this.logger.verbose(`Refreshed data`);
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
}
|
||||
|
||||
let activityEntity: Activity;
|
||||
const existingEntity = await this.activityRepo.findOneBy({_id: item.name});
|
||||
|
||||
|
||||
/**
|
||||
* Report Tracking
|
||||
*
|
||||
* Store ids for activities we process. Enables us to be sure of whether modqueue has been monitored since we've last seen the activity
|
||||
*
|
||||
* */
|
||||
let lastKnownStateTimestamp = await this.resources.getActivityLastSeenDate(item.name);
|
||||
if(lastKnownStateTimestamp !== undefined && lastKnownStateTimestamp.isBefore(this.startedAt)) {
|
||||
// if we last saw this activity BEFORE we started event polling (modqueue) then it's not useful to us
|
||||
lastKnownStateTimestamp = undefined;
|
||||
}
|
||||
await this.resources.setActivityLastSeenDate(item.name);
|
||||
|
||||
// if modqueue is running then we know we are checking for new reports every X seconds
|
||||
if(options.activitySource.identifier === 'modqueue') {
|
||||
// if the activity is from modqueue and only has one report then we know that report was just created
|
||||
if(item.num_reports === 1
|
||||
// otherwise if it has more than one report AND we have seen it (its only seen if it has already been stored (in below block))
|
||||
// then we are reasonably sure that any reports created were in the last X seconds
|
||||
|| (item.num_reports > 1 && lastKnownStateTimestamp !== undefined)) {
|
||||
|
||||
lastKnownStateTimestamp = dayjs().subtract(this.modqueueInterval, 'seconds');
|
||||
}
|
||||
}
|
||||
// if activity is not from modqueue then known good timestamps for "time between last known report and now" is dependent on these things:
|
||||
// 1) (most accurate) lastKnownStateTimestamp -- only available if activity either had 0 reports OR 1+ and existing reports have been stored (see below code)
|
||||
// 2) last stored report time from Activity
|
||||
// 3) create date of activity
|
||||
|
||||
let shouldPersistReports = false;
|
||||
|
||||
if (existingEntity === null) {
|
||||
activityEntity = Activity.fromSnoowrapActivity(this.managerEntity.subreddit, activity, lastKnownStateTimestamp);
|
||||
// always persist if activity is not already persisted and any reports exist
|
||||
if (item.num_reports > 0) {
|
||||
shouldPersistReports = true;
|
||||
}
|
||||
} else {
|
||||
activityEntity = existingEntity;
|
||||
// always persist if reports need to be updated
|
||||
if (activityEntity.syncReports(item, lastKnownStateTimestamp)) {
|
||||
shouldPersistReports = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (shouldPersistReports) {
|
||||
activityEntity = await this.activityRepo.save(activityEntity);
|
||||
}
|
||||
|
||||
const itemId = await item.id;
|
||||
|
||||
if(await this.resources.hasRecentSelf(item)) {
|
||||
let recentMsg = `Found in Activities recently (last ${this.resources.selfTTL} seconds) modified/created by this bot`;
|
||||
if(force) {
|
||||
@@ -895,15 +975,6 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
}
|
||||
}
|
||||
|
||||
let activityEntity: Activity;
|
||||
const existingEntity = await this.activityRepo.findOneBy({_id: item.name});
|
||||
if(existingEntity === null) {
|
||||
activityEntity = Activity.fromSnoowrapActivity(this.managerEntity.subreddit, activity);
|
||||
} else {
|
||||
activityEntity = existingEntity;
|
||||
}
|
||||
|
||||
const event = new CMEvent();
|
||||
event.triggered = false;
|
||||
event.manager = this.managerEntity;
|
||||
event.activity = activityEntity;
|
||||
@@ -918,7 +989,6 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
itemIdentifiers.push(`${checkType === 'Submission' ? 'SUB' : 'COM'} ${itemId}`);
|
||||
this.currentLabels = itemIdentifiers;
|
||||
let ePeek = '';
|
||||
let peekParts: ItemContent;
|
||||
try {
|
||||
const [peek, { content: peekContent }] = await itemContentPeek(item);
|
||||
ePeek = peekContent;
|
||||
@@ -963,7 +1033,6 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
delay: dayjs.duration(remaining, 'seconds'),
|
||||
id: 'notUsed',
|
||||
queuedAt: dayjs(),
|
||||
processing: false,
|
||||
activity,
|
||||
author: getActivityAuthorName(activity.author),
|
||||
});
|
||||
@@ -977,7 +1046,7 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
}
|
||||
// refresh signal from firehose if activity was ingested multiple times before processing or re-queued while processing
|
||||
// want to make sure we have the most recent data
|
||||
if(runtimeShouldRefresh || refresh) {
|
||||
if(runtimeShouldRefresh) {
|
||||
this.logger.verbose(`Refreshed data`);
|
||||
// @ts-ignore
|
||||
item = await activity.refresh();
|
||||
@@ -1075,6 +1144,10 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
event.runResults = runResults;
|
||||
//actionedEvent.runResults = runResults;
|
||||
|
||||
const checksRun = actionedEvent.runResults.map(x => x.checkResults).flat().length;
|
||||
let actionsRun = actionedEvent.runResults.map(x => x.checkResults?.map(y => y.actionResults)).flat().length;
|
||||
let totalRulesRun = actionedEvent.runResults.map(x => x.checkResults?.map(y => y.ruleResults)).flat(5).length;
|
||||
|
||||
// determine if event should be recorded
|
||||
const allOutputs = [...new Set(runResults.map(x => x.checkResults.map(y => y.recordOutputs ?? [])).flat(2).filter(x => recordOutputTypes.includes(x)))];
|
||||
if(allOutputs.length > 0) {
|
||||
@@ -1106,11 +1179,108 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
}
|
||||
await this.eventRepo.save(event);
|
||||
}
|
||||
}
|
||||
if (allOutputs.includes('influx') && this.influxClients.length > 0) {
|
||||
try {
|
||||
const time = dayjs().valueOf()
|
||||
|
||||
const checksRun = actionedEvent.runResults.map(x => x.checkResults).flat().length;
|
||||
let actionsRun = actionedEvent.runResults.map(x => x.checkResults?.map(y => y.actionResults)).flat().length;
|
||||
let totalRulesRun = actionedEvent.runResults.map(x => x.checkResults?.map(y => y.ruleResults)).flat().length;
|
||||
const measurements: Point[] = [];
|
||||
|
||||
measurements.push(new Point('event')
|
||||
.timestamp(time)
|
||||
.tag('triggered', event.triggered ? '1' : '0')
|
||||
.tag('activityType', isSubmission(item) ? 'submission' : 'comment')
|
||||
.tag('sourceIdentifier', event.source.identifier ?? 'unknown')
|
||||
.tag('sourceType', event.source.type)
|
||||
.stringField('eventId', event.id)
|
||||
.stringField('activityId', event.activity.id)
|
||||
.stringField('author', actionedEvent.activity.author)
|
||||
.intField('processingTime', time - event.processedAt.valueOf())
|
||||
.intField('queuedTime', event.processedAt.valueOf() - event.queuedAt.valueOf())
|
||||
.intField('runsProcessed', actionedEvent.runResults.length)
|
||||
.intField('runsTriggered', actionedEvent.runResults.filter(x => x.triggered).length)
|
||||
.intField('checksProcessed', checksRun)
|
||||
.intField('checksTriggered', actionedEvent.runResults.map(x => x.checkResults).flat().filter(x => x.triggered).length)
|
||||
.intField('totalRulesProcessed', totalRulesRun)
|
||||
.intField('rulesTriggered', actionedEvent.runResults.map(x => x.checkResults?.map(y => y.ruleResults)).flat(5).filter((x: RuleResultEntity) => x.triggered === true).length)
|
||||
.intField('uniqueRulesProcessed', allRuleResults.length)
|
||||
.intField('cachedRulesProcessed', totalRulesRun - allRuleResults.length)
|
||||
.intField('actionsProcessed', actionsRun)
|
||||
.intField('apiUsage', startingApiLimit - this.client.ratelimitRemaining));
|
||||
|
||||
const defaultPoint = () => new Point('triggeredEntity')
|
||||
.timestamp(time)
|
||||
.tag('activityType', isSubmission(item) ? 'submission' : 'comment')
|
||||
.tag('sourceIdentifier', event.source.identifier ?? 'unknown')
|
||||
.tag('sourceType', event.source.type)
|
||||
.stringField('activityId', event.activity.id)
|
||||
.stringField('author', actionedEvent.activity.author)
|
||||
.stringField('eventId', event.id);
|
||||
|
||||
for (const r of event.runResults) {
|
||||
if (r.triggered) {
|
||||
measurements.push(defaultPoint()
|
||||
.tag('entityType', 'run')
|
||||
.tag('name', r.run.name));
|
||||
for (const c of r.checkResults) {
|
||||
if (c.triggered) {
|
||||
measurements.push(defaultPoint()
|
||||
.tag('entityType', 'check')
|
||||
.stringField('name', c.check.name)
|
||||
.tag('fromCache', c.fromCache ? '1' : '0'));
|
||||
|
||||
if (c.ruleResults !== undefined) {
|
||||
for (const ru of c.ruleResults) {
|
||||
if (ru.result.triggered) {
|
||||
measurements.push(defaultPoint()
|
||||
.tag('entityType', 'rule')
|
||||
.stringField('name', ru.result.premise.name)
|
||||
.tag('fromCache', ru.result.fromCache ? '1' : '0'))
|
||||
}
|
||||
}
|
||||
}
|
||||
if (c.ruleSetResults !== undefined) {
|
||||
for (const rs of c.ruleSetResults) {
|
||||
if (rs.result.triggered) {
|
||||
measurements.push(defaultPoint()
|
||||
.tag('entityType', 'ruleSet'));
|
||||
for (const ru of rs.result.results) {
|
||||
if (ru.triggered) {
|
||||
measurements.push(defaultPoint()
|
||||
.tag('entityType', 'rule')
|
||||
.stringField('name', ru.premise.name))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (c.actionResults !== undefined) {
|
||||
for (const a of c.actionResults) {
|
||||
if (a.run) {
|
||||
measurements.push(defaultPoint()
|
||||
.tag('entityType', 'action')
|
||||
.stringField('name', a.premise.name)
|
||||
.tag('dryRun', a.dryRun ? '1' : '0')
|
||||
.tag('succes', a.success ? '1' : '0')
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const client of this.influxClients) {
|
||||
await client.writePoint(measurements);
|
||||
}
|
||||
} catch (e: any) {
|
||||
this.logger.error(new CMError('Error occurred while building or sending Influx data', {
|
||||
cause: e,
|
||||
isSerious: false
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
this.logger.verbose(`Run Stats: Checks ${checksRun} | Rules => Total: ${totalRulesRun} Unique: ${allRuleResults.length} Cached: ${totalRulesRun - allRuleResults.length} Rolling Avg: ~${formatNumber(this.rulesUniqueRollingAvg)}/s | Actions ${actionsRun}`);
|
||||
this.logger.verbose(`Reddit API Stats: Initial ${startingApiLimit} | Current ${this.client.ratelimitRemaining} | Used ~${startingApiLimit - this.client.ratelimitRemaining} | Events ~${formatNumber(this.eventsRollingAvg)}/s`);
|
||||
@@ -1325,6 +1495,11 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
|
||||
async startQueue(causedBy: Invokee = 'system', options?: ManagerStateChangeOption) {
|
||||
|
||||
if(!this.validConfigLoaded) {
|
||||
this.logger.warn('Cannot start queue while manager has an invalid configuration');
|
||||
return;
|
||||
}
|
||||
|
||||
if(this.activityRepo === undefined) {
|
||||
this.activityRepo = this.resources.database.getRepository(Activity);
|
||||
}
|
||||
@@ -1348,7 +1523,6 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
state: RUNNING,
|
||||
causedBy
|
||||
}
|
||||
this.startDelayQueue();
|
||||
if(!suppressNotification) {
|
||||
this.notificationManager.handle('runStateChanged', 'Queue Started', reason, causedBy);
|
||||
}
|
||||
@@ -1466,6 +1640,11 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
s.startInterval();
|
||||
}
|
||||
this.startedAt = dayjs();
|
||||
|
||||
const modQueuePollOpts = this.pollOptions.find(x => x.pollOn === 'modqueue');
|
||||
if(modQueuePollOpts !== undefined) {
|
||||
this.modqueueInterval = modQueuePollOpts.interval;
|
||||
}
|
||||
}
|
||||
|
||||
this.logger.info('Event polling STARTED');
|
||||
@@ -1563,6 +1742,14 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
await this.syncRunningState('managerState');
|
||||
}
|
||||
|
||||
async destroy(causedBy: Invokee = 'system', options?: ManagerStateChangeOption) {
|
||||
await this.stop(causedBy, options);
|
||||
clearInterval(this.eventsSampleInterval);
|
||||
clearInterval(this.delayedQueueInterval);
|
||||
clearInterval(this.rulesUniqueSampleInterval)
|
||||
await this.cacheManager.destroy(this.subreddit.display_name);
|
||||
}
|
||||
|
||||
setInitialRunningState(managerEntity: RunningStateEntities, type: RunningStateTypes): RunningState {
|
||||
if(managerEntity[type].runType.name === 'stopped' && managerEntity[type].invokee.name === 'user') {
|
||||
return {state: STOPPED, causedBy: 'user'};
|
||||
@@ -1583,4 +1770,41 @@ export class Manager extends EventEmitter implements RunningStates {
|
||||
|
||||
await this.cacheManager.defaultDatabase.getRepository(ManagerEntity).save(this.managerEntity);
|
||||
}
|
||||
|
||||
async writeHealthMetrics(time?: number) {
|
||||
if (this.influxClients.length > 0) {
|
||||
const metric = new Point('managerHealth')
|
||||
.intField('delayedActivities', this.resources !== undefined ? this.resources.delayedItems.length : 0)
|
||||
.intField('processing', this.queue.running())
|
||||
.intField('queued', this.queue.length())
|
||||
.booleanField('eventsRunning', this.eventsState.state === RUNNING)
|
||||
.booleanField('queueRunning', this.queueState.state === RUNNING)
|
||||
.booleanField('running', this.managerState.state === RUNNING)
|
||||
.intField('uptime', this.startedAt !== undefined ? dayjs().diff(this.startedAt, 'seconds') : 0)
|
||||
.intField('configAge', this.lastWikiRevision === undefined ? 0 : dayjs().diff(this.lastWikiRevision, 'seconds'));
|
||||
|
||||
if (this.resources !== undefined) {
|
||||
const {req, miss} = this.resources.getCacheTotals();
|
||||
metric.intField('cacheRequests', req)
|
||||
.intField('cacheMisses', miss);
|
||||
}
|
||||
|
||||
if (time !== undefined) {
|
||||
metric.timestamp(time);
|
||||
}
|
||||
|
||||
for (const client of this.influxClients) {
|
||||
await client.writePoint(metric);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
toNormalizedManager(): NormalizedManagerResponse {
|
||||
return {
|
||||
name: this.displayLabel,
|
||||
subreddit: this.subreddit.display_name,
|
||||
subredditNormal: parseRedditEntity(this.subreddit.display_name).name,
|
||||
guests: this.managerEntity.getGuests().map(x => guestEntityToApiGuest(x))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
53
src/Subreddit/ModNotes/ModAction.ts
Normal file
53
src/Subreddit/ModNotes/ModAction.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import {Submission, RedditUser, Comment, Subreddit, PrivateMessage} from "snoowrap/dist/objects"
|
||||
import {generateSnoowrapEntityFromRedditThing, parseRedditFullname} from "../../util"
|
||||
import Snoowrap from "snoowrap";
|
||||
|
||||
//import {ExtendedSnoowrap} from "../../Utils/SnoowrapClients";
|
||||
|
||||
export interface ModActionRaw {
|
||||
action?: string | null
|
||||
reddit_id?: string | null
|
||||
details?: string | null
|
||||
description?: string | null
|
||||
}
|
||||
|
||||
export class ModAction {
|
||||
action?: string
|
||||
actedOn?: RedditUser | Submission | Comment | Subreddit | PrivateMessage
|
||||
details?: string
|
||||
description?: string
|
||||
|
||||
constructor(data: ModActionRaw | undefined, client: Snoowrap) {
|
||||
const {
|
||||
action,
|
||||
reddit_id,
|
||||
details,
|
||||
description
|
||||
} = data || {};
|
||||
this.action = action !== null ? action : undefined;
|
||||
this.details = details !== null ? details : undefined;
|
||||
this.description = description !== null ? description : undefined;
|
||||
|
||||
if (reddit_id !== null && reddit_id !== undefined) {
|
||||
const thing = parseRedditFullname(reddit_id);
|
||||
if (thing !== undefined) {
|
||||
this.actedOn = generateSnoowrapEntityFromRedditThing(thing, client);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
toRaw(): ModActionRaw {
|
||||
return {
|
||||
action: this.action,
|
||||
details: this.details,
|
||||
reddit_id: this.actedOn !== undefined ? this.actedOn.id : undefined,
|
||||
description: this.description
|
||||
}
|
||||
}
|
||||
|
||||
toJSON() {
|
||||
return this.toRaw();
|
||||
}
|
||||
}
|
||||
|
||||
export default ModAction;
|
||||
119
src/Subreddit/ModNotes/ModNote.ts
Normal file
119
src/Subreddit/ModNotes/ModNote.ts
Normal file
@@ -0,0 +1,119 @@
|
||||
import {ModAction, ModActionRaw} from "./ModAction";
|
||||
import {Submission, RedditUser, Comment, Subreddit} from "snoowrap/dist/objects"
|
||||
import {ModUserNote, ModUserNoteRaw} from "./ModUserNote";
|
||||
//import {ExtendedSnoowrap} from "../../Utils/SnoowrapClients";
|
||||
import dayjs, {Dayjs} from "dayjs";
|
||||
import {generateSnoowrapEntityFromRedditThing, parseRedditFullname} from "../../util";
|
||||
import Snoowrap from "snoowrap";
|
||||
import {ModActionType, ModUserNoteLabel} from "../../Common/Infrastructure/Atomic";
|
||||
import {RedditThing} from "../../Common/Infrastructure/Reddit";
|
||||
|
||||
export interface ModNoteSnoowrapPopulated extends Omit<ModNoteRaw, 'subreddit' | 'user'> {
|
||||
subreddit: Subreddit
|
||||
user: RedditUser
|
||||
}
|
||||
|
||||
export interface CreateModNoteData {
|
||||
user: RedditUser
|
||||
subreddit: Subreddit
|
||||
activity?: Submission | Comment | RedditUser
|
||||
label?: ModUserNoteLabel
|
||||
note?: string
|
||||
}
|
||||
|
||||
export const asCreateModNoteData = (val: any): val is CreateModNoteData => {
|
||||
if(val !== null && typeof val === 'object') {
|
||||
return val.user instanceof RedditUser && val.subreddit instanceof Subreddit && typeof val.note === 'string';
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
export interface ModNoteRaw {
|
||||
subreddit: string
|
||||
subreddit_id: string
|
||||
|
||||
user: string
|
||||
user_id: string
|
||||
|
||||
operator: string
|
||||
operator_id: string
|
||||
|
||||
id: string
|
||||
created_at: number
|
||||
cursor?: string
|
||||
type: ModActionType | string
|
||||
mod_action_data: ModActionRaw
|
||||
user_note_data: ModUserNoteRaw
|
||||
}
|
||||
|
||||
export class ModNote {
|
||||
|
||||
createdBy: RedditUser | Subreddit
|
||||
createdByName?: string
|
||||
createdAt: Dayjs
|
||||
action: ModAction
|
||||
note: ModUserNote
|
||||
user: RedditUser
|
||||
operatorVal: string
|
||||
cursor?: string
|
||||
id: string
|
||||
subreddit: Subreddit
|
||||
type: ModActionType | string
|
||||
|
||||
|
||||
constructor(data: ModNoteRaw, client: Snoowrap) {
|
||||
|
||||
this.createdByName = data.operator;
|
||||
this.createdAt = dayjs.unix(data.created_at);
|
||||
this.id = data.id;
|
||||
this.type = data.type;
|
||||
this.cursor = data.cursor;
|
||||
|
||||
this.subreddit = new Subreddit({display_name: data.subreddit, id: data.subreddit_id}, client, false);
|
||||
this.user = new RedditUser({name: data.user, id: data.user_id}, client, false);
|
||||
|
||||
this.operatorVal = data.operator;
|
||||
|
||||
const opThing = parseRedditFullname(data.operator_id) as RedditThing;
|
||||
this.createdBy = generateSnoowrapEntityFromRedditThing(opThing, client) as RedditUser | Subreddit;
|
||||
if (this.createdBy instanceof RedditUser) {
|
||||
this.createdBy.name = data.operator;
|
||||
}
|
||||
|
||||
this.action = new ModAction(data.mod_action_data, client);
|
||||
if (this.action.actedOn instanceof RedditUser && this.action.actedOn.id === this.user.id) {
|
||||
this.action.actedOn = this.user;
|
||||
}
|
||||
|
||||
this.note = new ModUserNote(data.user_note_data, client);
|
||||
if (this.note.actedOn instanceof RedditUser && this.note.actedOn.id === this.user.id) {
|
||||
this.note.actedOn = this.user;
|
||||
}
|
||||
}
|
||||
|
||||
toRaw(): ModNoteRaw {
|
||||
return {
|
||||
subreddit: this.subreddit.display_name,
|
||||
subreddit_id: this.subreddit.id,
|
||||
|
||||
user: this.user.name,
|
||||
user_id: this.user.id,
|
||||
|
||||
operator: this.operatorVal,
|
||||
operator_id: this.createdBy.id,
|
||||
|
||||
mod_action_data: this.action.toRaw(),
|
||||
|
||||
id: this.id,
|
||||
user_note_data: this.note.toRaw(),
|
||||
created_at: this.createdAt.unix(),
|
||||
type: this.type,
|
||||
cursor: this.cursor
|
||||
}
|
||||
}
|
||||
|
||||
toJSON() {
|
||||
return this.toRaw();
|
||||
}
|
||||
}
|
||||
48
src/Subreddit/ModNotes/ModUserNote.ts
Normal file
48
src/Subreddit/ModNotes/ModUserNote.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import {Comment, PrivateMessage, RedditUser, Submission} from "snoowrap/dist/objects";
|
||||
import {ModUserNoteLabel} from "../../Common/Infrastructure/Atomic";
|
||||
//import {ExtendedSnoowrap} from "../../Utils/SnoowrapClients";
|
||||
import {generateSnoowrapEntityFromRedditThing, parseRedditFullname} from "../../util";
|
||||
import Snoowrap from "snoowrap";
|
||||
|
||||
export interface ModUserNoteRaw {
|
||||
note?: string | null
|
||||
reddit_id?: string | null
|
||||
label?: string | null
|
||||
}
|
||||
|
||||
export class ModUserNote {
|
||||
note?: string
|
||||
actedOn?: RedditUser | Submission | Comment | PrivateMessage
|
||||
label?: ModUserNoteLabel
|
||||
|
||||
constructor(data: ModUserNoteRaw | undefined, client: Snoowrap) {
|
||||
const {
|
||||
note,
|
||||
reddit_id,
|
||||
label
|
||||
} = data || {};
|
||||
this.note = note !== null ? note : undefined;
|
||||
this.label = label !== null ? label as ModUserNoteLabel : undefined;
|
||||
|
||||
if (reddit_id !== null && reddit_id !== undefined) {
|
||||
const thing = parseRedditFullname(reddit_id);
|
||||
if (thing !== undefined) {
|
||||
this.actedOn = generateSnoowrapEntityFromRedditThing(thing, client) as RedditUser | Submission | Comment;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
toRaw(): ModUserNoteRaw {
|
||||
return {
|
||||
note: this.note,
|
||||
reddit_id: this.actedOn !== undefined ? this.actedOn.id : undefined,
|
||||
label: this.label
|
||||
}
|
||||
}
|
||||
|
||||
toJSON() {
|
||||
return this.toRaw();
|
||||
}
|
||||
}
|
||||
|
||||
export default ModUserNote;
|
||||
@@ -1,11 +1,12 @@
|
||||
import {Poll, SnooStormOptions} from "snoostorm"
|
||||
import Snoowrap, {Listing} from "snoowrap";
|
||||
import Snoowrap, {Listing, RedditContent} from "snoowrap";
|
||||
import {EventEmitter} from "events";
|
||||
import {PollConfiguration} from "snoostorm/out/util/Poll";
|
||||
import {DEFAULT_POLLING_INTERVAL} from "../Common/interfaces";
|
||||
import {mergeArr, parseDuration, random} from "../util";
|
||||
import { Logger } from "winston";
|
||||
import {ErrorWithCause} from "pony-cause";
|
||||
import dayjs, {Dayjs as DayjsObj} from "dayjs";
|
||||
|
||||
type Awaitable<T> = Promise<T> | T;
|
||||
|
||||
@@ -16,13 +17,15 @@ interface RCBPollingOptions<T> extends SnooStormOptions {
|
||||
name?: string,
|
||||
processed?: Set<T[keyof T]>
|
||||
label?: string
|
||||
dateCutoff?: boolean
|
||||
}
|
||||
|
||||
interface RCBPollConfiguration<T> extends PollConfiguration<T>,RCBPollingOptions<T> {
|
||||
get: () => Promise<Listing<T>>
|
||||
dateCutoff: boolean
|
||||
}
|
||||
|
||||
export class SPoll<T extends object> extends Poll<T> {
|
||||
export class SPoll<T extends RedditContent<object>> extends Poll<T> {
|
||||
identifier: keyof T;
|
||||
getter: () => Promise<Listing<T>>;
|
||||
frequency;
|
||||
@@ -31,6 +34,8 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
// -- that is, we don't want to emit the items we immediately fetch on a fresh poll start since they existed "before" polling started
|
||||
newStart: boolean = true;
|
||||
enforceContinuity: boolean;
|
||||
useDateCutoff: boolean;
|
||||
dateCutoff?: DayjsObj;
|
||||
randInterval?: { clear: () => void };
|
||||
name: string = 'Reddit Stream';
|
||||
logger: Logger;
|
||||
@@ -47,7 +52,8 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
name,
|
||||
subreddit,
|
||||
label = 'Polling',
|
||||
processed
|
||||
processed,
|
||||
dateCutoff,
|
||||
} = options;
|
||||
this.subreddit = subreddit;
|
||||
this.name = name !== undefined ? name : this.name;
|
||||
@@ -56,6 +62,7 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
this.getter = get;
|
||||
this.frequency = frequency;
|
||||
this.enforceContinuity = enforceContinuity;
|
||||
this.useDateCutoff = dateCutoff;
|
||||
|
||||
// if we pass in processed on init the intention is to "continue" from where the previous stream left off
|
||||
// WITHOUT new start behavior
|
||||
@@ -80,7 +87,7 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
// but only continue iterating if stream enforces continuity and we've only seen new items so far
|
||||
while(page === 1 || (self.enforceContinuity && !self.newStart && !anyAlreadySeen)) {
|
||||
if(page !== 1) {
|
||||
self.logger.debug(`Did not find any already seen activities and continuity is enforced. This probably means there were more new items than 1 api call can return. Fetching next page (${page})...`);
|
||||
self.logger.debug(`Did not find any already seen Activities and continuity is enforced. This probably means there were more new Activities than 1 api call can return. Fetching next page (page ${page})...`);
|
||||
// @ts-ignore
|
||||
batch = await batch.fetchMore({amount: 100});
|
||||
}
|
||||
@@ -95,24 +102,67 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Emit for new items and add it to the list
|
||||
// add new item to list and set as processed
|
||||
newItems.push(item);
|
||||
self.processed.add(id);
|
||||
// but don't emit on new start since we are "buffering" already existing activities
|
||||
if(!self.newStart) {
|
||||
self.emit("item", item);
|
||||
}
|
||||
}
|
||||
page++;
|
||||
}
|
||||
const newItemMsg = `Found ${newItems.length} new items out of ${batch.length} returned`;
|
||||
|
||||
if(self.newStart) {
|
||||
self.logger.debug(`${newItemMsg} but will ignore all on first start.`);
|
||||
|
||||
self.logger.debug(`Found ${newItems.length} unseen Activities out of ${batch.length} returned, but will ignore all on first start.`);
|
||||
self.emit("listing", []);
|
||||
|
||||
if(self.useDateCutoff && self.dateCutoff === undefined) {
|
||||
self.logger.debug('Cutoff date should be used for filtering unseen Activities but none was set. Will determine date based on newest Activity returned from first polling results.');
|
||||
if(newItems.length === 0) {
|
||||
// no items found, cutoff is now
|
||||
self.dateCutoff = dayjs();
|
||||
self.logger.debug(`Cutoff date set to NOW (${self.dateCutoff.format('YYYY-MM-DD HH:mm:ssZ')}) since no unseen Activities returned. Unseen Activities will only be returned if newer than this date.`);
|
||||
} else {
|
||||
// set cutoff date for new items from the newest items found
|
||||
const sorted = [...newItems];
|
||||
sorted.sort((a, z) => z.created_utc - a.created_utc);
|
||||
self.dateCutoff = dayjs.unix(sorted[0].created_utc);
|
||||
self.logger.debug(`Cutoff date set to newest unseen Activity found, ${self.dateCutoff.format('YYYY-MM-DD HH:mm:ssZ')}. Unseen Activities will only be returned if newer than this date.`);
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
self.logger.debug(newItemMsg);
|
||||
|
||||
// applies mostly (only?) to 'unmoderated' polling
|
||||
//
|
||||
// scenario:
|
||||
// * polling unmoderated for many subreddits and unmoderated has not been clearing out for awhile so it has many (100's) of items
|
||||
// * a moderator, or CM, iterates through list and actions items so the list is shorter
|
||||
// * CM polling unmoderated and finds "unseen" items that don't appear in unprocessed list
|
||||
//
|
||||
// these "unseen" are OLDER than the "newest" seen items we have got from polling because CM only got the first page of unmoderated items
|
||||
// so now CM emits them as "new" and CM starts processing them. If it continues to process them then more and more 'unseen old' items continue to appear in stream,
|
||||
// creating a feedback loop where CM eventually processes the entire backlog of unmoderated items
|
||||
//
|
||||
// this is UNWANTED behavior. CM should only ever process items added to polling sources after it starts monitoring them.
|
||||
//
|
||||
// to address this we use a cutoff date determined from the newest activity returned from the first polling call (or current datetime if none returned)
|
||||
// then we make sure any 'new' items (unseen by CM) are newer than this cutoff date
|
||||
//
|
||||
// -- this is the default behavior for all polling sources except modqueue. See comments on that class below for why.
|
||||
const unixCutoff = self.useDateCutoff && self.dateCutoff !== undefined ? self.dateCutoff.unix() : undefined;
|
||||
const validNewItems = unixCutoff === undefined || newItems.length === 0 ? newItems : newItems.filter(x => x.created_utc >= unixCutoff);
|
||||
|
||||
if(validNewItems.length !== newItems.length && self.dateCutoff !== undefined) {
|
||||
self.logger.warn(`${newItems.length - validNewItems.length} unseen Activities were created before cutoff date (${self.dateCutoff.format('YYYY-MM-DD HH:mm:ssZ')}) and have been filtered out.`);
|
||||
}
|
||||
self.logger.debug(`Found ${validNewItems.length} valid, unseen Activities out of ${batch.length} returned`);
|
||||
|
||||
// only emit if not new start since we are "buffering" already existing activities
|
||||
for(const item of validNewItems) {
|
||||
self.emit('item', item);
|
||||
}
|
||||
|
||||
// Emit the new listing of all new items
|
||||
self.emit("listing", newItems);
|
||||
self.emit("listing", validNewItems);
|
||||
}
|
||||
// no longer new start on n+1 interval
|
||||
self.newStart = false;
|
||||
@@ -146,6 +196,7 @@ export class SPoll<T extends object> extends Poll<T> {
|
||||
this.logger.debug(msg);
|
||||
this.running = false;
|
||||
this.newStart = true;
|
||||
this.dateCutoff = undefined;
|
||||
super.end();
|
||||
}
|
||||
}
|
||||
@@ -159,6 +210,7 @@ export class UnmoderatedStream extends SPoll<Snoowrap.Submission | Snoowrap.Comm
|
||||
get: async () => client.getSubreddit(options.subreddit).getUnmoderated(options),
|
||||
identifier: "id",
|
||||
name: 'Unmoderated',
|
||||
dateCutoff: true,
|
||||
...options,
|
||||
});
|
||||
}
|
||||
@@ -173,6 +225,9 @@ export class ModQueueStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment
|
||||
get: async () => client.getSubreddit(options.subreddit).getModqueue(options),
|
||||
identifier: "id",
|
||||
name: 'Modqueue',
|
||||
// cannot use cutoff date since 'new' items in this list are based on when they were reported, not when the item was created
|
||||
// and unfortunately there is no way to use that "reported at" time since reddit doesn't include it in the returned items
|
||||
dateCutoff: false,
|
||||
...options,
|
||||
});
|
||||
}
|
||||
@@ -187,6 +242,7 @@ export class SubmissionStream extends SPoll<Snoowrap.Submission | Snoowrap.Comme
|
||||
get: async () => client.getNew(options.subreddit, options),
|
||||
identifier: "id",
|
||||
name: 'Submission',
|
||||
dateCutoff: true,
|
||||
...options,
|
||||
});
|
||||
}
|
||||
@@ -201,6 +257,7 @@ export class CommentStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment>
|
||||
get: async () => client.getNewComments(options.subreddit, options),
|
||||
identifier: "id",
|
||||
name: 'Comment',
|
||||
dateCutoff: true,
|
||||
...options,
|
||||
});
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -15,6 +15,7 @@ import {RichContent} from "../Common/interfaces";
|
||||
import {Cache} from 'cache-manager';
|
||||
import {isScopeError} from "../Utils/Errors";
|
||||
import {ErrorWithCause} from "pony-cause";
|
||||
import {UserNoteType} from "../Common/Infrastructure/Atomic";
|
||||
|
||||
interface RawUserNotesPayload {
|
||||
ver: number,
|
||||
@@ -125,7 +126,7 @@ export class UserNotes {
|
||||
return this.mod as RedditUser;
|
||||
}
|
||||
|
||||
async addUserNote(item: (Submission|Comment), type: string | number, text: string = '', wikiEditReasonPrefix?: string): Promise<UserNote>
|
||||
async addUserNote(item: (Submission|Comment), type: UserNoteType | number, text: string = '', wikiEditReasonPrefix?: string): Promise<UserNote>
|
||||
{
|
||||
const payload = await this.retrieveData();
|
||||
const userName = getActivityAuthorName(item.author);
|
||||
@@ -235,7 +236,7 @@ export interface UserNoteJson extends RichContent {
|
||||
* User Note type key
|
||||
* @examples ["spamwarn"]
|
||||
* */
|
||||
type: string,
|
||||
type: UserNoteType,
|
||||
}
|
||||
|
||||
export class UserNote {
|
||||
@@ -246,7 +247,7 @@ export class UserNote {
|
||||
// noteType: string | null;
|
||||
// link: string;
|
||||
|
||||
constructor(public time: Dayjs, public text: string, public modIndex: number, public noteType: string | number, public link: (string | null) = null, public moderator?: RedditUser) {
|
||||
constructor(public time: Dayjs, public text: string, public modIndex: number, public noteType: UserNoteType | number, public link: (string | null) = null, public moderator?: RedditUser) {
|
||||
|
||||
}
|
||||
|
||||
|
||||
33
src/SubredditConfigData.ts
Normal file
33
src/SubredditConfigData.ts
Normal file
@@ -0,0 +1,33 @@
|
||||
import {
|
||||
ActivityCheckConfigValue,
|
||||
} from "./Check";
|
||||
import {ManagerOptions} from "./Common/interfaces";
|
||||
import {RunConfigHydratedData, RunConfigValue, RunConfigObject} from "./Run";
|
||||
|
||||
export interface SubredditConfigData extends ManagerOptions {
|
||||
/**
|
||||
* A list of all the checks that should be run for a subreddit.
|
||||
*
|
||||
* Checks are split into two lists -- submission or comment -- based on kind and run independently.
|
||||
*
|
||||
* Checks in each list are run in the order found in the configuration.
|
||||
*
|
||||
* When a check "passes", and actions are performed, then all subsequent checks are skipped.
|
||||
* @minItems 1
|
||||
* */
|
||||
checks?: ActivityCheckConfigValue[]
|
||||
|
||||
/**
|
||||
* A list of sets of Checks to run
|
||||
* @minItems 1
|
||||
* */
|
||||
runs?: RunConfigValue[]
|
||||
}
|
||||
|
||||
export interface SubredditConfigHydratedData extends Omit<SubredditConfigData, 'checks'> {
|
||||
runs?: RunConfigHydratedData[]
|
||||
}
|
||||
|
||||
export interface SubredditConfigObject extends SubredditConfigHydratedData {
|
||||
runs?: RunConfigObject[]
|
||||
}
|
||||
@@ -1,9 +1,9 @@
|
||||
import ExtendableError from "es6-error";
|
||||
|
||||
class InvalidRegexError extends ExtendableError {
|
||||
constructor(regex: RegExp | RegExp[], val?: string, url?: string) {
|
||||
constructor(regex: RegExp | RegExp[], val?: string, url?: string, message?: string) {
|
||||
const msgParts = [
|
||||
'Regex(es) did not match the value given.',
|
||||
message ?? 'Regex(es) did not match the value given.',
|
||||
];
|
||||
let regArr = Array.isArray(regex) ? regex : [regex];
|
||||
for(const r of regArr) {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user