Compare commits


141 Commits

Author SHA1 Message Date
FoxxMD
fc51928054 Merge branch 'edge' 2022-02-02 16:59:56 -05:00
FoxxMD
c07276a3be fix(logging): Fix typo in error transform 2022-02-01 13:13:27 -05:00
FoxxMD
4a2297f5cd docs: Add github sponsor link 2022-02-01 12:01:34 -05:00
FoxxMD
f8967d55c4 feat(repeat): Use newer text comparison technique to improve repeat detection
* Use same technique as the repost rule, which has high accuracy and fewer false-positives
* Implement ability to see similarity score, case sensitivity, and text transformations
2022-01-31 14:08:21 -05:00
FoxxMD
e2590e50f8 Merge branch 'edge' 2022-01-28 17:27:51 -05:00
FoxxMD
7e8745d226 fix(polling): Fix shared polling behavior for nanny mode changes
* On hard limit stop shared streams
* On nanny mode turned off restart any stopped shared streams
2022-01-27 16:49:03 -05:00
FoxxMD
e2efc85833 fix(polling): Fix running state not changed on error
* Set running to false when an error is caught. This was missed in the last stream refactor, which changed polling behavior to end if any error is caught rather than waiting for an external source to clear the interval
* Add debugging/error messages on polling start/stop
2022-01-27 16:47:43 -05:00
FoxxMD
41038b9bcd feat(logging): Implement richer errors everywhere
* Use ErrorWithCause so we can get and print a chain of error causes
* Make reddit error responses in stack traces more readable by replacing them with a "translated" parent response and adding the original as the cause
* Properly handle error formatting for winston by looking at shape of log object for error rather than testing instanceof (see comments in errorAwareFormat)
* Fix formatting in web interface for log lines with white-space pre css and properly splitting timestamp from rest of the message
2022-01-27 16:27:03 -05:00
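The cause-chain pattern this commit describes can be illustrated with a small sketch; `ErrorWithCause` here is a stand-in class matching the name in the commit message, not necessarily the project's actual import.

```typescript
// Stand-in (assumption): an Error subclass that carries a `cause`, as the commit names.
class ErrorWithCause extends Error {
    cause?: Error;
    constructor(message: string, options?: { cause?: Error }) {
        super(message);
        this.cause = options?.cause;
    }
}

// Walk the chain so the full story can be logged, per the commit description.
const causeChain = (err: Error): string[] => {
    const parts: string[] = [];
    let current: Error | undefined = err;
    while (current !== undefined) {
        parts.push(current.message);
        current = (current as ErrorWithCause).cause;
    }
    return parts;
};

// Wrap a raw reddit HTTP error in a "translated" parent and keep the original as the cause.
const raw = new Error('403 Forbidden');
const friendly = new ErrorWithCause('Reddit rejected the request (missing OAuth scope?)', { cause: raw });
console.log(causeChain(friendly).join(' <- '));
// Reddit rejected the request (missing OAuth scope?) <- 403 Forbidden
```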
FoxxMD
9fe8c9568c refactor: Move SimpleError into main Errors module 2022-01-27 11:48:23 -05:00
FoxxMD
9614f7a209 refactor(logging): Implement snoowrap errors "the right way" and implement consolidated logging function
* Implement declaration file for snoowrap errors so they can be imported directly
* Implement logging function to handle boilerplate for known error responses (reddit HTTP response, rate limit, etc.)
2022-01-27 11:43:39 -05:00
FoxxMD
8dbaaf6798 fix(logging): Defaults for log file dir 2022-01-26 12:28:56 -05:00
FoxxMD
c14ad6cb76 feat(logging): Implement separate logging options for each transport type
* Add properties for file, console, and stream in logging object of operator config
* Each property inherits a (useful) subset of winston transport options
2022-01-26 12:09:03 -05:00
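As a rough illustration of the shape described above, a per-transport logging block in the operator config might look like the sketch below. The `file`/`console`/`stream` names come from the commit message; the individual options shown (`level`, `dirname`) are standard winston transport options and are assumptions here, not a verified schema.

```yaml
logging:
  level: info          # shared default level
  file:
    level: warn        # transport-specific override
    dirname: ./logs    # winston file transport option
  console:
    level: debug
  stream:
    level: verbose
```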
FoxxMD
adda280dd3 fix(logging): Fix parsing log dir
* Correct else condition to use log dir when value is not true
* Set level to 'debug' on init logger if no value is provided to help with debugging
2022-01-26 10:27:01 -05:00
FoxxMD
15fd47bdb4 fix(polling): Correct typings for stream getter and check isFinished for Listing 2022-01-26 10:11:06 -05:00
FoxxMD
78b6d8b7b6 feat(polling): Add debug messages when streams are stopped 2022-01-26 10:00:09 -05:00
FoxxMD
61bc63ccc5 fix(polling): Emit config change event to bot only after manager has rebuilt polling 2022-01-26 09:50:30 -05:00
FoxxMD
05df8b7fe2 fix(polling): Use manager eventState to control shared stream callback rather than removing callback when events are stopped
Should prevent edge cases where shared streams are re-parsed while managers are stopped (hard limit) and then removed due to there being no callbacks
2022-01-25 18:07:15 -05:00
FoxxMD
3cb7dffb90 fix(polling): Prevent endless loop when trying to enforce continuity on a stream with no items returned 2022-01-25 09:25:59 -05:00
FoxxMD
d0aafc34b9 feat(remove): Add option to mark activity as spam 2022-01-21 13:03:05 -05:00
FoxxMD
d2e1b5019f chore: Update packages 2022-01-21 13:02:31 -05:00
FoxxMD
aaed0d3419 Merge branch 'edge' 2022-01-21 10:46:11 -05:00
FoxxMD
2a77c71645 fix(usernotes): Fix wiki entity handling to avoid unhandled rejection
Since snoowrap's WikiPage isn't a "real" object, setting it as a property on the class means that if it rejects the whole application crashes. Fix this by building the wiki proxy every time it is needed, before awaiting the promise for edit/retrieval, so that the promise scope is bound to the function we are in (which has a try-catch)
2022-01-20 14:10:39 -05:00
FoxxMD
780e5c185e refactor(author filter): Strongly structure comparison/matching data for more consistent manipulation and output
* Use interface for comparison results at both criteria property level and criteria level
* Implement summary functions to build string results of comparisons
* Output all comparisons to debug and provide summaries to verbose (when applicable)
2022-01-20 14:08:54 -05:00
FoxxMD
38e2a4e69a fix(filter): Missing return on flair failure comparison 2022-01-19 15:49:44 -05:00
FoxxMD
7e0c34b6a3 fix(userflair): Fix wrong assignment for css 2022-01-19 13:10:11 -05:00
FoxxMD
e3ceb90d6f fix(filter): Fix default excludeCondition type
* Expected (prior) behavior is that all exclude criteria must pass, not just one
* Fix missing AND condition logic when all conditions pass
2022-01-19 13:09:45 -05:00
FoxxMD
6977e3bcdf feat(author): Add flair template criteria for author/submission
* Add filtering by flairTemplate id for author/submission
* Refactor flair properties for author/submission to accept string or array of strings
2022-01-19 12:48:58 -05:00
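Illustrative sketch of the criteria this commit adds. The `flairTemplate` name is taken from the commit message and its exact placement in the schema is an assumption; the string-or-array flexibility for flair properties is the other change described.

```yaml
authorIs:
  include:
    - flairTemplate: abc123-0000-1111-2222-333344445555  # filter by flair template id (single string)
    - flairText:
        - vet
        - trusted                                        # flair properties now also accept arrays
```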
FoxxMD
f382cddc2a fix(filter): Change array merging behavior for authorIs defaults to be more sane
* Don't just overwrite (duh)
* Drop any default filters that include object keys that are also present in user-defined filters -- this way user-defined always takes precedence on merge
2022-01-19 11:52:18 -05:00
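A minimal sketch of the merge rule described above, assuming plain objects for criteria; `AuthorCriteria` and the `isMod` key are illustrative stand-ins rather than the project's actual types.

```typescript
// Defaults that share any key with a user-defined criteria object are dropped,
// so user-defined filters always win on merge.
type AuthorCriteria = Record<string, unknown>;

const mergeAuthorIs = (defaults: AuthorCriteria[], userDefined: AuthorCriteria[]): AuthorCriteria[] => {
    const userKeys = new Set(userDefined.flatMap(c => Object.keys(c)));
    const keptDefaults = defaults.filter(c => !Object.keys(c).some(k => userKeys.has(k)));
    return [...keptDefaults, ...userDefined];
};

// Hypothetical key: the default mod exclusion is dropped because the user filters on the same key.
console.log(mergeAuthorIs([{ isMod: true }], [{ isMod: false }]));
// [ { isMod: false } ]
```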
FoxxMD
99a5642bdf fix(ui): Change time formatting from 12 to 24 hour 2022-01-18 16:49:07 -05:00
FoxxMD
174d832ab0 docs: Pretty up readme header 2022-01-18 16:08:40 -05:00
FoxxMD
3ee7586fe2 fix(approve): Fix touched entity 2022-01-18 13:37:56 -05:00
FoxxMD
e2c724b4ae feat(approve): Implement approving parent submission of comment 2022-01-18 13:37:22 -05:00
FoxxMD
d581f19a36 feat(logs): Use log objects in api to improve parsing client-side
* Add options for /logs endpoint to stream objects instead of strings
* Always return log objects from /status endpoint -- fixes bug where all bots/subreddits got lines from logs that had newlines
* Return context-aware, formatted log lines to the client to reduce line length, i.e. if returning to botA -> subA then labels for botA/subA do not need to be included #40
* Shorten timestamp to just time and wrap full timestamp in tooltip #40
* Emit log objects to client to reduce parsing complexity (don't have to regex for bot/subreddit name)
2022-01-18 12:59:59 -05:00
FoxxMD
48dea24bea feat: Improve first-run display in ui and add system view
* Fix bugs in UI when bot does not have a name (configured incorrectly)
* Implement instance system log view for operators
2022-01-18 10:38:39 -05:00
FoxxMD
5fc2a693a0 fix(config): Fix empty yaml config document initialization 2022-01-18 00:06:52 -05:00
FoxxMD
7be0722140 fix(bot): Fix limit rate expiration getter when there is no client initialized 2022-01-18 00:06:24 -05:00
FoxxMD
6ab9fe4bf4 feat(config): Implement persisting bots from invite process to application and config
* write to config when bot is added
* replace/add based on existing bot
* implement specify instance from instances user is operator of
* implement specify subreddits to run on using comma-separated list
* rewrite invite flow ending to be more clear on results and next steps
2022-01-17 17:47:27 -05:00
FoxxMD
5811af0342 feat(config): Refactor config parsing to preserve comments and enable writing
* use node-comment and yaml@next to keep comment information intact
* store ast/source version of parsed config for operator
* implement generic yaml/json operator config classes to keep everything organized and simplify marshalling source to js/string
* refactor file parsing and json/yaml parsing to have better single responsibility
2022-01-17 15:51:43 -05:00
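The comment-preserving round trip described above can be sketched with the `yaml` v2 (`yaml@next`) API named in the commit; the config content below is made up.

```typescript
import { parseDocument } from 'yaml';

const source = `# operator config -- comments survive the round trip
logLevel: info # default level
`;

const doc = parseDocument(source);
doc.set('logLevel', 'debug');   // edit through the comment-aware document, not a plain JS object
console.log(doc.toString());    // emits YAML with the original comments intact
```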
FoxxMD
ed2924264a feat(util): Better check for file/dir permissions 2022-01-17 11:18:23 -05:00
FoxxMD
e9394ccf2e refactor(tooling): Ignore sqlite files 2022-01-17 09:52:18 -05:00
FoxxMD
dec72f95c6 docs: Add discord invite link 2022-01-14 16:42:01 -05:00
FoxxMD
bc7eff8928 Merge branch 'edge' 2022-01-14 15:27:09 -05:00
FoxxMD
80c11b2c7f refactor(filter): Consolidate authorIs logic and add additional control to exclude logic
* Add excludeCondition to control how exclude sets are tested (and/or)
* Refactor authorIs logic from check/rule/action into standalone function (DRY)
* Simplify filter defaults -- don't need to specify automoderator since it is always a mod
2022-01-14 10:51:29 -05:00
FoxxMD
e6a2a86828 feat(config): Implement default filter criteria behavior
* Add default behavior config to operator and manager config
* Implement configurable behavior when filter is present on check
* Add defaults to exclude mods and automoderator from checks
2022-01-13 16:46:32 -05:00
FoxxMD
96749be571 refactor(polling): Simplify and cleanup all polling logic
* Remove unused clearProcessing code
* Use same data structures (Map) for storing polling objects in both Manager and Bot to reduce cognitive load and re-use some logic
* Rename "mod" streams to "shared" streams
* Implement detection and updating of polling when manager config changes
* Implement detection and updating of shared streams on manager config update
* Use shared retry handler for manager polling to better handle general reddit api issues (all polling stops faster)
* Move initial polling buffer into polling object (instead of in manager) for better logic encapsulation and add debug logging for it
* Add more debug logging for manager/bot poll building
2022-01-13 11:39:16 -05:00
FoxxMD
6b7e8e7749 feat(polling): Implement shared streams for all polling sources
* Refactor polling config to use new 'shared' string list of polling sources and deprecate 'sharedMod' property
* Refactor how shared sources are built to look for shared intention in manager polling options before creating
* Implement continuity check for comment/submission polling to ensure no activities are missed
* Add debug logging to polling
2022-01-12 15:47:43 -05:00
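A rough sketch of the polling change described above. The `shared` list and the deprecated `sharedMod` flag are named in the commit message; the source names listed are illustrative assumptions, not a verified list.

```yaml
# Deprecated shape (still accepted per the commit):
# polling:
#   sharedMod: true

# New shape -- list which polling sources should use a shared stream
polling:
  shared:
    - unmoderated   # source names here are illustrative
    - modqueue
```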
FoxxMD
43b29432a2 refactor(auth): Refactor auth data structures to consolidate logic
* Add abstract user class with auth methods with implementations for client/server
* Refactor client/server logic to use class methods instead of inline auth checks

Closes #71
2022-01-12 09:57:38 -05:00
FoxxMD
ff84946068 feat(regex): Experimental support for parsing regex expressions from fetched URL
* Support fetching from reddit wiki
* Support fetching from raw URL
* Support parsing and fetching from gist, github blob, and regexr (very experimental)
2022-01-11 14:05:57 -05:00
FoxxMD
7cdde99864 fix(recent): Potential fix for reddit ACID issues on history retrieval 2022-01-11 13:00:51 -05:00
FoxxMD
8eee1fe2e1 fix(recent): Remove code that should have been deleted during refactor
Refactored recent to use batch subreddit testing but forgot to remove the old, individual subreddit testing code, so activities were being counted twice
2022-01-11 10:15:16 -05:00
FoxxMD
6fc09864f6 fix: Don't delete property from object
Object passed by ref, duh
2022-01-11 10:13:48 -05:00
FoxxMD
1510980ce3 fix(util): Ensure provided state description is reattached to strong sub state 2022-01-11 10:13:14 -05:00
FoxxMD
56005f0f28 fix(bot): Fix own profile detection when building managers 2022-01-11 09:52:44 -05:00
FoxxMD
03b655515c fix(server): Fix logs not persisting for managers
* Change manager acquisition so all managers belong to a bot before they start logging so all logs are captured correctly
* Fix log capture logic that prevented all subreddits from being populated
2022-01-11 09:45:25 -05:00
FoxxMD
edd874f356 fix(server): Correctly filter bots and managers on auth on server 2022-01-11 09:15:52 -05:00
FoxxMD
7f13debe3b fix(client): Make sure all moderated subreddits are fetched 2022-01-10 16:17:24 -05:00
Matt Foxx
1565bdbf1a Merge pull request #67 from rysie/feature/dry-run-buttons
Run/Dry run buttons
2022-01-10 14:54:42 -05:00
FoxxMD
ec4cee8c77 refactor(ui): Fix and simplify button logic
* Fix url query selector to constrain to sub
* Use shared class between run buttons to simplify class modification and click event
2022-01-10 14:54:17 -05:00
FoxxMD
d6954533a0 Merge branch 'edge' 2022-01-10 12:32:14 -05:00
Matt Foxx
04b8762926 Merge pull request #68 from rysie/feature/flair-docs
feat(docs): User flair and submission flair docs
2022-01-10 12:31:56 -05:00
FoxxMD
dcc5f87c30 refactor(docs): Clean up flair docs
* Fix regex escaped characters
* Use authorIs
* make flair action type usage more clear
2022-01-10 12:22:16 -05:00
FoxxMD
66d9c0b2a7 fix(auth): Fix bug allowing any valid moderator to see all instances 2022-01-10 10:27:13 -05:00
FoxxMD
00e7cad423 fix(auth): Logout bot after auth flow is complete 2022-01-10 10:26:45 -05:00
Marcin Macinski
bc541d00d4 feat(docs): User flair and submission flair docs 2022-01-08 00:02:37 +01:00
Marcin Macinski
c5b27628b0 feat(ui): Run/Dry run buttons 2022-01-07 23:32:12 +01:00
FoxxMD
ba53233640 Merge branch 'edge' 2022-01-07 09:31:14 -05:00
Matt Foxx
ede86d285b Merge pull request #62 from rysie/user-flair-action
UserFlairAction added
2022-01-06 14:55:27 -05:00
FoxxMD
52f6aabb69 feat: Prevent bot from running on reports/comments it just created
Cache reported items or new comments made by the bot for a short time (defaults to twice the polling interval, 1 minute) to prevent the bot from running on things it did itself
2022-01-06 14:54:17 -05:00
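The guard described above amounts to a short-lived cache of ids the bot just touched; a minimal sketch (illustrative class and method names, not the project's actual cache):

```typescript
// Remember ids the bot acted on for a short TTL and skip them when they show up in polling.
class RecentSelfActions {
    private seen = new Map<string, number>();

    constructor(private ttlMs: number) {}

    add(id: string): void {
        this.seen.set(id, Date.now() + this.ttlMs);
    }

    shouldSkip(id: string): boolean {
        const expires = this.seen.get(id);
        if (expires === undefined) { return false; }
        if (Date.now() > expires) {
            this.seen.delete(id);   // expired entry, clean it up
            return false;
        }
        return true;
    }
}

const guard = new RecentSelfActions(60_000); // 1 minute, the default mentioned in the commit
guard.add('t1_abc123');                      // bot just posted this comment
console.log(guard.shouldSkip('t1_abc123'));  // true -- don't process the bot's own comment
```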
FoxxMD
18175f3662 feat(item filter): Support checking for different report types: total, user, mod 2022-01-06 13:13:10 -05:00
FoxxMD
68a272d305 fix(ui): Fix subreddit intersection check for bot related routes
Remove any prefixed r/ from a bot's subreddits when checking intersection with user subreddits
2022-01-06 12:29:19 -05:00
FoxxMD
3dac91fafc fix(recent): Fix default behavior for submissionReference based on activity type
Eliminates noisy logging when it's not specified but activity is comment
2022-01-06 12:09:04 -05:00
FoxxMD
e5bb8c2a38 fix(bot): Reduce retries for more aggressive fallback on reddit api issues
* Reduce retry for snoowrap to 2 since we do our own error handling in-app and 2 is enough for the occasional, non-systemic blip
* Reduce manager retries
2022-01-05 20:46:54 -05:00
FoxxMD
61e0baf3fd feat(recent): Add combined karma to template variables 2022-01-05 17:08:15 -05:00
FoxxMD
37e9d1fcc2 fix(polling): Fix set timeout args 2022-01-05 14:28:19 -05:00
FoxxMD
5e70ca1cb6 fix: Fix and improve code related to stopping bots when reddit api is not OK
* Fix polling timeout to actually stop on error by simplifying timeout and waiting until response is OK to recreate next timeout call
* Use "unexpected exception" retry count for all non well-known "reddit blip" responses in retry handler rather than failing immediately AND log this distinction
* Fix managers not emitting errors from checks
* Fix bot not awaiting retry handler on manager error emit
* Increase nanny loop delay on error to reduce api pressure when there are many bots running
* (unrelated) set bot as running before starting managers so UI is available earlier
2022-01-05 12:58:17 -05:00
FoxxMD
7f7ed18927 refactor(server): return app earlier so UI is available earlier
Bot init can finish asynchronously without any negative effect on server/client. Returning earlier means we can access server info earlier in startup
2022-01-05 12:50:55 -05:00
FoxxMD
efed3381fd feat(config): Allow top-level operator snoowrap config 2022-01-05 10:39:43 -05:00
FoxxMD
5ac5d65a28 refactor(userflair): Fix dryrun usage and add unflair functionality
* Can flair user on comment/submission
* fix dryrun if-else block (maybe a debugging artifact?)
* allow all properties to be undefined/null/empty and use as intention to unflair user
2022-01-03 21:02:21 -05:00
FoxxMD
1ac7ad4724 Merge branch 'edge' 2022-01-03 16:35:01 -05:00
FoxxMD
0ae74fdce1 docs: Add missing config box screenshot 2022-01-03 13:42:35 -05:00
FoxxMD
845173822c docs(onboarding): Update moderator getting started to reflect new editor capabilities 2022-01-03 13:40:17 -05:00
FoxxMD
edb3036957 feat(editor): Resolve raw url from github blob url 2022-01-03 13:31:54 -05:00
FoxxMD
3790f0e061 feat(editor): Implement wiki page creation
If page is not created yet detect it and require more perms for re-authorization
2022-01-03 13:23:31 -05:00
FoxxMD
e3e4e4abff feat(editor): Resolve raw gist url from gist id 2022-01-03 11:50:23 -05:00
FoxxMD
fd9b83437b feat(editor): Use manager format to infer format for editor
* Provide wiki format in manager data to status api
* Remove additional View link and instead infer format for querystring from manager data
2022-01-03 11:17:04 -05:00
FoxxMD
05694f115c fix(editor): Fix how initial editor value is set
For some reason providing the data directly to a new model doesn't trigger validation and also had some other weird effects. Instead, use an empty string as the initial value and then set the model value to the data afterwards -- which fixes everything (idk why).
2022-01-03 11:15:29 -05:00
FoxxMD
70ee157198 feat(config): Make manager aware of config format
* Default format to yaml
* Add detected config format as property to manager
* When neither format is valid use starting character to (naively) detect json or not
* Reduce config error noise by only showing one format error based on the likely type and printing the other to debug
2022-01-03 10:58:02 -05:00
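The naive detection mentioned in the last two bullets can be sketched as a first-character check (illustrative helper, not the project's actual function):

```typescript
type ConfigFormat = 'json' | 'yaml';

// When neither parser succeeds cleanly, guess the likely format from the first
// non-whitespace character so only one format error needs to be surfaced.
const guessFormat = (raw: string): ConfigFormat => {
    const firstChar = raw.trimStart()[0];
    return firstChar === '{' || firstChar === '[' ? 'json' : 'yaml';
};

console.log(guessFormat('{ "polling": {} }'));      // json
console.log(guessFormat('polling:\n  shared: []')); // yaml
```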
FoxxMD
bbb4ec3c2d fix: Fix missing hyphen character on regex for parsing reddit entity name
Reddit users can have hyphens in their names. The slight tradeoff of allowing hyphens in subreddit names (where they are non-existent) in order to accept all valid reddit user names is worth it.

Found and corrected by @prometheus-22
2021-12-31 00:04:25 -05:00
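For illustration only (this is not the project's actual pattern): adding the hyphen to the character class is what lets user names like prometheus-22 match.

```typescript
// \w is [A-Za-z0-9_], so the hyphen has to be added explicitly.
const entityPattern = /^\/?(u|r)\/([\w-]+)$/i;

console.log(entityPattern.exec('u/prometheus-22')?.[2]);   // "prometheus-22"
console.log(entityPattern.exec('r/mealtimevideos')?.[2]);  // "mealtimevideos"
```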
FoxxMD
acb72551ec fix(bot): Add missing invite check in healthloop 2021-12-30 23:09:09 -05:00
FoxxMD
bf6affe592 feat(bot): Add basic programmatic moderator invitation acceptance
* Store subreddits to try to accept invites from in bot's default cache
* Handle invitation scenarios (none, modself missing, accepted) and starting manager after invitation
* Implement basic invitation acceptance list control in UI for bot operators
2021-12-30 22:52:37 -05:00
FoxxMD
8c2cb02a46 feat(bot): Add modself oauth permission
Used for accepting moderator invitations
2021-12-30 20:25:51 -05:00
FoxxMD
73e2af2100 feat(manager): Improve wiki page creation
* Only try page creation if response error is a 404
* Improve permission error descriptions
* Only create if it can also set page permissions to improve security
2021-12-30 19:08:47 -05:00
FoxxMD
ba4c4af5a7 Update invite view 2021-12-30 18:00:11 -05:00
FoxxMD
9ad21ee2dd feat(bot): Add ability to create non-existing wiki page and change visibility 2021-12-25 21:13:55 -05:00
FoxxMD
b32c4f213c docs(yaml): Finish adding yaml examples
#61
2021-12-25 16:42:44 -05:00
FoxxMD
7e01c8d1f8 docs(yaml): add yaml examples for activities window
#61
2021-12-25 16:28:58 -05:00
FoxxMD
aee158ecc9 docs(yaml): add attribution and advanced concepts yaml examples
#61
2021-12-25 16:18:13 -05:00
FoxxMD
8cd2243c2d docs(yaml): add history and author yaml examples
#61
2021-12-25 16:07:46 -05:00
FoxxMD
4969789532 docs(yaml): add recentActivity yaml examples
#61
2021-12-25 15:53:54 -05:00
FoxxMD
1dcfdc14d1 docs(yaml): add regex yaml examples
#61
2021-12-25 15:23:34 -05:00
FoxxMD
f1c9b64f64 docs(yaml): Add more examples
#61
2021-12-23 21:35:18 -05:00
FoxxMD
2e5a61566b docs(yaml): create yaml versions of subreddit ready examples
#61
2021-12-23 12:34:50 -05:00
Matt Foxx
85761fa662 Merge pull request #65 from rysie/edge-submission-flair-fix
Flair action fix: import + assigning flair by flair_template_id
2021-12-23 09:33:34 -05:00
Marcin Macinski
0b1a6bd77b Flair action fix: import + assigning flair by flair_template_id 2021-12-23 12:34:08 +01:00
Marcin Macinski
51e299ca99 Merge branch 'edge' into user-flair-action 2021-12-22 01:13:33 +01:00
Marcin Macinski
7696f3c2ff UserFlairAction added 2021-12-22 00:45:59 +01:00
FoxxMD
1c9ed41e70 feat(ui): Implement basic wiki editing capabilities for editor
* Use stored scope to determine if user can save
* Only show save action if loaded from a subreddit
* Implement re-authorization flow through popup window and sockets to update status in editor
* Implement wiki location endpoint for server and wiki save endpoint for client
2021-12-21 16:30:57 -05:00
FoxxMD
2d67f9f57d refactor(ui): Migrate all editor usage to monaco-yaml base
* monaco-yaml can also do json validation since it's just normal monaco
* simplifies config.ejs greatly by not having to maintain two different monaco implementations, at the expense of a larger project
2021-12-21 14:22:26 -05:00
FoxxMD
975bcb6ad7 feat(ui): Enable additional scopes usage
* Store scopes in user auth object/sessions
* Implement requesting additional scopes through login route
2021-12-21 12:30:33 -05:00
FoxxMD
2a282a0d6f Merge branch 'edge' 2021-12-21 09:35:21 -05:00
FoxxMD
0d087521a7 feat(docs): Specify yaml can be validated in editor 2021-12-21 09:35:06 -05:00
FoxxMD
fb5fc961cc feat(docs): Add youtube credentials requirements for repost rule 2021-12-21 09:33:24 -05:00
FoxxMD
c04b305881 feat(docs): Add link-spamming regex config 2021-12-21 09:22:35 -05:00
FoxxMD
5c5e9a26aa refactor(ui): Unify editor look-and-feel
* Use named entry files for webpack so we can access them from config.ejs
* Move some of the validation logic to config.ejs so it can be used by json editor as well
* Use same controls for loading files/schemas/urls for both editors
* Add validation errors box for both editors
2021-12-20 23:30:41 -05:00
Matt Foxx
477d1a10ae Merge pull request #60 from rysie/edge
Support setting user flair by flair_template_id
2021-12-20 18:37:36 -05:00
Marcin Macinski
bbee92699c Support setting user flair by flair_template_id 2021-12-21 00:10:40 +01:00
FoxxMD
7f09043cdf fix(ui): Add missing yaml asset files
Need to unignore them...
2021-12-20 15:56:36 -05:00
FoxxMD
768a199c40 feat(ui): Add quick-n-dirty YAML editor
Using one-off project cm-yaml http://github.com/foxxmd/cm-yaml to run a static version of monaco-yaml inside CM
2021-12-20 15:42:37 -05:00
FoxxMD
6e4b0c7719 feat(regex): Add match sample to rule results so it can be used in content template 2021-12-20 12:33:40 -05:00
FoxxMD
89b21e6073 fix(repost): Use average of all algos to get high score
Ensures no anomalous scores dominate
2021-12-18 18:44:52 -05:00
FoxxMD
da611c5894 fix(repost): Fix max weighted score and formatting 2021-12-18 18:22:38 -05:00
FoxxMD
2c90a260c0 fix(stream): Remove processing clear behavior for polling to try to fix weird polling behavior 2021-12-18 18:08:00 -05:00
FoxxMD
f081598da6 feat(repost): Improve text matching and default values
* Use 3 different matching algorithms using the highest score out of the three
* Weight score based on length of the sentence
* Increase minimum number of words to 3
* Enforce min word count on external (youtube) comments
2021-12-18 17:42:43 -05:00
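A minimal sketch of the scoring idea in this commit, under stated assumptions: the similarity function is a stand-in (the project combines three different algorithms) and the length weighting shown is illustrative, not the actual formula.

```typescript
// Stand-in scorer: word overlap (Jaccard). Any set of [0,1] scorers slots in here.
const wordOverlap = (a: string, b: string): number => {
    const setA = new Set(a.toLowerCase().split(/\s+/).filter(w => w.length > 0));
    const setB = new Set(b.toLowerCase().split(/\s+/).filter(w => w.length > 0));
    const shared = [...setA].filter(w => setB.has(w)).length;
    return shared / new Set([...setA, ...setB]).size;
};

const textScore = (a: string, b: string): number => {
    const wordCount = Math.min(a.split(/\s+/).length, b.split(/\s+/).length);
    if (wordCount < 3) { return 0; }                   // minimum word count from the commit
    const scores = [wordOverlap(a, b) /* two more algorithms in the real rule */];
    const best = Math.max(...scores);                  // take the highest of the set
    const lengthWeight = Math.min(1, wordCount / 10);  // illustrative: short sentences count for less
    return best * lengthWeight;
};

console.log(textScore('nice video thanks for sharing', 'nice video thanks for posting'));
```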
FoxxMD
55f45163a4 feat(repost): Fix some bugs and improve result data
* Wrong hash check
* Add closest match summary and percent for use in content
2021-12-18 14:54:01 -05:00
FoxxMD
e4dfa9dde3 refactor(docker): reduce image size with best practices
* use --no-cache so repository index isn't included in image
* use FROM instructions to build project with dev dependencies, then copy built project to new stage and install production-only libraries
* properly ignore docs and node_modules with dockerignore
2021-12-17 15:10:22 -05:00
FoxxMD
0e395792db refactor: Remove unused ts-auto-guard package 2021-12-17 14:36:03 -05:00
Has @ts-morph as a dependency which is a huge package
2021-12-17 14:36:03 -05:00
FoxxMD
dcbeb784e8 refactor: Remove set-random-interval
* depended on and always downloaded an entire, older typescript version (even with production install) -- was not necessary for one function
* refactor project to use newer TS version (specify any type for catch blocks to fix compiler errors)
2021-12-17 14:25:48 -05:00
FoxxMD
aeaeb6ce27 refactor(docker): Remove monaco editor local dependency to reduce image size
Wasn't necessary to have it installed locally (can use CDN) and is not a part of the core experience
2021-12-17 14:00:54 -05:00
FoxxMD
d6a29c5914 fix(bot): Actually get all moderated subreddits
Previously was only getting first page of moderated subreddits (listing). Make sure to get all pages.
2021-12-17 13:24:09 -05:00
FoxxMD
c1224121d4 feat(subreddit): Implement user profile filtering for SubredditState
* Add convenience property for filtering subreddits by user profile prefix (u_)
* Do some smart property comparison to check if the SubredditState name is regex or not -- use the additional user profile check if it is so we don't clobber the existing name regex

Closes #56
2021-12-17 13:01:47 -05:00
FoxxMD
9790e681ea docs(regex): Add subreddit-ready example for removing discord link spam 2021-12-16 16:27:06 -05:00
FoxxMD
a48a850c98 Update winston repository package format/information 2021-12-16 14:58:43 -05:00
FoxxMD
b8369a9e9f Package bump for security
https://github.com/advisories/GHSA-93q8-gq69-wqmw
2021-12-14 12:06:48 -05:00
FoxxMD
0c31bdf25e refactor(repost): Improve repost criteria configuration and add documentation
* Simplify usage between comment and submission by removing "criteria" in the main criteria property -- comment checks can just use additional properties
* Consolidated occurrence count/time into one property to allow and/or operands on both conditions (more powerful!)
* Added documentation describing repost configuration
* Added repost configuration examples
2021-12-07 16:30:05 -05:00
FoxxMD
4b14e581dd fix: Add more staggering on heartbeat check
Always stagger subreddit checks
2021-12-06 17:31:00 -05:00
FoxxMD
b2846efd2b fix: Improve falloff behavior when reddit api errors are encountered
* Turn off snoowrap request queuing
* Aggregate errors from all managers at bot-level and force all to stop (and clear queue/polling) if a small threshold is met
* Add activity refresh on check into try-catch so delayed activities in queue don't cause a loop if they fail due to api issues
2021-12-06 17:16:04 -05:00
FoxxMD
a787e4515b feat(repost): Implement occurrence count and time comparisons
* User-defined set of comparisons for testing how many reposts were found
* User-defined set of comparisons for testing when a slice/all of reposts were created
2021-12-06 13:28:02 -05:00
FoxxMD
f63e2a0ec4 Merge branch 'topComments' into edge 2021-12-01 17:34:51 -05:00
FoxxMD
9d0e098db1 feat(rule): Implement repost rule
* checks for both submission/comment
* search by submission title, url, dups/crossposts, and external (just youtube for now)
* user-defined string sameness for all search facets
* user-defined case-sensitivity and regex-based transformations for activity/repost item values
* cache comment checks
* implemented youtube client for retrieving video comments
2021-12-01 17:34:25 -05:00
FoxxMD
181390f0eb fix: Correct error description for reading a config file
Was using copy-pasted statement from wiki error which was confusing
2021-12-01 09:33:03 -05:00
FoxxMD
a8c7b1dac9 Interim implementation of repost rule 2021-11-30 20:42:40 -05:00
235 changed files with 13311 additions and 4268 deletions


@@ -1,8 +1,8 @@
node_modules
Dockerfile
.dockerignore
.gitignore
.git
src/logs
/docs
.github
/docs/
/node_modules/

.github/FUNDING.yml (new file)

@@ -0,0 +1,2 @@
github: [FoxxMD]
custom: ["bitcoincash:qqmpsh365r8n9jhp4p8ks7f7qdr7203cws4kmkmr8q"]

.gitignore

@@ -381,4 +381,14 @@ dist
.pnp.*
**/src/**/*.js
!src/Web/assets/public/yaml/*
**/src/**/*.map
/**/*.sqlite
/**/*.bak
*.yaml
*.json5
!src/Schema/*.json
!docs/**/*.json5
!docs/**/*.yaml
!docs/**/*.json


@@ -1,15 +1,17 @@
FROM node:16-alpine3.14
FROM node:16-alpine3.14 as base
ENV TZ=Etc/GMT
# vips required to run sharp library for image comparison
RUN echo "http://dl-4.alpinelinux.org/alpine/v3.14/community" >> /etc/apk/repositories \
&& apk --update add vips
&& apk --no-cache add vips
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
WORKDIR /usr/app
FROM base as build
COPY package*.json ./
COPY tsconfig.json .
@@ -17,7 +19,13 @@ RUN npm install
ADD . /usr/app
RUN npm run build
RUN npm run build && rm -rf node_modules
FROM base as app
COPY --from=build /usr/app /usr/app
RUN npm install --production
ENV NPM_CONFIG_LOGLEVEL debug


@@ -1,6 +1,7 @@
[![Latest Release](https://img.shields.io/github/v/release/foxxmd/context-mod)](https://github.com/FoxxMD/context-mod/releases)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Docker Pulls](https://img.shields.io/docker/pulls/foxxmd/context-mod)](https://hub.docker.com/r/foxxmd/context-mod)
# ContextMod [![Latest Release](https://img.shields.io/github/v/release/foxxmd/context-mod)](https://github.com/FoxxMD/context-mod/releases) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Docker Pulls](https://img.shields.io/docker/pulls/foxxmd/context-mod)](https://hub.docker.com/r/foxxmd/context-mod)
<img src="/docs/logo.png" align="right"
alt="ContextMod logo" width="180" height="176">
**Context Mod** (CM) is an event-based, [reddit](https://reddit.com) moderation bot built on top of [snoowrap](https://github.com/not-an-aardvark/snoowrap) and written in [typescript](https://www.typescriptlang.org/).
@@ -19,14 +20,15 @@ Some feature highlights:
* Default/no configuration runs "All In One" behavior
* Additional configuration allows web interface to connect to multiple servers
* Each server instance can run multiple reddit accounts as bots
* **Per-subreddit configuration** is handled by JSON stored in the subreddit wiki
* Any text-based actions (comment, submission, message, usernotes, ban, etc...) can be configured via a wiki page or raw text in JSON and support [mustache](https://mustache.github.io) [templating](/docs/actionTemplating.md)
* **Per-subreddit configuration** is handled by YAML (**like automoderator!**) or JSON stored in the subreddit wiki
* Any text-based actions (comment, submission, message, usernotes, ban, etc...) can be configured via a wiki page or raw text and support [mustache](https://mustache.github.io) [templating](/docs/actionTemplating.md)
* History-based rules support multiple "valid window" types -- [ISO 8601 Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations), [Day.js Durations](https://day.js.org/docs/en/durations/creating), and submission/comment count limits.
* Support Activity skipping based on:
* Author criteria (name, css flair/text, age, karma, moderator status, and [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes))
* Activity state (removed, locked, distinguished, etc.)
* Rules and Actions support named references (write once, reference anywhere)
* [**Image Comparisons**](/docs/imageComparison.md) via fingerprinting and/or pixel differences
* [**Repost detection**](/docs/examples/repost) with support for external services (youtube, etc...)
* Global/subreddit-level **API caching**
* Support for [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes) as criteria or Actions (writing notes)
* Docker container support
@@ -84,7 +86,7 @@ See the [Moderator's Getting Started Guide](/docs/gettingStartedMod.md)
## Configuration and Documentation
Context Bot's configuration can be written in JSON, [JSON5](https://json5.org/) or YAML. Its schema conforms to [JSON Schema Draft 7](https://json-schema.org/). Additionally, many **operator** settings can be passed via command line or environmental variables.
Context Bot's configuration can be written in YAML (like automoderator) or [JSON5](https://json5.org/). Its schema conforms to [JSON Schema Draft 7](https://json-schema.org/). Additionally, many **operator** settings can be passed via command line or environmental variables.
* For **operators** (running the bot instance) see the [Operator Configuration](/docs/operatorConfiguration.md) guide
* For **moderators** consult the [app schema and examples folder](/docs/#configuration-and-usage)
@@ -125,7 +127,7 @@ Moderator view/invite and authorization:
A built-in editor using [monaco-editor](https://microsoft.github.io/monaco-editor/) makes editing configurations easy:
* Automatic JSON syntax validation and formatting
* Automatic JSON or YAML syntax validation and formatting
* Automatic Schema (subreddit or operator) validation
* All properties are annotated via hover popups
* Unauthenticated view via `yourdomain.com/config`


@@ -102,6 +102,7 @@ Find detailed descriptions of all the Rules, with examples, below:
* [History](/docs/examples/history)
* [Author](/docs/examples/author)
* [Regex](/docs/examples/regex)
* [Repost](/docs/examples/repost)
### Rule Set
@@ -119,6 +120,15 @@ It consists of:
* **rules** -- The **Rules** for the Rule Set.
Example
YAML
```yaml
condition: AND
# rules are an array
rules:
- aRule
```
JSON
```json5
{
"condition": "AND",


@@ -17,7 +17,28 @@ Examples of all of the above
<details>
```yaml
# count, last 100 activities
window: 100
# duration, last 10 days
window: 10 days
# duration object, last 2 months and 5 days
window:
  months: 2
  days: 5
# iso 8601 string, last 15 minutes
window: PT15M
# ActivityWindowCriteria, last 100 activities or 6 weeks of activities (whichever is found first)
window:
  count: 100
  duration: 6 weeks
```
```json5
// count, last 100 activities
{
"window": 100
@@ -49,6 +70,7 @@ Examples of all of the above
}
}
```
</details>
## Types of Ranges
@@ -95,6 +117,7 @@ If you need to specify multiple units of time for your duration you can instead
Example
JSON
```json
{
"days": 4,
@@ -102,6 +125,13 @@ Example
"minutes": 20
}
```
YAML
```yaml
window:
  days: 4
  hours: 6
  minutes: 20
```
##### An ISO 8601 duration string
@@ -119,6 +149,7 @@ This is an object that lets you specify more granular conditions for your range.
The full object looks like this:
JSON
```json
{
"count": 100,
@@ -130,6 +161,19 @@ The full object looks like this:
}
}
```
YAML
```yaml
window:
  count: 100
  duration: 10 days
  satisfyOn: any
  subreddits:
    include:
      - mealtimevideos
      - pooptimevideos
    exclude:
      - videos
```
### Specifying Range
@@ -142,7 +186,9 @@ If both range properties are specified then the value `satisfyOn` determines how
If **any** then Activities will be retrieved until one of the range properties is met, **whichever occurs first.**
Example
JSON
```json
{
"count": 80,
@@ -150,6 +196,13 @@ Example
"satisfyOn": "any"
}
```
YAML
```yaml
window:
  count: 80
  duration: 90 days
  satisfyOn: any
```
Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)
* If 90 days of activities returns only 40 activities => returns 40 activities
@@ -160,6 +213,8 @@ Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)
If **all** then both ranges must be satisfied. Effectively, whichever range produces the most Activities will be the one that is used.
Example
JSON
```json
{
"count": 100,
@@ -167,6 +222,13 @@ Example
"satisfyOn": "all"
}
```
YAML
```yaml
window:
  count: 100
  duration: 90 days
  satisfyOn: all
```
Activities are retrieved in chunks of 100 (or `count`, whichever is smaller)
* If at 90 days of activities => 40 activities retrieved
@@ -187,6 +249,8 @@ You may filter retrieved Activities using an array of subreddits.
Use **include** to specify which subreddits should be included from results
Example where only activities from /r/mealtimevideos and /r/modsupport will be returned
JSON
```json
{
"count": 100,
@@ -196,7 +260,17 @@ Example where only activities from /r/mealtimevideos and /r/modsupport will be r
"include": ["mealtimevideos","modsupport"]
}
}
```
YAML
```yaml
window:
  count: 100
  duration: 90 days
  satisfyOn: any
  subreddits:
    include:
      - mealtimevideos
      - modsupport
```
#### Exclude
@@ -204,6 +278,8 @@ Example where only activities from /r/mealtimevideos and /r/modsupport will be r
Use **exclude** to specify which subreddits should NOT be in the results
Example where activities from /r/mealtimevideos and /r/modsupport will not be returned in results
JSON
```json
{
"count": 100,
@@ -214,4 +290,15 @@ Example where activities from /r/mealtimevideos and /r/modsupport will not be re
}
}
```
YAML
```yaml
window:
  count: 100
  duration: 90 days
  satisfyOn: any
  subreddits:
    exclude:
      - mealtimevideos
      - modsupport
```
**Note:** `exclude` will be ignored if `include` is also present.


@@ -17,6 +17,8 @@ This directory contains example of valid, ready-to-go configurations for Context
* [History](/docs/examples/history)
* [Author](/docs/examples/author)
* [Regex](/docs/examples/regex)
* [Repost](/docs/examples/repost)
* [Author and post flairs](/docs/examples/onlyfansFlair)
* [Toolbox User Notes](/docs/examples/userNotes)
* [Advanced Concepts](/docs/examples/advancedConcepts)
* [Rule Sets](/docs/examples/advancedConcepts/ruleSets.json5)


@@ -1,6 +1,6 @@
### Named Rules
See [ruleNameReuse.json5](/docs/examples/advancedConcepts/ruleNameReuse.json5)
See **Rule Name Reuse Examples [YAML](/docs/examples/advancedConcepts/ruleNameReuse.yaml) | [JSON](/docs/examples/advancedConcepts/ruleNameReuse.json5)**
### Check Order
@@ -23,7 +23,7 @@ The `rules` array on a `Checks` can contain both `Rule` objects and `RuleSet` ob
A **Rule Set** is a "nested" set of `Rule` objects with a passing condition specified. These allow you to create more complex trigger behavior by combining multiple rules.
See **[ruleSets.json5](/docs/examples/advancedConcepts/ruleSets.json5)** for a complete example as well as consulting the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRuleSetJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json).
See **ruleSets [YAML](/docs/examples/advancedConcepts/ruleSets.yaml) | [JSON](/docs/examples/advancedConcepts/ruleSets.json5)** for a complete example as well as consulting the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRuleSetJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json).
### Rule Order


@@ -0,0 +1,52 @@
checks:
- name: Auto Remove SP Karma
description: >-
Remove submission because author has self-promo >10% and posted in karma
subs recently
kind: submission
rules:
# named rules can be referenced at any point in the configuration (where they occur does not matter)
# and can be used in any Check
# Note: rules do not transfer between subreddit configurations
- freekarmasub
- name: attr10all
kind: attribution
criteria:
- threshold: '> 10%'
window: 90 days
- threshold: '> 10%'
window: 100
actions:
- kind: remove
- kind: comment
content: >-
Your submission was removed because you are over reddit's threshold
for self-promotion and recently posted this content in a karma sub
- name: Free Karma On Submission Alert
description: Check if author has posted this submission in 'freekarma' subreddits
kind: submission
rules:
# rules can be re-used throughout a configuration by referencing them by name
#
# The rule name itself can only contain spaces, hyphens and underscores
# The value used to reference it will have all of these removed, and lower-cased
#
# so to reference this rule use the value 'freekarmasub'
- name: Free_Karma-SUB
kind: recentActivity
lookAt: submissions
useSubmissionAsReference: true
thresholds:
- threshold: '>= 1'
subreddits:
- DeFreeKarma
- FreeKarma4U
- FreeKarma4You
- upvote
window: 7 days
actions:
- kind: report
content: >-
Submission posted {{rules.freekarmasub.totalCount}} times in karma
{{rules.freekarmasub.subCount}} subs over
{{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}


@@ -0,0 +1,53 @@
checks:
- name: Self Promo All or low comment
description: >-
SP >10% of all activities or >10% of submissions with low comment
engagement
kind: submission
rules:
# this attribution rule is looking at all activities
#
# we want want this one rule to trigger the check because >10% of all activity (submission AND comments) is a good requirement
- name: attr10all
kind: attribution
criteria:
- threshold: '> 10%'
window: 90 days
- threshold: '> 10%'
window: 100
# this is a RULE SET
#
# it is made up of "nested" rules with a pass condition (AND/OR)
# if the nested rules pass the condition then the Rule Set triggers the Check
#
# AND = all nested rules must be triggered to make the Rule Set trigger
# OR = any of the nested Rules being triggered will make the Rule Set trigger
- condition: AND
# in this check we use an Attribution >10% on ONLY submissions, which is a lower requirement than the above attribution rule
# and combine it with a History rule looking for low comment engagement
# to make a "higher" requirement Rule Set out of two low requirement Rules
rules:
- name: attr20sub
kind: attribution
criteria:
- threshold: '> 10%'
thresholdOn: submissions
window: 90 days
- threshold: '> 10%'
thresholdOn: submissions
window: 100
lookAt: media
- name: lowOrOpComm
kind: history
criteriaJoin: OR
criteria:
- window: 90 days
comment: < 50%
- window: 90 days
comment: '> 40% OP'
actions:
- kind: remove
- kind: comment
content: >-
Your submission was removed because you are over reddit's threshold
for self-promotion or exhibit low comment engagement


@@ -10,5 +10,5 @@ Consult the [schema](https://json-schema.app/view/%23/%23%2Fdefinitions%2FCheckJ
### Examples
* [Self Promotion as percentage of all Activities](/docs/examples/attribution/redditSelfPromoAll.json5) - Check if Author is submitting much more than they comment.
* [Self Promotion as percentage of Submissions](/docs/examplesm/attribution/redditSelfPromoSubmissionsOnly.json5) - Check if any of Author's aggregated submission origins are >10% of their submissions
* Self Promotion as percentage of all Activities [YAML](/docs/examples/attribution/redditSelfPromoAll.yaml) | [JSON](/docs/examples/attribution/redditSelfPromoAll.json5) - Check if Author is submitting much more than they comment.
* Self Promotion as percentage of Submissions [YAML](/docs/examples/attribution/redditSelfPromoSubmissionsOnly.yaml) | [JSON](/docs/examplesm/attribution/redditSelfPromoSubmissionsOnly.json5) - Check if any of Author's aggregated submission origins are >10% of their submissions


@@ -0,0 +1,27 @@
checks:
- name: Self Promo Activities
description: >-
Check if any of Author's aggregated submission origins are >10% of entire
history
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: attr10all
kind: attribution
# criteria defaults to OR -- so either of these criteria will trigger the rule
criteria:
- threshold: '> 10%' # threshold can be a percent or an absolute number
# The default is "all" -- calculate percentage of entire history (submissions & comments)
#thresholdOn: all
#
# look at last 90 days of Author's activities (comments and submissions)
window: 90 days
- threshold: '> 10%'
# look at Author's last 100 activities (comments and submissions)
window: 100
actions:
- kind: report
content: >-
{{rules.attr10all.largestPercent}}% of
{{rules.attr10all.activityTotal}} items over
{{rules.attr10all.window}}


@@ -0,0 +1,24 @@
checks:
- name: Self Promo Submissions
description: >-
Check if any of Author's aggregated submission origins are >10% of their
submissions
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: attr10sub
kind: attribution
# criteria defaults to OR -- so either of these criteria will trigger the rule
criteria:
- threshold: '> 10%' # threshold can be a percent or an absolute number
thresholdOn: submissions # calculate percentage of submissions, rather than entire history (submissions & comments)
window: 90 days # look at last 90 days of Author's activities (comments and submissions)
- threshold: '> 10%'
thresholdOn: submissions
window: 100 # look at Author's last 100 activities (comments and submissions)
actions:
- kind: report
content: >-
{{rules.attr10sub.largestPercent}}% of
{{rules.attr10sub.activityTotal}} items over
{{rules.attr10sub.window}}


@@ -18,10 +18,10 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FAuthorRule
### Examples
* Basic examples
* [Flair new user Submission](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does not have the `vet` flair then flair the Submission with `New User`
* [Flair vetted user Submission](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does have the `vet` flair then flair the Submission with `Vetted`
* Flair new user Submission [YAML](/docs/examples/author/flairNewUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does not have the `vet` flair then flair the Submission with `New User`
* Flair vetted user Submission [YAML](/docs/examples/author/flairNewUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - If the Author does have the `vet` flair then flair the Submission with `Vetted`
* Used with other Rules
* [Ignore vetted user](/docs/examples/author/flairNewUserSubmission.json5) - Short-circuit the Check if the Author has the `vet` flair
* Ignore vetted user [YAML](/docs/examples/author/flairNewUserSubmission.yaml) | [JSON](/docs/examples/author/flairNewUserSubmission.json5) - Short-circuit the Check if the Author has the `vet` flair
## Filter
@@ -35,4 +35,4 @@ All **Rules** and **Checks** have an optional `authorIs` property that takes an
### Examples
* [Skip recent activity check based on author](/docs/examples/author/authorFilter.json5) - Skip a Recent Activity check for a set of subreddits if the Author of the Submission has any set of flairs.
* Skip recent activity check based on author [YAML](/docs/examples/author/authorFilter.yaml) | [JSON](/docs/examples/author/authorFilter.json5) - Skip a Recent Activity check for a set of subreddits if the Author of the Submission has any set of flairs.


@@ -0,0 +1,48 @@
checks:
- name: Karma/Meme Sub Activity
description: Report on karma sub activity or meme sub activity if user isn't a memelord
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: freekarma
kind: recentActivity
lookAt: submissions
thresholds:
- threshold: '>= 1'
subreddits:
- DeFreeKarma
- FreeKarma4U
window: 7 days
- name: noobmemer
kind: recentActivity
# authors filter will be checked before a rule is run. If anything passes then the Rule is skipped -- it is not failed or triggered.
# if *all* Rules for a Check are skipped due to authors filter then the Check will fail
authorIs:
# each property (include/exclude) can contain multiple AuthorCriteria
# if any AuthorCriteria passes its test the Rule is skipped
#
# for an AuthorCriteria to pass all properties present on it must pass
#
# if include is present it will always run and exclude will be skipped
#-include:
exclude:
# for this to pass the Author of the Submission must not have the flair "Supreme Memer" and have the name "user1" or "user2"
- flairText:
- Supreme Memer
names:
- user1
- user2
# for this to pass the Author of the Submission must not have the flair "Decent Memer"
- flairText:
- Decent Memer
lookAt: submissions
thresholds:
- threshold: '>= 1'
subreddits:
- dankmemes
window: 7 days
actions:
- kind: report
content: >-
Author has posted in free karma sub, or in /r/dankmemes and does not
have meme flair in this subreddit


@@ -0,0 +1,16 @@
checks:
- name: Flair New User Sub
description: Flair submission as sketchy if user does not have vet flair
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: newflair
kind: author
# rule will trigger if Author does not have "vet" flair text
exclude:
- flairText:
- vet
actions:
- kind: flair
text: New User
css: orange


@@ -0,0 +1,16 @@
checks:
- name: Flair Vetted User Submission
description: Flair submission as Approved if user has vet flair
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: newflair
kind: author
# rule will trigger if Author has "vet" flair text
include:
- flairText:
- vet
actions:
- kind: flair
text: Vetted
css: green


@@ -0,0 +1,45 @@
checks:
- name: non-vetted karma/meme activity
description: >-
Report if Author has SP and has recent karma/meme sub activity and isn't
vetted
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
# The Author Rule is best used in conjunction with other Rules --
# instead of having to write an AuthorFilter for every Rule where you want to skip it based on Author criteria
# you can write one Author Rule and make it fail on the required criteria
# so that the check fails and Actions don't run
- name: nonvet
kind: author
exclude:
- flairText:
- vet
- name: attr10
kind: attribution
criteria:
- threshold: '> 10%'
window: 90 days
- threshold: '> 10%'
window: 100
- name: freekarma
kind: recentActivity
lookAt: submissions
thresholds:
- threshold: '>= 1'
subreddits:
- DeFreeKarma
- FreeKarma4U
window: 7 days
- name: memes
kind: recentActivity
lookAt: submissions
thresholds:
- threshold: '>= 3'
subreddits:
- dankmemes
window: 7 days
# will NOT run if the Author for this Submission has the flair "vet"
actions:
- kind: report
content: Author has posted in free karma or meme subs recently


@@ -9,5 +9,5 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FHistoryJSO
### Examples
* [Low Comment Engagement](/docs/examples/history/lowEngagement.json5) - Check if Author is submitting much more than they comment.
* [OP Comment Engagement](/docs/examples/history/opOnlyEngagement.json5) - Check if Author is mostly engaging only in their own content
* Low Comment Engagement [YAML](/docs/examples/history/lowEngagement.yaml) | [JSON](/docs/examples/history/lowEngagement.json5) - Check if Author is submitting much more than they comment.
* OP Comment Engagement [YAML](/docs/examples/history/opOnlyEngagement.yaml) | [JSON](/docs/examples/history/opOnlyEngagement.json5) - Check if Author is mostly engaging only in their own content


@@ -0,0 +1,21 @@
checks:
- name: Low Comment Engagement
description: Check if Author is submitting much more than they comment
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: lowComm
kind: history
criteria:
- comment: '< 30%'
window:
# get author's last 90 days of activities or 100 activities, whichever is less
duration: 90 days
count: 100
# trigger if less than 30% of their activities in this time period are comments
actions:
- kind: report
content: >-
Low engagement: comments were {{rules.lowcomm.commentPercent}} of
{{rules.lowcomm.activityTotal}} over {{rules.lowcomm.window}}


@@ -0,0 +1,22 @@
checks:
- name: Engaging Own Content Only
description: Check if Author is mostly engaging in their own content only
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: opOnly
kind: history
criteria:
# trigger if more than 60% of their activities in this time period are comments as OP
- comment: '> 60% OP'
window:
# get author's last 90 days of activities or 100 activities, whichever is less
duration: 90 days
count: 100
actions:
- kind: report
content: >-
Selfish OP: {{rules.oponly.opPercent}} of
{{rules.oponly.commentTotal}} comments over {{rules.oponly.window}}
are as OP


@@ -0,0 +1,9 @@
# Flair users and submissions
Flair users and submissions based on certain keywords from submitter's profile.
Consult [User Flair schema](https://json-schema.app/view/%23%2Fdefinitions%2FUserFlairActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) and [Submission Flair schema](https://json-schema.app/view/%23%2Fdefinitions%2FFlairActionJson?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) for a complete reference of the rule's properties.
### Examples
* OnlyFans submissions [YAML](/docs/examples/onlyFansFlair/onlyFansFlair.yaml) | [JSON](/docs/examples/onlyfansFlair/onlyfansFlair.json5) - Check whether submitter has typical OF keywords in their profile and flair both author + submission accordingly.


@@ -0,0 +1,68 @@
{
"checks": [
{
"name": "Flair OF submitters",
"description": "Flair submission as OF if user does not have Verified flair and has certain keywords in their profile",
"kind": "submission",
"authorIs": {
"exclude": [
{
"flairCssClass": ["verified"]
}
]
},
"rules": [
{
"name": "OnlyFans strings in description",
"kind": "author",
"include": [
{
"description": [
"/(cashapp|allmylinks|linktr|onlyfans\\.com)/i",
"/(see|check|my|view) (out|of|onlyfans|kik|skype|insta|ig|profile|links)/i",
"my links",
"$"
]
}
]
}
],
"actions": [
{
"name": "Set OnlyFans user flair",
"kind": "userflair",
"flair_template_id": "put-your-onlyfans-user-flair-id-here"
},
{
"name":"Set OF Creator SUBMISSION flair",
"kind": "flair",
"flair_template_id": "put-your-onlyfans-post-flair-id-here"
}
]
},
{
"name": "Flair posts of OF submitters",
"description": "Flair submission as OnlyFans if submitter has OnlyFans userflair (override post flair set by submitter)",
"kind": "submission",
"rules": [
{
"name": "Include OF submitters",
"kind": "author",
"include": [
{
"flairCssClass": ["onlyfans"]
}
]
}
],
"actions": [
{
"name":"Set OF Creator SUBMISSION flair",
"kind": "flair",
"flair_template_id": "put-your-onlyfans-post-flair-id-here"
}
]
}
]
}


@@ -0,0 +1,38 @@
checks:
- name: Flair OF submitters
description: Flair submission as OF if user does not have Verified flair and has
certain keywords in their profile
kind: submission
authorIs:
exclude:
- flairCssClass:
- verified
rules:
- name: OnlyFans strings in description
kind: author
include:
- description:
- '/(cashapp|allmylinks|linktr|onlyfans\.com)/i'
- '/(see|check|my|view) (out|of|onlyfans|kik|skype|insta|ig|profile|links)/i'
- my links
- "$"
actions:
- name: Set OnlyFans user flair
kind: userflair
flair_template_id: put-your-onlyfans-user-flair-id-here
- name: Set OF Creator SUBMISSION flair
kind: flair
flair_template_id: put-your-onlyfans-post-flair-id-here
- name: Flair posts of OF submitters
description: Flair submission as OnlyFans if submitter has OnlyFans userflair (override post flair set by submitter)
kind: submission
rules:
- name: Include OF submitters
kind: author
include:
- flairCssClass:
- onlyfans
actions:
- name: Set OF Creator SUBMISSION flair
kind: flair
flair_template_id: put-your-onlyfans-post-flair-id-here


@@ -6,5 +6,5 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FRecentActi
### Examples
* [Free Karma Subreddits](/docs/examples/recentActivity/freeKarma.json5) - Check if the Author has recently posted in any "free karma" subreddits
* [Submission in Free Karma Subreddits](/docs/examples/recentActivity/freeKarmaOnSubmission.json5) - Check if the Author has posted the Submission this check is running on in any "free karma" subreddits recently
* Free Karma Subreddits [YAML](/docs/examples/recentActivity/freeKarma.yaml) | [JSON](/docs/examples/recentActivity/freeKarma.json5) - Check if the Author has recently posted in any "free karma" subreddits
* Submission in Free Karma Subreddits [YAML](/docs/examples/recentActivity/freeKarmaOnSubmission.yaml) | [JSON](/docs/examples/recentActivity/freeKarmaOnSubmission.json5) - Check if the Author has posted the Submission this check is running on in any "free karma" subreddits recently


@@ -0,0 +1,27 @@
checks:
- name: Free Karma Alert
description: Check if author has posted in 'freekarma' subreddits
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: freekarma
kind: recentActivity
# when lookAt is not present this rule will look for submissions and comments
#lookAt: comments
useSubmissionAsReference: false
thresholds:
# if the number of activities (sub/comment) found CUMULATIVELY in the subreddits listed is
# equal to or greater than 1 then the rule is triggered
- threshold: '>= 1'
subreddits:
- DeFreeKarma
- FreeKarma4U
- FreeKarma4You
- upvote
window: 7 days
actions:
- kind: report
content: >-
{{rules.freekarma.totalCount}} activities in karma
{{rules.freekarma.subCount}} subs over {{rules.freekarma.window}}:
{{rules.freekarma.subSummary}}

View File

@@ -0,0 +1,26 @@
checks:
- name: Free Karma On Submission Alert
description: Check if author has posted this submission in 'freekarma' subreddits
kind: submission
rules:
- name: freekarmasub
kind: recentActivity
# rule will only look at Author's submissions in these subreddits
lookAt: submissions
# rule will only look at Author's submissions in these subreddits that have the same content (link) as the submission this event was made on
# In simpler terms -- rule will only check to see if the same link the author just posted is also posted in these subreddits
useSubmissionAsReference: true
thresholds:
- threshold: '>= 1'
subreddits:
- DeFreeKarma
- FreeKarma4U
- FreeKarma4You
- upvote
window: 7 days
actions:
- kind: report
content: >-
Submission posted {{rules.freekarmasub.totalCount}} times in karma
{{rules.freekarmasub.subCount}} subs over
{{rules.freekarmasub.window}}: {{rules.freekarmasub.subSummary}}

View File

@@ -11,10 +11,12 @@ Which can then be used in conjunction with a [`window`](https://github.com/FoxxM
### Examples
* [Trigger if regex matches against the current activity](/docs/examples/regex/matchAnyCurrentActivity.json5)
* [Trigger if regex matches 5 times against the current activity](/docs/examples/regex/matchThresholdCurrentActivity.json5)
* [Trigger if regex matches against any part of a Submission](/docs/examples/regex/matchSubmissionParts.json5)
* [Trigger if regex matches any of Author's last 10 activities](/docs/examples/regex/matchHistoryActivity.json5)
* [Trigger if regex matches at least 3 of Author's last 10 activities](/docs/examples/regex/matchActivityThresholdHistory.json5)
* [Trigger if there are 5 regex matches in the Author's last 10 activities](/docs/examples/regex/matchTotalHistoryActivity.json5)
* [Trigger if there are 5 regex matches in the Author's last 10 comments](/docs/examples/regex/matchSubsetHistoryActivity.json5)
* Trigger if regex matches against the current activity - [YAML](/docs/examples/regex/matchAnyCurrentActivity.yaml) | [JSON](/docs/examples/regex/matchAnyCurrentActivity.json5)
* Trigger if regex matches 5 times against the current activity - [YAML](/docs/examples/regex/matchThresholdCurrentActivity.yaml) | [JSON](/docs/examples/regex/matchThresholdCurrentActivity.json5)
* Trigger if regex matches against any part of a Submission - [YAML](/docs/examples/regex/matchSubmissionParts.yaml) | [JSON](/docs/examples/regex/matchSubmissionParts.json5)
* Trigger if regex matches any of Author's last 10 activities - [YAML](/docs/examples/regex/matchHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchHistoryActivity.json5)
* Trigger if regex matches at least 3 of Author's last 10 activities - [YAML](/docs/examples/regex/matchActivityThresholdHistory.yaml) | [JSON](/docs/examples/regex/matchActivityThresholdHistory.json5)
* Trigger if there are 5 regex matches in the Author's last 10 activities - [YAML](/docs/examples/regex/matchTotalHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchTotalHistoryActivity.json5)
* Trigger if there are 5 regex matches in the Author's last 10 comments - [YAML](/docs/examples/regex/matchSubsetHistoryActivity.yaml) | [JSON](/docs/examples/regex/matchSubsetHistoryActivity.json5)
* Remove comments that are spamming discord links - [YAML](/docs/examples/regex/removeDiscordSpam.yaml) | [JSON](/docs/examples/regex/removeDiscordSpam.json5)
* Differs from just using automod because this config can allow one-off/organic links from users who DO NOT spam discord links but will still remove the comment if the user is spamming them

View File

@@ -0,0 +1,13 @@
name: swear
kind: regex
criteria:
# triggers if more than 3 activities in the last 10 match the regex
- regex: '/fuck|shit|damn/'
# this differs from "totalMatchThreshold"
#
# activityMatchThreshold => # of activities from window must match regex
# totalMatchThreshold => # of matches across all activities from window must match regex
activityMatchThreshold: '> 3'
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
window: 10

View File

@@ -0,0 +1,6 @@
name: swear
kind: regex
criteria:
- regex: '/fuck|shit|damn/'
# if "matchThreshold" is not specified it defaults to this -- default behavior is to trigger if there are any matches
#matchThreshold: "> 0"

View File

@@ -0,0 +1,8 @@
name: swear
kind: regex
criteria:
# triggers if any activity in the last 10 (including current activity) match the regex
- regex: '/fuck|shit|damn/'
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
window: 10

View File

@@ -0,0 +1,11 @@
name: swear
kind: regex
criteria:
- regex: '/fuck|shit|damn/'
# triggers if the current activity has more than 0 matches
# if the activity is a submission then matches against title, body, and url
# if "testOn" is not provided then `title, body` are the defaults
testOn:
- title
- body
- url

View File

@@ -0,0 +1,16 @@
name: swear
kind: regex
criteria:
# triggers if there are more than 5 regex matches in the last 10 activities (comments only)
- regex: '/fuck|shit|damn/'
# this differs from "activityMatchThreshold"
#
# activityMatchThreshold => # of activities from window must match regex
# totalMatchThreshold => # of matches across all activities from window must match regex
totalMatchThreshold: '> 5'
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
window: 10
# determines which activities from window to consider
# defaults to "all" (submissions and comments)
lookAt: comments

View File

@@ -0,0 +1,6 @@
name: swear
kind: regex
criteria:
- regex: '/fuck|shit|damn/'
# triggers if current activity has greater than 5 matches
matchThreshold: '> 5'

View File

@@ -0,0 +1,13 @@
name: swear
kind: regex
criteria:
# triggers if there are more than 5 regex matches in the last 10 activities (comments or submission)
- regex: '/fuck|shit|damn/'
# this differs from "activityMatchThreshold"
#
# activityMatchThreshold => # of activities from window must match regex
# totalMatchThreshold => # of matches across all activities from window must match regex
totalMatchThreshold: '> 5'
# if `window` is specified it tells the rule to check the current activity as well as the activities returned from `window`
# learn more about `window` here https://github.com/FoxxMD/context-mod/blob/master/docs/activitiesWindow.md
window: 10

View File

@@ -0,0 +1,73 @@
{
"checks": [
{
"name": "remove discord spam",
"notifyOnTrigger": true,
"description": "remove comments from users who are spamming discord links",
"kind": "comment",
"authorIs": {
"exclude": [
{
"isMod": true
}
]
},
"itemIs": [
{
"removed": false,
"approved": false,
}
],
"condition": "OR",
"rules": [
{
// set to false if you want to allow comments with a discord link ONLY IF
// the author doesn't have a history of spamming discord links
// -- basically allows one-off/organic discord links
"enable": true,
"name": "linkOnlySpam",
"kind": "regex",
"criteria": [
{
"name": "only link",
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+)$/i",
}
]
},
{
"condition": "AND",
"rules": [
{
"name": "linkAnywhereSpam",
"kind": "regex",
"criteria": [
{
"name": "contains link anywhere",
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
}
]
},
{
"name": "linkAnywhereHistoricalSpam",
"kind": "regex",
"criteria": [
{
"name": "contains links anywhere historically",
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
"totalMatchThreshold": ">= 3",
"lookAt": "comments",
"window": 10
}
]
}
]
}
],
"actions": [
{
"kind": "remove"
}
]
}
]
}

View File

@@ -0,0 +1,36 @@
checks:
- name: remove discord spam
notifyOnTrigger: true
description: remove comments from users who are spamming discord links
kind: comment
authorIs:
exclude:
- isMod: true
itemIs:
- removed: false
approved: false
condition: OR
rules:
- enable: true
name: linkOnlySpam
kind: regex
criteria:
- name: only link
regex: '/^.*(discord\.gg\/[\w\d]+)$/i'
- condition: AND
rules:
- name: linkAnywhereSpam
kind: regex
criteria:
- name: contains link anywhere
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
- name: linkAnywhereHistoricalSpam
kind: regex
criteria:
- name: contains links anywhere historically
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
totalMatchThreshold: '>= 3'
lookAt: comments
window: 10
actions:
- kind: remove

View File

@@ -45,5 +45,5 @@ With only `gapAllowance: 2` this rule **would trigger** because the 1 and 2
## Examples
* [Crosspost Spamming](/docs/examples/repeatActivity/crosspostSpamming.json5) - Check if an Author is spamming their Submissions across multiple subreddits
* [Burst-posting](/docs/examples/repeatActivity/burstPosting.json5) - Check if Author is crossposting their Submissions in short bursts
* Crosspost Spamming [JSON](/docs/examples/repeatActivity/crosspostSpamming.json5) | [YAML](/docs/examples/repeatActivity/crosspostSpamming.yaml) - Check if an Author is spamming their Submissions across multiple subreddits
* Burst-posting [JSON](/docs/examples/repeatActivity/burstPosting.json5) | [YAML](/docs/examples/repeatActivity/burstPosting.yaml) - Check if Author is crossposting their Submissions in short bursts

View File

@@ -0,0 +1,23 @@
checks:
- name: Burstpost Spam
description: Check if Author is crossposting in short bursts
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: burstpost
kind: repeatActivity
# will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
useSubmissionAsReference: true
# the number of non-repeat activities (submissions or comments) to ignore between repeat submissions
gapAllowance: 3
# if the Author has posted this Submission 6 times, ignoring 3 non-repeat activities between each repeat, then this rule will trigger
threshold: '>= 6'
# look at all of the Author's submissions in the last 7 days or 100 submissions
window:
duration: 7 days
count: 100
actions:
- kind: report
content: >-
Author has burst-posted this link {{rules.burstpost.largestRepeat}}
times over {{rules.burstpost.window}}

View File

@@ -0,0 +1,19 @@
checks:
- name: Crosspost Spam
description: Check if Author is spamming Submissions across subreddits
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: xpostspam
kind: repeatActivity
# will only look at Submissions in Author's history that contain the same content (link) as the Submission this check was initiated by
useSubmissionAsReference: true
# if the Author has posted this Submission 5 times consecutively then this rule will trigger
threshold: '>= 5'
# look at all of the Author's submissions in the last 7 days
window: 7 days
actions:
- kind: report
content: >-
Author has posted this link {{rules.xpostspam.largestRepeat}} times
over {{rules.xpostspam.window}}

View File

@@ -0,0 +1,927 @@
The **Repost** rule is used to find reposts for both **Submissions** and **Comments**, depending on what type of **Check** it is used on.
Note: This rule is for searching **all of Reddit** for reposts, as opposed to just the Author of the Activity being checked. If you only want to check for reposts by the Author of the Activity being checked you should use the [Repeat Activity](/docs/examples/repeatActivity) rule.
# TLDR
Out of the box CM provides a repost rule with sensible default behavior that requires no configuration. You do not need to configure any of the options below (facets, modifiers, criteria) yourself in order to have a working repost rule. The default behavior is as follows...
* When looking for Submission reposts CM will find any Submissions with
* a very similar title
* or independent of title...
* any crossposts/duplicates
* any submissions with the exact URL
* When looking for Comment reposts CM will do the above AND THEN
* compare the top 50 most-upvoted comments from the top 10 most-upvoted Submissions against the comment being checked
* compare any items found from external source (Youtube comments, etc...) against the comment being checked
# Configuration
## Search Facets
ContextMod has several ways to search for reposts -- each looks at a different element of a Submission in order to find repost candidates. Define any or all of these **Search Facets** in the `searchOn` property of the Repost Rule's configuration.
### Usage
Facets are specified in the `searchOn` array property within the rule's configuration.
**String**
Specify one or more types of facets as a string to use their default configurations
<details>
YAML
```yaml
kind: repost
criteria:
- searchOn:
- title
- url
- crossposts
```
JSON
```json5
{
"kind": "repost",
"criteria": [
{
// ...
"searchOn": ["title", "url", "crossposts"],
// ....
}
]
}
```
</details>
**Object**
**string** and object configurations can be mixed
<details>
```yaml
kind: repost
criteria:
- searchOn:
- title
- kind: url
matchScore: 90
- external
```
```json5
{
"kind": "repost",
"criteria": [
{
// ...
"searchOn": [
"title",
{
"kind": "url",
// could also specify multiple types to use the same config for all
//"kind": ["url", "duplicates"]
"matchScore": 90,
//...
},
"external"
],
// ....
}
]
}
```
</details>
### Facet Types
* **title** -- search reddit for Submissions with a similar title
* **url** -- search reddit for Submissions with the same URL
* **duplicates** -- get all Submissions **reddit has identified** as duplicates that are **NOT** crossposts
* these are found under *View discussions in other communities* (new reddit) or *other discussions* (old reddit) on the Submission
* **crossposts** -- get all Submissions where the current Submission is the source of an **official** crosspost
* this differs from duplicates in that crossposts use reddit's built-in crosspost functionality, respect subreddit crosspost rules, and link back to the original Submission
* **external** -- get items from the Submission's link source that may be reposted (currently implemented for **Comment Checks** only)
* When the Submission link is for...
* **Youtube** -- get top comments on video by replies/like count
* **NOTE:** An **API Key** for the [Youtube Data API](https://developers.google.com/youtube/v3) must be provided for this facet to work. This can be provided by the operator alongside [bot credentials](/docs/operatorConfiguration.md) or in the top-level `credentials` property for a [subreddit configuration.](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Freddit-context-bot%2Fmaster%2Fsrc%2FSchema%2FApp.json)
### Facet Modifiers
For all **Facets** except **external** there are options that can be configured to determine whether a found Submission is a "valid" repost, IE filtering. These options can be configured **per facet**.
* **matchScore** -- The percentage, as a whole number, of a repost title that must match the title being checked in order to consider both a match
* **minWordCount** -- The minimum number of words a title must have
* **caseSensitive** -- If the match comparison should be case-sensitive (defaults to `false`)
Additionally, the current Activity's title and/or each repost's title can be transformed before matching:
* **transformations** -- An array of SearchAndReplace objects used to transform the repost's title
* **transformationsActivity** -- An array of SearchAndReplace objects used to transform the current Activity's title
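As an illustration, here is a hedged sketch of a `title` facet using these modifiers. The values shown are arbitrary examples, and the `find`/`replace` keys inside the transformation are an assumed shape for the SearchAndReplace objects, not something confirmed by this document.
```yaml
kind: repost
criteria:
  - searchOn:
      - kind: title
        # candidate titles must be at least 90% similar to the checked title
        matchScore: 90
        # ignore candidates whose title has fewer than 3 words
        minWordCount: 3
        # compare titles case-sensitively (default is false)
        caseSensitive: true
        # transform each candidate title before comparing
        transformations:
          # assumed property names for a SearchAndReplace object
          - find: '[OC]'
            replace: ''
```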
#### Modifier Defaults
To make facets easier to use without configuration, sensible defaults are applied to each facet when no other configuration is defined...
* **title**
* `matchScore: 85` -- The candidate repost's title must be at least 85% similar to the current Activity's title
* `minWordCount: 2` -- The candidate repost's title must have at least 2 words
For `url`, `duplicates`, and `crossposts` the only default is `matchScore: 0` because the assumption is that you want to treat any actual dups/x-posts or exact URLs as reposts, regardless of their title.
## Additional Criteria Properties
A **criteria** object may also specify some additional tests to run against the reposts found from searching.
### For Submissions and Comments
#### Occurrences
Define a set of criteria to test against the **number of reposts**, **time reposts were created**, or both.
##### Count
<details>
```yaml
kind: repost
criteria:
- searchOn:
- title
- url
- crossposts
occurrences:
criteria:
- count:
condition: AND
test:
- '> 3'
- <= 5
```
```json5
{
"kind": "repost",
"criteria": [
{
// ...
"searchOn": ["title", "url", "crossposts"],
"occurrences": {
"criteria": [
{
// passes if BOTH tests are true
"count": {
"condition": "AND", // default is AND
"test": [
"> 3", // TRUE if there are GREATER THAN 3 reposts found
"<= 5" // TRUE if there are LESS THAN OR EQUAL TO 5 reposts found
]
}
}
],
}
}
]
}
```
</details>
##### Time
Define a test or array of tests to run against **when reposts were created**
<details>
```yaml
kind: repost
criteria:
- searchOn:
- title
- url
- crossposts
occurrences:
criteria:
- time:
condition: AND
test:
- testOn: all
condition: '> 3 months'
```
```json5
{
"kind": "repost",
"criteria": [
{
// ...
"searchOn": [
"title",
"url",
"crossposts"
],
"occurrences": {
"criteria": [
{
time: {
// how to test array of comparisons. AND => all must pass, OR => any must pass
"condition": "AND",
"test": [
{
// which of the found reposts to test the time comparison on
//
// "all" => ALL reposts must pass time comparison
// "any" => ANY repost must pass time comparison
// "newest" => The newest (closest in time to now) repost must pass time comparison
// "oldest" => The oldest (furthest in time from now) repost must pass time comparison
//
"testOn": "all",
// Tested items must be OLDER THAN 3 months
"condition": "> 3 months"
}
]
}
}
]
},
}
]
}
```
</details>
### For Comments
When the rule is run in a **Comment Check** you may specify text comparisons (like those found in Search Facets) to run on the contents of the repost comments *against* the contents of the comment being checked.
* **matchScore** -- The percentage, as a whole number, of a repost comment that must match the comment being checked in order to consider both a match (defaults to 85% IE `85`)
* **minWordCount** -- The minimum number of words a comment must have
* **caseSensitive** -- If the match comparison should be case-sensitive (defaults to `false`)
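For illustration, a hedged sketch of where these comment-level comparisons sit in a Comment Check criteria (the numbers are arbitrary; the same placement appears in the full examples further below):
```yaml
kind: repost
criteria:
  - searchOn:
      - external
      - url
    # compare gathered repost comments against the comment being checked
    matchScore: 95
    # ignore gathered comments with fewer than 5 words
    minWordCount: 5
    caseSensitive: false
```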
# Examples
Examples of a *full* CM configuration, including the Repost Rule, in various scenarios. In each scenario the parts of the configuration that affect the rule are indicated.
## Submissions
When the Repost Rule is run on a **Submission Check** IE the activity being checked is a Submission.
### Default Behavior (No configuration)
This is the same behavior described in the [TLDR](#tldr) section above -- find any submissions with:
* a very similar title (85% or more the same)
* or ignoring title...
* any crossposts/duplicates
* any submissions with the exact URL
<details>
```yaml
polling:
- unmoderated
checks:
- name: subRepost
description: Check if submission has been reposted
kind: submission
condition: AND
rules:
- kind: repost
actions:
- kind: report
content: This submission was reposted
```
```json5
{
"polling": [
"unmoderated"
],
"checks": [
{
"name": "subRepost",
"description": "Check if submission has been reposted",
// kind specifies this check is for SUBMISSIONS
"kind": "submission",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost"
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This submission was reposted"
}
]
}
]
}
```
</details>
### Search by Title Only
Find any submissions with:
* a very similar title (85% or more the same)
<details>
```yaml
polling:
- unmoderated
checks:
- name: subRepost
description: Check if submission has been reposted
kind: submission
condition: AND
rules:
- kind: repost
criteria:
- searchOn:
- title
actions:
- kind: report
content: This submission was reposted
```
```json5
{
"polling": [
"unmoderated"
],
"checks": [
{
"name": "subRepost",
"description": "Check if submission has been reposted",
// kind specifies this check is for SUBMISSIONS
"kind": "submission",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost",
"criteria": [
{
// specify only title to search on
"searchOn": [
"title" // uses default configuration since only string is specified
]
}
]
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This submission was reposted"
}
]
}
]
}
```
</details>
### Search by Title only and specify similarity percentage
* a very similar title (95% or more the same)
<details>
```yaml
polling:
- unmoderated
checks:
- name: subRepost
description: Check if submission has been reposted
kind: submission
condition: AND
rules:
- kind: repost
criteria:
- searchOn:
- kind: title
matchScore: '95'
actions:
- kind: report
content: This submission was reposted
```
```json5
{
"polling": [
"unmoderated"
],
"checks": [
{
"name": "subRepost",
"description": "Check if submission has been reposted",
// kind specifies this check is for SUBMISSIONS
"kind": "submission",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost",
"criteria": [
{
// specify only title to search on
"searchOn": [
{
"kind": "title",
// titles must be 95% or more similar
"matchScore": "95"
}
]
}
]
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This submission was reposted"
}
]
}
]
}
```
</details>
### Search by Title, specify similarity percentage, AND any duplicates
<details>
```yaml
polling:
- unmoderated
checks:
- name: subRepost
description: Check if submission has been reposted
kind: submission
condition: AND
rules:
- kind: repost
criteria:
- searchOn:
- duplicates
- kind: title
matchScore: '95'
actions:
- kind: report
content: This submission was reposted
```
```json5
{
"polling": [
"unmoderated"
],
"checks": [
{
"name": "subRepost",
"description": "Check if submission has been reposted",
// kind specifies this check is for SUBMISSIONS
"kind": "submission",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost",
"criteria": [
{
"searchOn": [
// look for duplicates (NON crossposts) using default configuration
"duplicates",
// search by title
{
"kind": "title",
// titles must be 95% or more similar
"matchScore": "95"
}
]
}
]
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This submission was reposted"
}
]
}
]
}
```
</details>
### Approve Submission if not reposted in the last month, by title
<details>
```yaml
polling:
- unmoderated
checks:
- name: subRepost
description: Check there are no reposts with same title in the last month
kind: submission
condition: AND
rules:
- kind: repost
criteria:
- searchOn:
- title
occurrences:
condition: OR
criteria:
- count:
test:
- < 1
- time:
test:
- testOn: newest
condition: '> 1 month'
actions:
- kind: approve
```
```json5
{
"polling": [
"unmoderated"
],
"checks": [
{
"name": "subRepost",
"description": "Check there are no reposts with same title in the last month",
// kind specifies this check is for SUBMISSIONS
"kind": "submission",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost",
"criteria": [
{
"searchOn": [
"title"
],
"occurrences": {
// if EITHER criteria is TRUE then it "passes"
"condition": "OR",
"criteria": [
// first criteria:
// TRUE if there are LESS THAN 1 reposts (no reposts found)
{
"count": {
"test": ["< 1"]
}
},
// second criteria:
// TRUE if the newest repost is older than one month
{
"time": {
"test": [
{
"testOn": "newest",
"condition": "> 1 month"
}
]
}
}
]
},
}
]
},
//
// repost rule configuration is above
],
"actions": [
{
// approve this post since we know it is not a repost of anything within the last month
"kind": "approve",
}
]
}
]
}
```
</details>
## Comments
### Default Behavior (No configuration)
This is the same behavior described in the [TLDR](#tldr) section above -- find any submissions with:
* a very similar title (85% or more the same)
* or ignoring title...
* any crossposts/duplicates
* any submissions with the exact URL
* If the comment being checked is on a Submission linking to Youtube then the top 50 comments on the youtube video are gathered as well...
AND THEN
* sort submissions by votes
* take top 20 (upvoted) comments from top 10 (upvoted) submissions
* sort comments by votes, take top 50 + top 50 external items
FINALLY
* filter all gathered comments by default `matchScore: 85` to find very similar matches
* the rule is triggered if any matches are found
<details>
```yaml
polling:
- newComm
checks:
- name: commRepost
description: Check if comment has been reposted
kind: comment
condition: AND
rules:
- kind: repost
actions:
- kind: report
content: This comment was reposted
```
```json5
{
"polling": [
"newComm"
],
"checks": [
{
"name": "commRepost",
"description": "Check if comment has been reposted",
// kind specifies this check is for COMMENTS
"kind": "common",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost"
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This comment was reposted"
}
]
}
]
}
```
</details>
### Search by external (youtube) comments only
<details>
```yaml
polling:
- newComm
checks:
- name: commRepost
description: Check if comment has been reposted from youtube
kind: comment
condition: AND
rules:
- kind: repost
criteria:
- searchOn:
- external
actions:
- kind: report
content: This comment was reposted from youtube
```
```json5
{
"polling": [
"newComm"
],
"checks": [
{
"name": "commRepost",
"description": "Check if comment has been reposted from youtube",
// kind specifies this check is for COMMENTS
"kind": "comment",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost",
"criteria": [
{
// specify only external (youtube) to search on
"searchOn": [
"external"
]
}
]
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This comment was reposted from youtube"
}
]
}
]
}
```
</details>
### Search by external (youtube) comments only, with higher comment match percentage
<details>
```yaml
polling:
- newComm
checks:
- name: commRepost
description: Check if comment has been reposted from youtube
kind: comment
condition: AND
rules:
- kind: repost
criteria:
- searchOn:
- external
matchScore: 95
actions:
- kind: report
content: This comment was reposted from youtube
```
```json5
{
"polling": [
"newComm"
],
"checks": [
{
"name": "commRepost",
"description": "Check if comment has been reposted from youtube",
// kind specifies this check is for COMMENTS
"kind": "comment",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost",
"criteria": [
{
// specify only external (youtube) to search on
"searchOn": [
"external"
],
"matchScore": 95 // matchScore for comments is on criteria instead of searchOn config...
},
]
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This comment was reposted from youtube"
}
]
}
]
}
```
</details>
### Search by external (youtube) comments and submission URL, with higher comment match percentage
<details>
```yaml
polling:
- newComm
checks:
- name: commRepost
description: Check if comment has been reposted
kind: comment
condition: AND
rules:
- kind: repost
criteria:
- searchOn:
- external
- url
matchScore: 95
actions:
- kind: report
content: >-
This comment was reposted from youtube or from submission with the
same URL
```
```json5
{
"polling": [
"newComm"
],
"checks": [
{
"name": "commRepost",
"description": "Check if comment has been reposted",
// kind specifies this check is for COMMENTS
"kind": "comment",
"condition": "AND",
"rules": [
// repost rule configuration is below
//
{
"kind": "repost",
"criteria": [
{
// specify only external (youtube) to search on
"searchOn": [
"external",
// can specify any/all submission search facets to acquire comments from
"url"
],
"matchScore": 95 // matchScore for comments is on criteria instead of searchOn config...
},
]
},
//
// repost rule configuration is above
],
"actions": [
{
"kind": "report",
"content": "This comment was reposted from youtube or from submission with the same URL"
}
]
}
]
}
```
</details>

View File

@@ -11,21 +11,31 @@ All actions for these configurations are non-destructive in that:
**You will have to remove the `report` action and `dryRun` settings yourself.** This is to ensure that you understand the behavior the bot will be performing. If you are unsure of this you should leave them in place until you are certain the behavior the bot is performing is acceptable.
**YAML** is the same format as **automoderator**
## Submission-based Behavior
### [Remove submissions from users who have used 'freekarma' subs to bypass karma checks](/docs/examples/subredditReady/freekarma.json5)
### Remove submissions from users who have used 'freekarma' subs to bypass karma checks
[YAML](/docs/examples/subredditReady/freekarma.yaml) | [JSON](/docs/examples/subredditReady/freekarma.json5)
If the user has any activity (comment/submission) in known freekarma subreddits in the past (50 activities or 6 months) then remove the submission.
### [Remove submissions from users who have crossposted the same submission 4 or more times](/docs/examples/subredditReady/crosspostSpam.json5)
### Remove submissions from users who have crossposted the same submission 4 or more times
[YAML](/docs/examples/subredditReady/crosspostSpam.yaml) | [JSON](/docs/examples/subredditReady/crosspostSpam.json5)
If the user has crossposted the same submission in the past (50 activities or 6 months) 4 or more times in a row then remove the submission.
### [Remove submissions from users who have crossposted or used 'freekarma' subs](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5)
### Remove submissions from users who have crossposted or used 'freekarma' subs
[YAML](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.yaml) | [JSON](/docs/examples/subredditReady/freeKarmaOrCrosspostSpam.json5)
Will remove the submission if either of the above two behaviors is detected
### [Remove link submissions where the user's history is comprised of 10% or more of the same link](/docs/examples/subredditReady/selfPromo.json5)
### Remove link submissions where the user's history is comprised of 10% or more of the same link
[YAML](/docs/examples/subredditReady/selfPromo.yaml) | [JSON](/docs/examples/subredditReady/selfPromo.json5)
If the link origin (youtube author, twitter author, etc. or regular domain for non-media links)
@@ -36,6 +46,33 @@ then remove the submission
## Comment-based behavior
### [Remove comment if the user has posted the same comment 4 or more times in a row](/docs/examples/subredditReady/commentSpam.json5)
### Remove comment if the user has posted the same comment 4 or more times in a row
[YAML](/docs/examples/subredditReady/commentSpam.yaml) | [JSON](/docs/examples/subredditReady/commentSpam.json5)
If the user made the same comment (with some fuzzy matching) 4 or more times in a row in the past (50 activities or 6 months) then remove the comment.
### Remove comment if it is discord invite link spam
[YAML](/docs/examples/subredditReady/discordSpam.yaml) | [JSON](/docs/examples/subredditReady/discordSpam.json5)
This rule goes a step further than automod can by being more discretionary about how it handles this type of spam.
* Remove the comment and **ban a user** if:
* Comment being checked contains **only** a discord link (no other text) AND
* Discord links appear **anywhere** in three or more of the last 10 comments the Author has made
otherwise...
* Remove the comment if:
* Comment being checked contains **only** a discord link (no other text) OR
* Comment contains a discord link **anywhere** AND
* Discord links appear **anywhere** in three or more of the last 10 comments the Author has made
Using these checks ContextMod can more easily distinguish between these use cases for a user commenting with a discord link:
* actual spammers who only spam a discord link
* users who may comment with a link but have context for it either in the current comment or in their history
* users who may comment with a link but it's a one-off event (no other links historically)
Additionally, you could modify either or both of these checks to not remove one-off discord link comments but still remove them if the user has a historical trend of spamming links.

View File

@@ -0,0 +1,25 @@
polling:
- newComm
checks:
# Stop users who spam the same comment many times
- name: low xp comment spam
description: X-posted comment >=4x
kind: comment
condition: AND
rules:
- name: xPostLow
kind: repeatActivity
# number of "non-repeat" comments allowed between "repeat comments"
gapAllowance: 2
# greater or more than 4 repeat comments triggers this rule
threshold: '>= 4'
# retrieve either last 50 comments or 6 months' of history, whichever is less
window:
count: 50
duration: 6 months
actions:
- kind: report
enable: true
content: 'Remove => Posted same comment {{rules.xpostlow.largestRepeat}}x times'
- kind: remove
enable: true

View File

@@ -0,0 +1,48 @@
polling:
- unmoderated
checks:
# stop users who post low-effort, crossposted spam submissions
#
# Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
# less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
- name: low xp spam and engagement
description: X-posted 4x and low comment engagement
kind: submission
itemIs:
- removed: false
condition: AND
rules:
- name: xPostLow
kind: repeatActivity
gapAllowance: 2
threshold: '>= 4'
window:
count: 50
duration: 6 months
- name: lowOrOpComm
kind: history
criteriaJoin: OR
criteria:
- window:
count: 100
duration: 6 months
comment: < 50%
- window:
count: 100
duration: 6 months
comment: '> 40% OP'
actions:
- kind: report
enable: true
content: >-
Remove=>{{rules.xpostlow.largestRepeat}} X-P =>
{{rules.loworopcomm.thresholdSummary}}
- kind: remove
enable: true
- kind: comment
enable: true
content: >-
Your submission has been removed because you cross-posted it
{{rules.xpostlow.largestRepeat}} times and you have very low
engagement outside of making submissions
distinguish: true

View File

@@ -0,0 +1,75 @@
{
"polling": ["newComm"],
"checks": [
{
"name": "ban discord only spammer",
"description": "ban a user who spams only a discord link many times historically",
"kind": "comment",
"condition": "AND",
"rules": [
"linkOnlySpam",
"linkAnywhereHistoricalSpam",
],
"actions": [
{
"kind": "remove"
},
{
"kind": "ban",
"content": "spamming discord links"
}
]
},
{
"name": "remove discord spam",
"description": "remove comments from users who only link to discord or mention discord link many times historically",
"kind": "comment",
"condition": "OR",
"rules": [
{
"name": "linkOnlySpam",
"kind": "regex",
"criteria": [
{
"name": "only link",
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+)$/i",
}
]
},
{
"condition": "AND",
"rules": [
{
"name": "linkAnywhereSpam",
"kind": "regex",
"criteria": [
{
"name": "contains link anywhere",
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
}
]
},
{
"name": "linkAnywhereHistoricalSpam",
"kind": "regex",
"criteria": [
{
"name": "contains links anywhere historically",
"regex": "/^.*(discord\\.gg\\/[\\w\\d]+).*$/i",
"totalMatchThreshold": ">= 3",
"lookAt": "comments",
"window": 10
}
]
}
]
}
],
"actions": [
{
"kind": "remove"
}
]
}
]
}

View File

@@ -0,0 +1,46 @@
polling:
- newComm
checks:
- name: ban discord only spammer
description: ban a user who spams only a discord link many times historically
kind: comment
condition: AND
rules:
- linkOnlySpam
- linkAnywhereHistoricalSpam
actions:
- kind: remove
- kind: ban
content: spamming discord links
- name: remove discord spam
description: >-
remove comments from users who only link to discord or mention discord
link many times historically
kind: comment
condition: OR
rules:
- name: linkOnlySpam
kind: regex
criteria:
- name: only link
# single quotes are required to escape special characters
regex: '/^.*(discord\.gg\/[\w\d]+)$/i'
- condition: AND
rules:
- name: linkAnywhereSpam
kind: regex
criteria:
- name: contains link anywhere
# single quotes are required to escape special characters
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
- name: linkAnywhereHistoricalSpam
kind: regex
criteria:
- name: contains links anywhere historically
# single quotes are required to escape special characters
regex: '/^.*(discord\.gg\/[\w\d]+).*$/i'
totalMatchThreshold: '>= 3'
lookAt: comments
window: 10
actions:
- kind: remove

View File

@@ -0,0 +1,84 @@
polling:
- unmoderated
checks:
# stop users who post low-effort, crossposted spam submissions
#
# Remove a SUBMISSION if the user has crossposted it at least 4 times in recent history AND
# less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
- name: remove on low xp spam and engagement
description: X-posted 4x and low comment engagement
kind: submission
itemIs:
- removed: false
condition: AND
rules:
- name: xPostLow
kind: repeatActivity
gapAllowance: 2
threshold: '>= 4'
window:
count: 50
duration: 6 months
- name: lowOrOpComm
kind: history
criteriaJoin: OR
criteria:
- window:
count: 100
duration: 6 months
comment: < 50%
- window:
count: 100
duration: 6 months
comment: '> 40% OP'
actions:
- kind: report
enable: true
content: >-
Remove=>{{rules.xpostlow.largestRepeat}} X-P =>
{{rules.loworopcomm.thresholdSummary}}
- kind: remove
enable: false
- kind: comment
enable: true
content: >-
Your submission has been removed because you cross-posted it
{{rules.xpostlow.largestRepeat}} times and you have very low
engagement outside of making submissions
distinguish: true
dryRun: true
# Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
- name: freekarma removal
description: Remove submission if user has used freekarma sub recently
kind: submission
itemIs:
- removed: false
condition: AND
rules:
- name: freekarma
kind: recentActivity
window:
count: 50
duration: 6 months
useSubmissionAsReference: false
thresholds:
- subreddits:
- FreeKarma4U
- FreeKarma4You
- KarmaStore
- promote
- shamelessplug
- upvote
actions:
- kind: report
enable: true
content: 'Remove=> {{rules.freekarma.totalCount}} activities in freekarma subs'
- kind: remove
enable: false
- kind: comment
enable: true
content: >-
Your submission has been removed because you have recent activity in
'freekarma' subs
distinguish: true
dryRun: true

View File

@@ -0,0 +1,35 @@
polling:
- unmoderated
checks:
# Remove submissions from users who have recent activity in freekarma subs within the last 50 activities or 6 months (whichever is less)
- name: freekarma removal
description: Remove submission if user has used freekarma sub recently
kind: submission
itemIs:
- removed: false
condition: AND
rules:
- name: freekarma
kind: recentActivity
window:
count: 50
duration: 6 months
useSubmissionAsReference: false
thresholds:
- subreddits:
- FreeKarma4U
- FreeKarma4You
- KarmaStore
- upvote
actions:
- kind: report
enable: true
content: 'Remove=> {{rules.freekarma.totalCount}} activities in freekarma subs'
- kind: remove
enable: true
- kind: comment
enable: false
content: >-
Your submission has been removed because you have recent activity in
'freekarma' subs
distinguish: true

View File

@@ -0,0 +1,71 @@
polling:
- unmoderated
checks:
#
# Stop users who make link submissions with a self-promotional agenda (with reddit's suggested 10% rule)
# https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit
#
# Remove a SUBMISSION if the link comprises 10% or more of the user's history (100 activities or 6 months) OR
#
# if link comprises 10% of submission history (100 activities or 6 months)
# AND less than 50% of their activity is comments OR more than 40% of those comments are as OP (in their own submissions)
#
- name: Self-promo all AND low engagement
description: Self-promo is >10% for all or just sub and low comment engagement
kind: submission
condition: OR
rules:
- name: attr
kind: attribution
criteria:
- threshold: '>= 10%'
window:
count: 100
duration: 6 months
domains:
- 'AGG:SELF'
- condition: AND
rules:
- name: attrsub
kind: attribution
criteria:
- threshold: '>= 10%'
thresholdOn: submissions
window:
count: 100
duration: 6 months
domains:
- 'AGG:SELF'
- name: lowOrOpComm
kind: history
criteriaJoin: OR
criteria:
- window:
count: 100
duration: 6 months
comment: < 50%
- window:
count: 100
duration: 6 months
comment: '> 40% OP'
actions:
- kind: report
enable: true
content: >-
{{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}} of
{{rules.attr.activityTotal}}{{rules.attrsub.activityTotal}} items
({{rules.attr.window}}{{rules.attrsub.window}}){{#rules.loworopcomm.thresholdSummary}}
=>
{{rules.loworopcomm.thresholdSummary}}{{/rules.loworopcomm.thresholdSummary}}
- kind: remove
enable: false
- kind: comment
enable: true
content: >-
Your submission has been removed because it comprises 10% or more of your
recent history
({{rules.attr.largestPercent}}{{rules.attrsub.largestPercent}}). This
is against [reddit's self promotional
guidelines.](https://www.reddit.com/wiki/selfpromotion#wiki_guidelines_for_self-promotion_on_reddit)
distinguish: true
dryRun: true

View File

@@ -14,7 +14,7 @@ Consult the [schema](https://json-schema.app/view/%23%2Fdefinitions%2FUserNoteCr
### Examples
* [Do not tag user with Good User note](/docs/examples/userNotes/usernoteFilter.json5)
* Do not tag user with Good User note [JSON](/docs/examples/userNotes/usernoteFilter.json5) | [YAML](/docs/examples/userNotes/usernoteFilter.yaml)
## Action
@@ -23,4 +23,4 @@ A User Note can also be added to the Author of a Submission or Comment with the
### Examples
* [Add note on user doing self promotion](/docs/examples/userNotes/usernoteSP.json5)
* Add note on user doing self promotion [JSON](/docs/examples/userNotes/usernoteSP.json5) | [YAML](/docs/examples/userNotes/usernoteSP.yaml)

View File

@@ -0,0 +1,27 @@
checks:
- name: Self Promo Activities
description: Tag SP only if user does not have good contributor user note
# check will run on a new submission in your subreddit and look at the Author of that submission
kind: submission
rules:
- name: attr10all
kind: attribution
author:
exclude:
# the key of the usernote type to look for https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
# rule will not run if current usernote on Author is of type 'gooduser'
- type: gooduser
criteria:
- threshold: '> 10%'
window: 90 days
- threshold: '> 10%'
window: 100
actions:
- kind: usernote
# the key of usernote type
# https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
type: spamwarn
# content is mustache templated
content: >-
Self Promotion: {{rules.attr10all.titlesDelim}}
{{rules.attr10all.largestPercent}}%

View File

@@ -0,0 +1,23 @@
checks:
- name: Self Promo Activities
# check will run on a new submission in your subreddit and look at the Author of that submission
description: >-
Check if any of Author's aggregated submission origins are >10% of entire
history
kind: submission
rules:
- name: attr10all
kind: attribution
criteria:
- threshold: '> 10%'
window: 90 days
- threshold: '> 10%'
window: 100
actions:
- kind: usernote
# the key of usernote type
# https://github.com/toolbox-team/reddit-moderator-toolbox/wiki/Subreddit-Wikis%3A-usernotes#working-with-note-types
type: spamwarn
content: >-
Self Promotion: {{rules.attr10all.titlesDelim}}
{{rules.attr10all.largestPercent}}%

View File

@@ -14,8 +14,8 @@ This getting started guide is for **reddit moderators** -- that is, someone who
Before continuing with this guide you should first make sure you understand how a ContextMod works. Please review this documentation:
* [How It Works](/docs#how-it-works)
* [Core Concepts](/docs#concepts)
* [How It Works](/docs/README.md#how-it-works)
* [Core Concepts](/docs/README.md#concepts)
# Choose A Bot
@@ -36,15 +36,16 @@ If the Operator has communicated that **you should add a bot they control as a m
___
Ensure that you are in communication with the **operator** for this bot. The bot **will not automatically accept a moderator invitation,** it must be manually done by the bot operator. This is an intentional barrier to ensure moderators and the operator are familiar with their respective needs and have some form of trust.
Ensure that you are in communication with the **operator** of this bot. The bot **will only accept a moderator invitation if your subreddit has been whitelisted by the operator.** This is an intentional barrier to ensure moderators and the operator are familiar with their respective needs and have some form of trust.
Now invite the bot to moderate your subreddit. The bot should have at least these permissions:
* Manage Users
* Manage Posts and Comments
* Manage Flair
Additionally, the bot must have the **Manage Wiki Pages** permission if you plan to use [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes). If you are not planning on using this feature and do not want the bot to have this permission then you **must** ensure the bot has visibility to the configuration wiki page (detailed below).
* Manage Wiki Pages
* Required to read the moderator-only visible wiki page used to configure the bot
* Required to read/write to [Toolbox User Notes](https://www.reddit.com/r/toolbox/wiki/docs/usernotes)
## Bring Your Own Bot (BYOB)
@@ -60,7 +61,7 @@ If the operator has communicated that **they want to use a bot you control** thi
**Cons:**
* More setup required for both moderators and operators
* You must have access to the credentials for the reddit account (bot)
___
@@ -72,15 +73,28 @@ Review the information shown on the invite link webpage and then follow the dire
# Configuring the Bot
The bot's behavior is defined using a configuration, like automoderator, that is stored in the **wiki** of each subreddit it moderates.
The default location for this page is at `https://old.reddit.com/r/YOURSUBREDDIT/wiki/botconfig/contextbot`
## Setup wiki page
The bot automatically tries to create its configuration wiki page. You can find the result of this in the log for your subreddit in the web interface.
If this fails for some reason you can create the wiki page through the web interface by navigating to your subreddit's tab, opening the [built-in editor (click **View**)](/docs/screenshots/configBox.png), and following the directions in the **Create configuration for...** link found there.
If neither of the above approaches works, or you do not wish to use the web interface, expand the section below for directions on how to manually set up the wiki page:
<details>
* Visit the wiki page of the subreddit you want the bot to moderate
* The default location the bot checks for a configuration is at `https://old.reddit.com/r/YOURSUBREDDIT/wiki/botconfig/contextbot`
* If the page does not exist create it
* Ensure the wiki page visibility is restricted
* On the wiki page click **settings** (**Page settings** in new reddit)
* Check the box for **Only mods may edit and view** and then **save**
* Alternatively, if you did not give the bot the **Manage Wiki Pages** permission then add it to the **allow users to edit page** setting
</details>
## Procure a configuration
@@ -94,25 +108,46 @@ Visit the [Examples](https://github.com/FoxxMD/context-mod/tree/master/docs/exam
After you have found a configuration to use as a starting point:
* In a new tab open the github page for the configuration you want ([example](/docs/examples/repeatActivity/crosspostSpamming.json5))
* Click the **Raw** button, then select all and copy all of the text to your clipboard.
* Copy the URL for the configuration file EX `https://github.com/FoxxMD/context-mod/blob/master/docs/examples/subredditReady/freekarma.json5` and either:
* (Easiest) **Load** it into your [subreddit's built-in editor](#using-the-built-in-editor) and **Save**
* or on the file's page, click the **Raw** button, select all and copy to your clipboard, and [manually save to your wiki page](#manually-saving)
### Build Your Own Config
Additionally, you can use [this schema editor](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) to build your configuration. The editor features a ton of handy features:
CM comes equipped with a [configuration explorer](https://json-schema.app/view/%23?url=https%3A%2F%2Fraw.githubusercontent.com%2FFoxxMD%2Fcontext-mod%2Fmaster%2Fsrc%2FSchema%2FApp.json) to help you see all available options, with descriptions and examples, that can be used in your configuration.
* fully annotated configuration data/structure
* generated examples in json/yaml
* built-in editor that automatically validates your config
To create or edit a configuration you should use **CM's built-in editor** which features:
* syntax validation and formatting
* full configuration validation with error highlighting, hints, and fixes
* hover over properties to see documentation and examples
PROTIP: Find an example config to use as a starting point and then build on it using the editor.
To use the editor either:
* [use your subreddit's built-in editor](#using-the-built-in-editor)
* or use the public editor at https://cm.foxxmd.dev/config
PROTIP: Find an [example config](#using-an-example-config) to use as a starting point and then build on it using the editor.
## Saving Your Configuration
* Open the wiki page you created in the [previous step](#setup-wiki-page) and click **edit**
### Using the built-in Editor
In the web interface each subreddit's tab has access to the built-in editor. Use this built-in editor to automatically create, load, or save the configuration for that subreddit's wiki.
* Visit the tab for the subreddit you want to edit the configuration of
* Open the [built-in editor by clicking **View**](/docs/screenshots/configBox.png)
* Edit your configuration
* Follow the directions on the **Save to r/..** link found at the top of the editor to automatically save your configuration
### Manually Saving
<details>
* Open the wiki page you created in the [wiki setup step](#setup-wiki-page) and click **edit**
* Copy-paste your configuration into the wiki text box
* Save the edited wiki page
</details>
___
The bot automatically checks for new configurations on your wiki page every 5 minutes. If your operator has the web interface accessible you may log in there and force the config to update for your subreddit.

View File

@@ -7,6 +7,7 @@ ContextMod supports comparing image content, for the purpose of detecting duplic
To enable comparisons reference the example below (at the top-level of your rule) and configure as needed:
JSON
```json5
{
"name": "ruleWithImageDetection",
@@ -31,9 +32,18 @@ To enable comparisons reference the example below (at the top-level of your rule
},
//
// And above ^^^
...
//...
}
```
YAML
```yaml
name: ruleWithImageDetection
kind: recentActivity
enable: true
threshold: 5
fetchBehavior: extension
```
**Perceptual Hashing** (`hash`) and **Pixel Comparisons** (`pixel`) may be used at the same time. Refer to the documentation below to see how they interact.
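For reference, a hedged sketch of a rule with both techniques enabled at the same time, using only the properties shown in the hash and pixel blocks of this page (the threshold values are arbitrary examples, not recommendations):
```yaml
name: ruleWithHashAndPixel
kind: recentActivity
imageDetection:
  enable: true
  hash:
    enable: true
    bits: 32
    ttl: 60
    # differences at or below this value are treated as the same image and pixel comparison will not occur
    hardThreshold: 5
    softThreshold: 10
  pixel:
    enable: true
    # if the pixel difference percentage is at or below this value the images are considered the same
    threshold: 5
```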
@@ -114,10 +124,12 @@ To further configure hashing refer to this code block:
// the higher the bits the more accurate the comparison
//
// NOTE: Hashes of different sizes (bits) cannot be compared. If you are caching hashes make sure all rules where results may be shared use the same bit count to ensure hashes can be compared. Otherwise hashes will be recomputed.
"bits": 32, // default is 32 if not defined
"bits": 32,
// default is 32 if not defined
//
// number of seconds to cache an image hash
"ttl": 60, // default is 60 if not defined
"ttl": 60,
// default is 60 if not defined
//
// "High Confidence" Threshold
// If the difference in comparison is equal to or less than this number the images are considered the same and pixel comparison WILL NOT occur
@@ -139,8 +151,22 @@ To further configure hashing refer to this code block:
//
// And above ^^^
//"pixel": {...}
},
}
//...
}
```
YAML
```yaml
name: ruleWithImageDetectionAndConfiguredHashing
kind: recentActivity
imageDetection:
enable: true
hash:
enable: true
bits: 32
ttl: 60
hardThreshold: 5
softThreshold: 0
```
## Pixel Comparison
@@ -184,18 +210,28 @@ To configure pixel comparisons refer to this code block:
```json5
{
"name": "ruleWithImageDetectionAndPixelEnabled",
"kind": "recentActivity",
"imageDetection": {
//"hash": {...}
"pixel": {
// enable or disable pixel comparisons (disabled by default)
"enable": true,
// if the comparison difference percentage is equal to or less than this value the images are considered the same
//
// if not defined the value from imageDetection.threshold will be used
"threshold": 5
}
},
//...
"name": "ruleWithImageDetectionAndPixelEnabled",
"kind": "recentActivity",
"imageDetection": {
//"hash": {...}
"pixel": {
// enable or disable pixel comparisons (disabled by default)
"enable": true,
// if the comparison difference percentage is equal to or less than this value the images are considered the same
//
// if not defined the value from imageDetection.threshold will be used
"threshold": 5
}
},
//...
}
```
YAML
```yaml
name: ruleWithImageDetectionAndPixelEnabled
kind: recentActivity
imageDetection:
pixel:
enable: true
threshold: 5
```

docs/logo.png: new binary image file added (18 KiB), not shown

View File

@@ -121,6 +121,16 @@ Below are examples of the minimum required config to run the application using a
Using **FILE**
<details>
YAML
```yaml
bots:
- credentials:
clientId: f4b4df1c7b2
clientSecret: 34v5q1c56ub
refreshToken: 34_f1w1v4
accessToken: p75_1c467b2
```
JSON
```json5
{
"bots": [
@@ -175,6 +185,11 @@ An example of using multiple configuration levels together IE all are provided t
}
}
```
YAML
```yaml
logging:
level: debug
```
</details>
@@ -220,6 +235,30 @@ See the [Architecture Docs](/docs/serverClientArchitecture.md) for more informat
<details>
YAML
```yaml
bots:
- credentials:
clientId: f4b4df1c7b2
clientSecret: 34v5q1c56ub
refreshToken: 34_f1w1v4
accessToken: p75_1c467b2
web:
credentials:
clientId: f4b4df1c7b2
clientSecret: 34v5q1c56ub
redirectUri: 'http://localhost:8085/callback'
clients:
# server application running on this same CM instance
- host: 'localhost:8095'
secret: localSecret
# a server application running somewhere else
- host: 'mySecondContextMod.com:8095'
secret: anotherSecret
api:
secret: localSecret
```
JSON
```json5
{
"bots": [
@@ -289,3 +328,14 @@ A caching object in the json configuration:
}
}
```
YAML
```yaml
provider:
store: memory
ttl: 60
max: 500
host: localhost
port: 6379
auth_pass: null
db: 0
```

Binary image file added (45 KiB), not shown

package-lock.json (generated): diff suppressed because it is too large (4395 lines changed)

View File

@@ -7,7 +7,6 @@
"test": "echo \"Error: no tests installed\" && exit 1",
"build": "tsc",
"start": "node src/index.js run",
"guard": "ts-auto-guard src/JsonConfig.ts",
"schema": "npm run -s schema-app & npm run -s schema-ruleset & npm run -s schema-rule & npm run -s schema-action & npm run -s schema-config",
"schema-app": "typescript-json-schema tsconfig.json JSONConfig --out src/Schema/App.json --required --tsNodeRegister --refs",
"schema-ruleset": "typescript-json-schema tsconfig.json RuleSetJson --out src/Schema/RuleSet.json --required --tsNodeRegister --refs",
@@ -26,14 +25,17 @@
"license": "ISC",
"dependencies": {
"@awaitjs/express": "^0.8.0",
"@googleapis/youtube": "^2.0.0",
"@stdlib/regexp-regexp": "^0.0.6",
"ajv": "^7.2.4",
"ansi-regex": ">=5.0.1",
"async": "^3.2.0",
"autolinker": "^3.14.3",
"body-parser": "^1.19.0",
"cache-manager": "^3.4.4",
"cache-manager-redis-store": "^2.0.0",
"commander": "^8.0.0",
"comment-json": "^4.1.1",
"cookie-parser": "^1.3.5",
"dayjs": "^1.10.5",
"deepmerge": "^4.2.2",
@@ -51,13 +53,11 @@
"he": "^1.2.0",
"http-proxy": "^1.18.1",
"image-size": "^1.0.0",
"js-yaml": "^4.1.0",
"json5": "^2.2.0",
"jsonwebtoken": "^8.5.1",
"leven": "^3.1.0",
"lodash": "^4.17.21",
"lru-cache": "^6.0.0",
"monaco-editor": "^0.27.0",
"mustache": "^4.2.0",
"node-fetch": "^2.6.1",
"normalize-url": "^6.1.0",
@@ -68,20 +68,22 @@
"passport-custom": "^1.1.1",
"passport-jwt": "^4.0.0",
"pixelmatch": "^5.2.1",
"pony-cause": "^1.1.1",
"pretty-print-json": "^1.0.3",
"safe-stable-stringify": "^1.1.1",
"set-random-interval": "^1.1.0",
"snoostorm": "^1.5.2",
"snoowrap": "^1.23.0",
"socket.io": "^4.1.3",
"string-similarity": "^4.0.4",
"tcp-port-used": "^1.0.2",
"triple-beam": "^1.3.0",
"typescript": "^4.3.4",
"webhook-discord": "^3.7.7",
"winston": "FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
"winston": "github:FoxxMD/winston#fbab8de969ecee578981c77846156c7f43b5f01e",
"winston-daily-rotate-file": "^4.5.5",
"winston-duplex": "^0.1.1",
"winston-transport": "^4.4.0",
"yaml": "2.0.0-10",
"zlib": "^1.0.5"
},
"devDependencies": {
@@ -108,11 +110,12 @@
"@types/passport-jwt": "^3.0.6",
"@types/pixelmatch": "^5.2.4",
"@types/sharp": "^0.29.2",
"@types/string-similarity": "^4.0.0",
"@types/tcp-port-used": "^1.0.0",
"@types/triple-beam": "^1.3.2",
"ts-auto-guard": "*",
"ts-essentials": "^9.1.2",
"ts-json-schema-generator": "^0.93.0",
"typescript-json-schema": "^0.50.1"
"typescript-json-schema": "~0.53"
},
"optionalDependencies": {
"sharp": "^0.29.1"

View File

@@ -10,10 +10,11 @@ import ApproveAction, {ApproveActionConfig} from "./ApproveAction";
import BanAction, {BanActionJson} from "./BanAction";
import {MessageAction, MessageActionJson} from "./MessageAction";
import {SubredditResources} from "../Subreddit/SubredditResources";
import Snoowrap from "snoowrap";
import {UserFlairAction, UserFlairActionJson} from './UserFlairAction';
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
export function actionFactory
(config: ActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Action {
(config: ActionJson, logger: Logger, subredditName: string, resources: SubredditResources, client: ExtendedSnoowrap): Action {
switch (config.kind) {
case 'comment':
return new CommentAction({...config as CommentActionJson, logger, subredditName, resources, client});
@@ -25,6 +26,8 @@ export function actionFactory
return new ReportAction({...config as ReportActionJson, logger, subredditName, resources, client});
case 'flair':
return new FlairAction({...config as FlairActionJson, logger, subredditName, resources, client});
case 'userflair':
return new UserFlairAction({...config as UserFlairActionJson, logger, subredditName, resources, client});
case 'approve':
return new ApproveAction({...config as ApproveActionConfig, logger, subredditName, resources, client});
case 'usernote':
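
A hedged sketch of routing the new `userflair` kind through the factory. The import paths are assumed, and `deps` stands in for the logger, resources, and client objects the calling Check/Manager already holds.

```typescript
// Sketch only: the factory receives a typed config plus the shared dependencies.
import {actionFactory} from './index'; // path assumed
import {UserFlairActionJson} from './UserFlairAction'; // path assumed

declare const deps: {logger: any, resources: any, client: any}; // provided by the caller

const config: UserFlairActionJson = {
    kind: 'userflair',
    flair_template_id: 'example-template-id', // hypothetical template id
};

const action = actionFactory(config, deps.logger, 'mySubreddit', deps.resources, deps.client);
```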

View File

@@ -1,39 +1,84 @@
import {ActionJson, ActionConfig} from "./index";
import {ActionJson, ActionConfig, ActionOptions} from "./index";
import Action from "./index";
import Snoowrap, {Comment, Submission} from "snoowrap";
import Snoowrap from "snoowrap";
import {RuleResult} from "../Rule";
import {ActionProcessResult} from "../Common/interfaces";
import Submission from "snoowrap/dist/objects/Submission";
import Comment from "snoowrap/dist/objects/Comment";
export class ApproveAction extends Action {
targets: ApproveTarget[]
getKind() {
return 'Approve';
}
constructor(options: ApproveOptions) {
super(options);
const {
targets = ['self']
} = options;
this.targets = targets;
}
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
const dryRun = runtimeDryrun || this.dryRun;
//snoowrap typing issue, thinks `approved` does not exist on Comment
// @ts-ignore
if (item.approved) {
this.logger.warn('Item is already approved');
return {
dryRun,
success: false,
result: 'Item is already approved'
const touchedEntities = [];
const realTargets = item instanceof Submission ? ['self'] : this.targets;
for(const target of realTargets) {
let targetItem = item;
if(target !== 'self' && item instanceof Comment) {
targetItem = await this.resources.getActivity(this.client.getSubmission(item.link_id));
}
// @ts-ignore
if (item.approved) {
const msg = `${target === 'self' ? 'Item' : 'Comment\'s parent Submission'} is already approved`;
this.logger.warn(msg);
return {
dryRun,
success: false,
result: msg
}
}
if (!dryRun) {
// make sure we have an actual item and not just a plain object from cache
if(target !== 'self' && !(targetItem instanceof Submission)) {
// @ts-ignore
targetItem = await this.client.getSubmission((item as Comment).link_id).fetch();
}
// @ts-ignore
touchedEntities.push(await targetItem.approve());
}
}
if (!dryRun) {
// @ts-ignore
await item.approve();
}
return {
dryRun,
success: true,
touchedEntities
}
}
}
export interface ApproveActionConfig extends ActionConfig {
export type ApproveTarget = 'self' | 'parent';
export interface ApproveOptions extends ApproveActionConfig, ActionOptions {}
export interface ApproveActionConfig extends ActionConfig {
/**
* Specify which Activities to approve
*
* This setting is only applicable if the Activity being acted on is a **comment**. On a **submission** the setting does nothing.
*
* * self => approve activity being checked (comment)
* * parent => approve parent (submission) of activity being checked (comment)
* */
targets?: ApproveTarget[]
}
/**
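
A hedged sketch of the new option in use: the `targets` value a config would supply for an approve action. Only the pieces shown in the interfaces above are used; everything else is assumed to take its defaults.

```typescript
import {ApproveTarget} from './ApproveAction'; // path assumed

// Sketch only: when the checked Activity is a comment, also approve its parent submission.
const targets: ApproveTarget[] = ['self', 'parent'];

const approveConfig = {
    kind: 'approve' as const,
    targets,
};
```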

View File

@@ -39,6 +39,7 @@ export class BanAction extends Action {
const renderedBody = content === undefined ? undefined : await renderContent(content, item, ruleResults, this.resources.userNotes);
const renderedContent = renderedBody === undefined ? undefined : `${renderedBody}${await this.resources.generateFooter(item, this.footer)}`;
const touchedEntities = [];
let banPieces = [];
banPieces.push(`Message: ${renderedContent === undefined ? 'None' : `${renderedContent.length > 100 ? `\r\n${renderedContent}` : renderedContent}`}`);
banPieces.push(`Reason: ${this.reason || 'None'}`);
@@ -50,18 +51,20 @@ export class BanAction extends Action {
// @ts-ignore
const fetchedSub = await item.subreddit.fetch();
const fetchedName = await item.author.name;
await fetchedSub.banUser({
const bannedUser = await fetchedSub.banUser({
name: fetchedName,
banMessage: renderedContent === undefined ? undefined : renderedContent,
banReason: this.reason,
banNote: this.note,
duration: this.duration
});
touchedEntities.push(bannedUser);
}
return {
dryRun,
success: true,
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`
result: `Banned ${item.author.name} ${durText}${this.reason !== undefined ? ` (${this.reason})` : ''}`,
touchedEntities
};
}
}

View File

@@ -51,16 +51,19 @@ export class CommentAction extends Action {
result: 'Cannot comment because Item is archived'
};
}
const touchedEntities = [];
let reply: Comment;
if(!dryRun) {
// @ts-ignore
reply = await item.reply(renderedContent);
touchedEntities.push(reply);
}
if (this.lock) {
if (!dryRun) {
// snoowrap typing issue, thinks comments can't be locked
// @ts-ignore
await item.lock();
touchedEntities.push(item);
}
}
if (this.distinguish && !dryRun) {
@@ -78,7 +81,8 @@ export class CommentAction extends Action {
return {
dryRun,
success: true,
result: `${modifierStr}${this.lock ? ' - Locked Author\'s Activity - ' : ''}${truncateStringToLength(100)(body)}`
result: `${modifierStr}${this.lock ? ' - Locked Author\'s Activity - ' : ''}${truncateStringToLength(100)(body)}`,
touchedEntities,
};
}
}

View File

@@ -11,6 +11,7 @@ export class LockAction extends Action {
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
const dryRun = runtimeDryrun || this.dryRun;
const touchedEntities = [];
//snoowrap typing issue, thinks comments can't be locked
// @ts-ignore
if (item.locked) {
@@ -25,10 +26,12 @@ export class LockAction extends Action {
//snoowrap typing issue, thinks comments can't be locked
// @ts-ignore
await item.lock();
touchedEntities.push(item);
}
return {
dryRun,
success: true
success: true,
touchedEntities
}
}
}

View File

@@ -12,7 +12,8 @@ import {
REDDIT_ENTITY_REGEX_URL,
truncateStringToLength
} from "../util";
import SimpleError from "../Utils/SimpleError";
import {SimpleError} from "../Utils/Errors";
import {ErrorWithCause} from "pony-cause";
export class MessageAction extends Action {
content: string;
@@ -58,17 +59,14 @@ export class MessageAction extends Action {
if(this.to !== undefined) {
// parse to value
try {
const entityData = parseRedditEntity(this.to);
const entityData = parseRedditEntity(this.to, 'user');
if(entityData.type === 'user') {
recipient = entityData.name;
} else {
recipient = `/r/${entityData.name}`;
}
} catch (err) {
this.logger.error(`'to' field for message was not in a valid format. See ${REDDIT_ENTITY_REGEX_URL} for valid examples`);
this.logger.error(err);
err.logged = true;
throw err;
} catch (err: any) {
throw new ErrorWithCause(`'to' field for message was not in a valid format. See ${REDDIT_ENTITY_REGEX_URL} for valid examples`, {cause: err});
}
if(recipient.includes('/r/') && this.asSubreddit) {
throw new SimpleError(`Cannot send a message as a subreddit to another subreddit. Requested recipient: ${recipient}`);
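
The change above replaces log-and-rethrow with wrapping via pony-cause's `ErrorWithCause`, so the original error travels along as `cause` and can be printed as a chain later. A minimal self-contained sketch of that pattern, with a stand-in for `parseRedditEntity`:

```typescript
import {ErrorWithCause} from 'pony-cause';

// Stand-in for parseRedditEntity(); only here to produce a low-level error.
const parseRecipient = (raw: string): string => {
    if (!raw.startsWith('u/') && !raw.startsWith('r/')) {
        throw new Error(`'${raw}' is not a recognizable reddit entity`);
    }
    return raw;
};

const resolveRecipient = (raw: string): string => {
    try {
        return parseRecipient(raw);
    } catch (err: any) {
        // wrap instead of logging here; the logger can print the whole cause chain later
        throw new ErrorWithCause(`'to' field for message was not in a valid format`, {cause: err});
    }
};

try {
    resolveRecipient('not-a-user');
} catch (err: any) {
    console.error(err.message);        // high-level context
    console.error(err.cause?.message); // original low-level reason
}
```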

View File

@@ -1,4 +1,4 @@
import {ActionJson, ActionConfig} from "./index";
import {ActionJson, ActionConfig, ActionOptions} from "./index";
import Action from "./index";
import Snoowrap, {Comment, Submission} from "snoowrap";
import {RuleResult} from "../Rule";
@@ -6,12 +6,23 @@ import {activityIsRemoved} from "../Utils/SnoowrapUtils";
import {ActionProcessResult} from "../Common/interfaces";
export class RemoveAction extends Action {
spam: boolean;
getKind() {
return 'Remove';
}
constructor(options: RemoveOptions) {
super(options);
const {
spam = false,
} = options;
this.spam = spam;
}
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
const dryRun = runtimeDryrun || this.dryRun;
const touchedEntities = [];
// issue with snoowrap typings, doesn't think prop exists on Submission
// @ts-ignore
if (activityIsRemoved(item)) {
@@ -21,25 +32,33 @@ export class RemoveAction extends Action {
result: 'Item is already removed',
}
}
if (this.spam) {
this.logger.verbose('Marking as spam on removal');
}
if (!dryRun) {
// @ts-ignore
await item.remove();
await item.remove({spam: this.spam});
touchedEntities.push(item);
}
return {
dryRun,
success: true,
touchedEntities
}
}
}
export interface RemoveActionConfig extends ActionConfig {
export interface RemoveOptions extends RemoveActionConfig, ActionOptions {
}
export interface RemoveActionConfig extends ActionConfig {
spam?: boolean
}
/**
* Remove the Activity
* */
export interface RemoveActionJson extends RemoveActionConfig, ActionJson {
kind: 'remove'
kind: 'remove'
}
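
A hedged sketch of the new `spam` flag in a remove action config, mirroring the `item.remove({spam: this.spam})` call above (import path assumed):

```typescript
import {RemoveActionJson} from './RemoveAction'; // path assumed

// Sketch only: remove the Activity and mark it as spam.
const removeAsSpam: RemoveActionJson = {
    kind: 'remove',
    spam: true,
};
```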

View File

@@ -29,15 +29,20 @@ export class ReportAction extends Action {
const renderedContent = await renderContent(content, item, ruleResults, this.resources.userNotes);
this.logger.verbose(`Contents:\r\n${renderedContent}`);
const truncatedContent = reportTrunc(renderedContent);
const touchedEntities = [];
if(!dryRun) {
// @ts-ignore
await item.report({reason: truncatedContent});
// reddit may not update num_reports in its response so increment the stale value locally
item.num_reports++;
touchedEntities.push(item);
}
return {
dryRun,
success: true,
result: truncatedContent
result: truncatedContent,
touchedEntities
};
}
}

View File

@@ -1,20 +1,23 @@
import {SubmissionActionConfig} from "./index";
import Action, {ActionJson, ActionOptions} from "../index";
import Snoowrap, {Comment, Submission} from "snoowrap";
import {RuleResult} from "../../Rule";
import {ActionProcessResult} from "../../Common/interfaces";
import Submission from 'snoowrap/dist/objects/Submission';
import Comment from 'snoowrap/dist/objects/Comment';
export class FlairAction extends Action {
text: string;
css: string;
flair_template_id: string;
constructor(options: FlairActionOptions) {
super(options);
if (options.text === undefined && options.css === undefined) {
throw new Error('Must define either text or css on FlairAction');
if (options.text === undefined && options.css === undefined && options.flair_template_id === undefined) {
throw new Error('Must define either text+css or flair_template_id on FlairAction');
}
this.text = options.text || '';
this.css = options.css || '';
this.flair_template_id = options.flair_template_id || '';
}
getKind() {
@@ -34,8 +37,12 @@ export class FlairAction extends Action {
this.logger.verbose(flairSummary);
if (item instanceof Submission) {
if(!this.dryRun) {
// @ts-ignore
await item.assignFlair({text: this.text, cssClass: this.css})
if (this.flair_template_id) {
await item.selectFlair({flair_template_id: this.flair_template_id}).then(() => {});
} else {
await item.assignFlair({text: this.text, cssClass: this.css}).then(() => {});
}
}
} else {
this.logger.warn('Cannot flair Comment');
@@ -60,12 +67,16 @@ export class FlairAction extends Action {
export interface FlairActionConfig extends SubmissionActionConfig {
/**
* The text of the flair to apply
* */
* */
text?: string,
/**
* The text of the css class of the flair to apply
* */
css?: string,
/**
* Flair template ID to assign
* */
flair_template_id?: string,
}
export interface FlairActionOptions extends FlairActionConfig,ActionOptions {
@@ -76,5 +87,5 @@ export interface FlairActionOptions extends FlairActionConfig,ActionOptions {
* Flair the Submission
* */
export interface FlairActionJson extends FlairActionConfig, ActionJson {
kind: 'flair'
kind: 'flair'
}
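
A hedged sketch of the two ways a submission flair action can now be specified: by template id (new) or by text plus css. The template id is a made-up placeholder.

```typescript
// Sketch only; in the codebase these objects correspond to FlairActionJson.
const flairByTemplate = {
    kind: 'flair' as const,
    flair_template_id: 'example-template-id', // hypothetical id
};

const flairByText = {
    kind: 'flair' as const,
    text: 'Discussion',
    css: 'discussion',
};
```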

View File

@@ -0,0 +1,109 @@
import Action, {ActionConfig, ActionJson, ActionOptions} from './index';
import {Comment, RedditUser, Submission} from 'snoowrap';
import {RuleResult} from '../Rule';
import {ActionProcessResult} from '../Common/interfaces';
export class UserFlairAction extends Action {
text?: string;
css?: string;
flair_template_id?: string;
constructor(options: UserFlairActionOptions) {
super(options);
this.text = options.text === null || options.text === '' ? undefined : options.text;
this.css = options.css === null || options.css === '' ? undefined : options.css;
this.flair_template_id = options.flair_template_id === null || options.flair_template_id === '' ? undefined : options.flair_template_id;
}
getKind() {
return 'User Flair';
}
async process(item: Comment | Submission, ruleResults: RuleResult[], runtimeDryrun?: boolean): Promise<ActionProcessResult> {
const dryRun = runtimeDryrun || this.dryRun;
let flairParts = [];
if (this.flair_template_id !== undefined) {
flairParts.push(`Flair template ID: ${this.flair_template_id}`)
if(this.text !== undefined || this.css !== undefined) {
this.logger.warn('Text/CSS properties will be ignored since a flair template is specified');
}
} else {
if (this.text !== undefined) {
flairParts.push(`Text: ${this.text}`);
}
if (this.css !== undefined) {
flairParts.push(`CSS: ${this.css}`);
}
}
const flairSummary = flairParts.length === 0 ? 'Unflair user' : flairParts.join(' | ');
this.logger.verbose(flairSummary);
if (!dryRun) {
if (this.flair_template_id !== undefined) {
try {
// @ts-ignore
await this.client.assignUserFlairByTemplateId({
subredditName: item.subreddit.display_name,
flairTemplateId: this.flair_template_id,
username: item.author.name,
});
} catch (err: any) {
this.logger.error('Either the flair template ID is incorrect or you do not have permission to access it.');
throw err;
}
} else if (this.text === undefined && this.css === undefined) {
// @ts-ignore
await item.subreddit.deleteUserFlair(item.author.name);
} else {
// @ts-ignore
await item.author.assignFlair({
subredditName: item.subreddit.display_name,
cssClass: this.css,
text: this.text,
});
}
}
return {
dryRun,
success: true,
result: flairSummary,
}
}
}
/**
* Flair the Author of an Activity
*
* Leave all properties blank or null to remove a User's existing flair
* */
export interface UserFlairActionConfig extends ActionConfig {
/**
* The text of the flair to apply
* */
text?: string,
/**
* The text of the css class of the flair to apply
* */
css?: string,
/**
* Flair template to pick.
*
* **Note:** If this template is used text/css are ignored
* */
flair_template_id?: string;
}
export interface UserFlairActionOptions extends UserFlairActionConfig, ActionOptions {
}
/**
* Flair the Author of the Activity
* */
export interface UserFlairActionJson extends UserFlairActionConfig, ActionJson {
kind: 'userflair'
}
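
A hedged sketch of `UserFlairActionJson` configs covering the behaviors described above: flair by template (text/css ignored), flair by text/css, and removal of the author's existing flair when everything is left blank. The import path and template id are assumptions.

```typescript
import {UserFlairActionJson} from './UserFlairAction'; // path assumed

// Sketch only: template-based user flair (text/css would be ignored).
const byTemplate: UserFlairActionJson = {
    kind: 'userflair',
    flair_template_id: 'example-template-id', // hypothetical id
};

// Sketch only: explicit text/css user flair.
const byText: UserFlairActionJson = {
    kind: 'userflair',
    text: 'Helpful',
    css: 'green',
};

// Sketch only: omitting all properties removes the author's existing flair.
const unflair: UserFlairActionJson = {
    kind: 'userflair',
};
```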

View File

@@ -1,17 +1,18 @@
import Snoowrap, {Comment, Submission} from "snoowrap";
import {Comment, Submission} from "snoowrap";
import {Logger} from "winston";
import {RuleResult} from "../Rule";
import {SubredditResources} from "../Subreddit/SubredditResources";
import {checkAuthorFilter, SubredditResources} from "../Subreddit/SubredditResources";
import {ActionProcessResult, ActionResult, ChecksActivityState, TypedActivityStates} from "../Common/interfaces";
import Author, {AuthorOptions} from "../Author/Author";
import {mergeArr} from "../util";
import LoggedError from "../Utils/LoggedError";
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
export abstract class Action {
name?: string;
logger: Logger;
resources: SubredditResources;
client: Snoowrap
client: ExtendedSnoowrap;
authorIs: AuthorOptions;
itemIs: TypedActivityStates;
dryRun: boolean;
@@ -27,6 +28,7 @@ export abstract class Action {
subredditName,
dryRun = false,
authorIs: {
excludeCondition = 'OR',
include = [],
exclude = [],
} = {},
@@ -41,6 +43,7 @@ export abstract class Action {
this.logger = logger.child({labels: [`Action ${this.getActionUniqueName()}`]}, mergeArr);
this.authorIs = {
excludeCondition,
exclude: exclude.map(x => new Author(x)),
include: include.map(x => new Author(x)),
}
@@ -71,34 +74,17 @@ export abstract class Action {
actRes.runReason = `Activity did not pass 'itemIs' test, Action not run`;
return actRes;
}
if (this.authorIs.include !== undefined && this.authorIs.include.length > 0) {
for (const auth of this.authorIs.include) {
if (await this.resources.testAuthorCriteria(item, auth)) {
actRes.run = true;
const results = await this.process(item, ruleResults, runtimeDryrun);
return {...actRes, ...results};
}
}
this.logger.verbose('Inclusive author criteria not matched, Action not run');
actRes.runReason = 'Inclusive author criteria not matched';
return actRes;
} else if (this.authorIs.exclude !== undefined && this.authorIs.exclude.length > 0) {
for (const auth of this.authorIs.exclude) {
if (await this.resources.testAuthorCriteria(item, auth, false)) {
actRes.run = true;
const results = await this.process(item, ruleResults, runtimeDryrun);
return {...actRes, ...results};
}
}
this.logger.verbose('Exclusive author criteria not matched, Action not run');
actRes.runReason = 'Exclusive author criteria not matched';
const [authFilterResult, authFilterType] = await checkAuthorFilter(item, this.authorIs, this.resources, this.logger);
if(!authFilterResult) {
this.logger.verbose(`${authFilterType} author criteria not matched, Action not run`);
actRes.runReason = `${authFilterType} author criteria not matched`;
return actRes;
}
actRes.run = true;
const results = await this.process(item, ruleResults, runtimeDryrun);
return {...actRes, ...results};
} catch (err) {
} catch (err: any) {
if(!(err instanceof LoggedError)) {
this.logger.error(`Encountered error while running`, err);
}
@@ -114,8 +100,8 @@ export abstract class Action {
export interface ActionOptions extends ActionConfig {
logger: Logger;
subredditName: string;
resources: SubredditResources
client: Snoowrap
resources: SubredditResources;
client: ExtendedSnoowrap;
}
export interface ActionConfig extends ChecksActivityState {
@@ -162,7 +148,7 @@ export interface ActionJson extends ActionConfig {
/**
* The type of action that will be performed
*/
kind: 'comment' | 'lock' | 'remove' | 'report' | 'approve' | 'ban' | 'flair' | 'usernote' | 'message'
kind: 'comment' | 'lock' | 'remove' | 'report' | 'approve' | 'ban' | 'flair' | 'usernote' | 'message' | 'userflair'
}
export const isActionJson = (obj: object): obj is ActionJson => {

View File

@@ -1,7 +1,7 @@
import winston, {Logger} from "winston";
import dayjs, {Dayjs} from "dayjs";
import {getLogger} from "./Utils/loggerFactory";
import {Invokee, OperatorConfig} from "./Common/interfaces";
import {Invokee, OperatorConfig, OperatorConfigWithFileContext, OperatorFileConfig} from "./Common/interfaces";
import Bot from "./Bot";
import LoggedError from "./Utils/LoggedError";
import {sleep} from "./util";
@@ -14,7 +14,10 @@ export class App {
error: any;
constructor(config: OperatorConfig) {
config: OperatorConfig;
fileConfig: OperatorFileConfig;
constructor(config: OperatorConfigWithFileContext) {
const {
operator: {
name,
@@ -23,6 +26,11 @@ export class App {
bots = [],
} = config;
const {fileConfig, ...rest} = config;
this.config = rest;
this.fileConfig = fileConfig;
this.logger = getLogger(config.logging);
this.logger.info(`Operators: ${name.length === 0 ? 'None Specified' : name.join(', ')}`)
@@ -74,7 +82,7 @@ export class App {
this.logger.error(err);
}
});
} catch (err) {
} catch (err: any) {
if (b.error === undefined) {
b.error = err.message;
}

View File

@@ -1,5 +1,5 @@
import {UserNoteCriteria} from "../Rule";
import {CompareValue, CompareValueOrPercent, DurationComparor} from "../Common/interfaces";
import {CompareValue, CompareValueOrPercent, DurationComparor, JoinOperands} from "../Common/interfaces";
import {parseStringToRegex} from "../util";
/**
@@ -12,7 +12,17 @@ export interface AuthorOptions {
* */
include?: AuthorCriteria[];
/**
* Only runs if `include` is not present. Will "pass" if any of the set of AuthorCriteria **does not** pass
* * OR => if ANY exclude condition "does not" pass then the exclude test passes
* * AND => if ALL exclude conditions "do not" pass then the exclude test passes
*
* Defaults to OR
* @default OR
* */
excludeCondition?: JoinOperands
/**
* Only runs if `include` is not present. Each AuthorCriteria is composed of conditions that the Author being checked must "not" pass. See excludeCondition for set behavior
*
* EX: `isMod: true, name: Automoderator` => Will pass if the Author IS NOT a mod and IS NOT named Automoderator
* */
exclude?: AuthorCriteria[];
}
@@ -36,15 +46,20 @@ export interface AuthorCriteria {
* */
name?: string[],
/**
* A list of (user) flair css class values from the subreddit to match against
* A (user) flair css class (or list of) from the subreddit to match against
* @examples ["red"]
* */
flairCssClass?: string[],
flairCssClass?: string | string[],
/**
* A list of (user) flair text values from the subreddit to match against
* A (user) flair text value (or list of) from the subreddit to match against
* @examples ["Approved"]
* */
flairText?: string[],
flairText?: string | string[],
/**
* A (user) flair template id (or list of) from the subreddit to match against
* */
flairTemplate?: string | string[]
/**
* Is the author a moderator?
* */
@@ -136,8 +151,12 @@ export class Author implements AuthorCriteria {
constructor(options: AuthorCriteria) {
this.name = options.name;
this.flairCssClass = options.flairCssClass;
this.flairText = options.flairText;
if(options.flairCssClass !== undefined) {
this.flairCssClass = typeof options.flairCssClass === 'string' ? [options.flairCssClass] : options.flairCssClass;
}
if(options.flairText !== undefined) {
this.flairText = typeof options.flairText === 'string' ? [options.flairText] : options.flairText;
}
this.isMod = options.isMod;
this.userNotes = options.userNotes;
this.age = options.age;
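
A hedged sketch of the reworked author filter shape: `excludeCondition` controls how multiple exclude criteria combine, and flair values can now be a single string instead of a list. The import path is assumed.

```typescript
import {AuthorOptions} from './Author'; // path assumed

// Sketch only: with AND, every exclude criteria must fail for the author,
// so this passes only when the author is NOT a mod AND does NOT have the 'Trusted' flair.
const authorIs: AuthorOptions = {
    excludeCondition: 'AND',
    exclude: [
        {isMod: true},
        {flairText: 'Trusted'}, // a single string is now accepted
    ],
};
```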

View File

@@ -3,23 +3,35 @@ import {Logger} from "winston";
import dayjs, {Dayjs} from "dayjs";
import {Duration} from "dayjs/plugin/duration";
import EventEmitter from "events";
import {BotInstanceConfig, Invokee, PAUSED, RUNNING, STOPPED, SYSTEM, USER} from "../Common/interfaces";
import {
BotInstanceConfig,
FilterCriteriaDefaults,
Invokee,
PAUSED,
PollOn,
RUNNING,
STOPPED,
SYSTEM,
USER
} from "../Common/interfaces";
import {
createRetryHandler,
formatNumber,
formatNumber, getExceptionMessage,
mergeArr,
parseBool,
parseDuration,
parseSubredditName,
parseDuration, parseMatchMessage,
parseSubredditName, RetryOptions,
sleep,
snooLogWrapper
} from "../util";
import {Manager} from "../Subreddit/Manager";
import {ExtendedSnoowrap, ProxiedSnoowrap} from "../Utils/SnoowrapClients";
import {ModQueueStream, UnmoderatedStream} from "../Subreddit/Streams";
import {CommentStream, ModQueueStream, SPoll, SubmissionStream, UnmoderatedStream} from "../Subreddit/Streams";
import {BotResourcesManager} from "../Subreddit/SubredditResources";
import LoggedError from "../Utils/LoggedError";
import pEvent from "p-event";
import {SimpleError, isRateLimitError, isRequestError, isScopeError, isStatusError, CMError} from "../Utils/Errors";
import {ErrorWithCause} from "pony-cause";
class Bot {
@@ -31,6 +43,7 @@ class Bot {
running: boolean = false;
subreddits: string[];
excludeSubreddits: string[];
filterCriteriaDefaults?: FilterCriteriaDefaults
subManagers: Manager[] = [];
heartbeatInterval: number;
nextHeartbeat: Dayjs = dayjs();
@@ -41,16 +54,20 @@ class Bot {
nannyMode?: 'soft' | 'hard';
nannyRunning: boolean = false;
nextNannyCheck: Dayjs = dayjs().add(10, 'second');
sharedStreamRetryHandler: Function;
nannyRetryHandler: Function;
managerRetryHandler: Function;
nextExpiration: Dayjs = dayjs();
botName?: string;
botLink?: string;
botAccount?: string;
maxWorkers: number;
startedAt: Dayjs = dayjs();
sharedModqueue: boolean = false;
sharedStreams: PollOn[] = [];
streamListedOnce: string[] = [];
stagger: number;
apiSample: number[] = [];
apiRollingAvg: number = 0;
apiEstDepletion?: Duration;
@@ -73,6 +90,7 @@ class Bot {
const {
notifications,
name,
filterCriteriaDefaults,
subreddits: {
names = [],
exclude = [],
@@ -81,18 +99,20 @@ class Bot {
heartbeatInterval,
},
credentials: {
clientId,
clientSecret,
refreshToken,
accessToken,
reddit: {
clientId,
clientSecret,
refreshToken,
accessToken,
},
},
snoowrap: {
proxy,
debug,
},
polling: {
sharedMod,
stagger,
shared = [],
stagger = 2000,
},
queue: {
maxWorkers,
@@ -116,7 +136,8 @@ class Bot {
this.hardLimit = hardLimit;
this.wikiLocation = wikiConfig;
this.heartbeatInterval = heartbeatInterval;
this.sharedModqueue = sharedMod;
this.filterCriteriaDefaults = filterCriteriaDefaults;
this.sharedStreams = shared;
if(name !== undefined) {
this.botName = name;
}
@@ -171,64 +192,23 @@ class Bot {
this.client = proxy === undefined ? new ExtendedSnoowrap(creds) : new ProxiedSnoowrap({...creds, proxy});
this.client.config({
warnings: true,
maxRetryAttempts: 5,
maxRetryAttempts: 2,
debug,
logger: snooLogWrapper(this.logger.child({labels: ['Snoowrap']}, mergeArr)),
continueAfterRatelimitError: true,
continueAfterRatelimitError: false,
});
} catch (err) {
} catch (err: any) {
if(this.error === undefined) {
this.error = err.message;
this.logger.error(err);
}
}
const retryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 1}, this.logger);
this.sharedStreamRetryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 2}, this.logger);
this.nannyRetryHandler = createRetryHandler({maxRequestRetry: 5, maxOtherRetry: 1}, this.logger);
this.managerRetryHandler = createRetryHandler({maxRequestRetry: 8, maxOtherRetry: 8, waitOnRetry: false, clearRetryCountAfter: 2}, this.logger);
const modStreamErrorListener = (name: string) => async (err: any) => {
this.logger.error('Polling error occurred', err);
const shouldRetry = await retryHandler(err);
if(shouldRetry) {
defaultUnmoderatedStream.startInterval();
} else {
for(const m of this.subManagers) {
if(m.modStreamCallbacks.size > 0) {
m.notificationManager.handle('runStateChanged', `${name.toUpperCase()} Polling Stopped`, 'Encountered too many errors from Reddit while polling. Will try to restart on next heartbeat.');
}
}
this.logger.error(`Mod stream ${name.toUpperCase()} encountered too many errors while polling. Will try to restart on next heartbeat.`);
}
}
const modStreamListingListener = (name: string) => async (listing: (Comment|Submission)[]) => {
// dole out in order they were received
if(!this.streamListedOnce.includes(name)) {
this.streamListedOnce.push(name);
return;
}
for(const i of listing) {
const foundManager = this.subManagers.find(x => x.subreddit.display_name === i.subreddit.display_name && x.modStreamCallbacks.get(name) !== undefined);
if(foundManager !== undefined) {
foundManager.modStreamCallbacks.get(name)(i);
if(stagger !== undefined) {
await sleep(stagger);
}
}
}
}
const defaultUnmoderatedStream = new UnmoderatedStream(this.client, {subreddit: 'mod', limit: 100, clearProcessed: { size: 100, retain: 100 }});
// @ts-ignore
defaultUnmoderatedStream.on('error', modStreamErrorListener('unmoderated'));
defaultUnmoderatedStream.on('listing', modStreamListingListener('unmoderated'));
const defaultModqueueStream = new ModQueueStream(this.client, {subreddit: 'mod', limit: 100, clearProcessed: { size: 100, retain: 100 }});
// @ts-ignore
defaultModqueueStream.on('error', modStreamErrorListener('modqueue'));
defaultModqueueStream.on('listing', modStreamListingListener('modqueue'));
this.cacheManager.modStreams.set('unmoderated', defaultUnmoderatedStream);
this.cacheManager.modStreams.set('modqueue', defaultModqueueStream);
this.stagger = stagger ?? 2000;
process.on('uncaughtException', (e) => {
this.error = e;
@@ -253,33 +233,62 @@ class Bot {
});
}
createSharedStreamErrorListener = (name: string) => async (err: any) => {
const shouldRetry = await this.sharedStreamRetryHandler(err);
if(shouldRetry) {
(this.cacheManager.modStreams.get(name) as SPoll<any>).startInterval(false, 'Within retry limits');
} else {
for(const m of this.subManagers) {
if(m.sharedStreamCallbacks.size > 0) {
m.notificationManager.handle('runStateChanged', `${name.toUpperCase()} Polling Stopped`, 'Encountered too many errors from Reddit while polling. Will try to restart on next heartbeat.');
}
}
this.logger.error(`Mod stream ${name.toUpperCase()} encountered too many errors while polling. Will try to restart on next heartbeat.`);
}
}
createSharedStreamListingListener = (name: string) => async (listing: (Comment|Submission)[]) => {
// dole out in order they were received
if(!this.streamListedOnce.includes(name)) {
this.streamListedOnce.push(name);
return;
}
for(const i of listing) {
const foundManager = this.subManagers.find(x => x.subreddit.display_name === i.subreddit.display_name && x.sharedStreamCallbacks.get(name) !== undefined && x.eventsState.state === RUNNING);
if(foundManager !== undefined) {
foundManager.sharedStreamCallbacks.get(name)(i);
if(this.stagger !== undefined) {
await sleep(this.stagger);
}
}
}
}
async onTerminate(reason = 'The application was shutdown') {
for(const m of this.subManagers) {
await m.notificationManager.handle('runStateChanged', 'Application Shutdown', reason);
}
}
async testClient() {
async testClient(initial = true) {
try {
// @ts-ignore
await this.client.getMe();
this.logger.info('Test API call successful');
} catch (err) {
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
if(err.name === 'StatusCodeError') {
const authHeader = err.response.headers['www-authenticate'];
if (authHeader !== undefined && authHeader.includes('insufficient_scope')) {
this.logger.error('Reddit responded with a 403 insufficient_scope. Please ensure you have chosen the correct scopes when authorizing your account.');
} else if(err.statusCode === 401) {
this.logger.error('It is likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken');
}
this.logger.error(`Error Message: ${err.message}`);
} else {
this.logger.error(err);
} catch (err: any) {
if (initial) {
this.logger.error('An error occurred while trying to initialize the Reddit API Client which would prevent the entire application from running.');
}
this.error = `Error occurred while testing Reddit API client: ${err.message}`;
err.logged = true;
throw err;
const hint = getExceptionMessage(err, {
401: 'Likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken',
400: 'Credentials may have been invalidated manually or by reddit due to behavior',
});
let msg = `Error occurred while testing Reddit API client${hint !== undefined ? `: ${hint}` : ''}`;
this.error = msg;
const clientError = new CMError(msg, {cause: err});
clientError.logged = true;
this.logger.error(clientError);
throw clientError;
}
}
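
For context, `getExceptionMessage()` above maps well-known HTTP status codes to friendlier hints. A minimal sketch of that idea (this is not the project's actual util; the property names read off the error are assumptions):

```typescript
// Sketch only: pick a human-readable hint for an error that carries an HTTP status code.
const exceptionHint = (err: any, hints: Record<number, string>): string | undefined => {
    const status = err?.statusCode ?? err?.response?.statusCode;
    if (typeof status === 'number' && hints[status] !== undefined) {
        return `(${status}) ${hints[status]}`;
    }
    return undefined;
};

// usage mirroring testClient()
// const hint = exceptionHint(err, {
//     401: 'Likely a credential is missing or incorrect. Check clientId, clientSecret, refreshToken, and accessToken',
//     400: 'Credentials may have been invalidated manually or by reddit due to behavior',
// });
```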
@@ -298,10 +307,12 @@ class Bot {
}
this.logger.info(`Bot Name${botNameFromConfig ? ' (from config)' : ''}: ${this.botName}`);
for (const sub of await this.client.getModeratedSubreddits()) {
// TODO don't know a way to check permissions yet
availSubs.push(sub);
let subListing = await this.client.getModeratedSubreddits({count: 100});
while(!subListing.isFinished) {
subListing = await subListing.fetchMore({amount: 100});
}
availSubs = subListing.filter(x => x.display_name !== `u_${user.name}`);
this.logger.info(`u/${user.name} is a moderator of these subreddits: ${availSubs.map(x => x.display_name_prefixed).join(', ')}`);
let subsToRun: Subreddit[] = [];
@@ -320,30 +331,177 @@ class Bot {
}
} else {
if(this.excludeSubreddits.length > 0) {
this.logger.info(`Will run on all moderated subreddits but user-defined excluded: ${this.excludeSubreddits.join(', ')}`);
this.logger.info(`Will run on all moderated subreddits but own profile and user-defined excluded: ${this.excludeSubreddits.join(', ')}`);
const normalExcludes = this.excludeSubreddits.map(x => x.toLowerCase());
subsToRun = availSubs.filter(x => !normalExcludes.includes(x.display_name.toLowerCase()));
} else {
this.logger.info(`No user-defined subreddit constraints detected, will run on all moderated subreddits EXCEPT own profile (${this.botAccount})`);
subsToRun = availSubs.filter(x => x.display_name_prefixed !== this.botAccount);
subsToRun = availSubs;
}
}
let subSchedule: Manager[] = [];
// get configs for subs we want to run on and build/validate them
for (const sub of subsToRun) {
const manager = new Manager(sub, this.client, this.logger, this.cacheManager, {dryRun: this.dryRun, sharedModqueue: this.sharedModqueue, wikiLocation: this.wikiLocation, botName: this.botName, maxWorkers: this.maxWorkers});
try {
await manager.parseConfiguration('system', true, {suppressNotification: true});
} catch (err) {
if (!(err instanceof LoggedError)) {
this.logger.error(`Config was not valid:`, {subreddit: sub.display_name_prefixed});
this.logger.error(err, {subreddit: sub.display_name_prefixed});
}
this.subManagers.push(this.createManager(sub));
} catch (err: any) {
}
}
for(const m of this.subManagers) {
try {
await this.initManager(m);
} catch (err: any) {
}
}
this.parseSharedStreams();
}
parseSharedStreams() {
const sharedCommentsSubreddits = !this.sharedStreams.includes('newComm') ? [] : this.subManagers.filter(x => x.isPollingShared('newComm')).map(x => x.subreddit.display_name);
if (sharedCommentsSubreddits.length > 0) {
const stream = this.cacheManager.modStreams.get('newComm');
if (stream === undefined || stream.subreddit !== sharedCommentsSubreddits.join('+')) {
let processed;
if (stream !== undefined) {
this.logger.info('Restarting SHARED COMMENT STREAM due to a subreddit config change');
stream.end('Replacing with a new stream with updated subreddits');
processed = stream.processed;
}
if (sharedCommentsSubreddits.length > 100) {
this.logger.warn(`SHARED COMMENT STREAM => Reddit can only combine 100 subreddits for getting new Comments but this bot has ${sharedCommentsSubreddits.length}`);
}
const defaultCommentStream = new CommentStream(this.client, {
subreddit: sharedCommentsSubreddits.join('+'),
limit: 100,
enforceContinuity: true,
logger: this.logger,
processed,
label: 'Shared Polling'
});
// @ts-ignore
defaultCommentStream.on('error', this.createSharedStreamErrorListener('newComm'));
defaultCommentStream.on('listing', this.createSharedStreamListingListener('newComm'));
this.cacheManager.modStreams.set('newComm', defaultCommentStream);
}
} else {
const stream = this.cacheManager.modStreams.get('newComm');
if (stream !== undefined) {
stream.end('Determined no managers are listening on shared stream parsing');
}
}
const sharedSubmissionsSubreddits = !this.sharedStreams.includes('newSub') ? [] : this.subManagers.filter(x => x.isPollingShared('newSub')).map(x => x.subreddit.display_name);
if (sharedSubmissionsSubreddits.length > 0) {
const stream = this.cacheManager.modStreams.get('newSub');
if (stream === undefined || stream.subreddit !== sharedSubmissionsSubreddits.join('+')) {
let processed;
if (stream !== undefined) {
this.logger.info('Restarting SHARED SUBMISSION STREAM due to a subreddit config change');
stream.end('Replacing with a new stream with updated subreddits');
processed = stream.processed;
}
if (sharedSubmissionsSubreddits.length > 100) {
this.logger.warn(`SHARED SUBMISSION STREAM => Reddit can only combine 100 subreddits for getting new Submissions but this bot has ${sharedSubmissionsSubreddits.length}`);
}
const defaultSubStream = new SubmissionStream(this.client, {
subreddit: sharedSubmissionsSubreddits.join('+'),
limit: 100,
enforceContinuity: true,
logger: this.logger,
processed,
label: 'Shared Polling'
});
// @ts-ignore
defaultSubStream.on('error', this.createSharedStreamErrorListener('newSub'));
defaultSubStream.on('listing', this.createSharedStreamListingListener('newSub'));
this.cacheManager.modStreams.set('newSub', defaultSubStream);
}
} else {
const stream = this.cacheManager.modStreams.get('newSub');
if (stream !== undefined) {
stream.end('Determined no managers are listening on shared stream parsing');
}
}
const isUnmoderatedShared = !this.sharedStreams.includes('unmoderated') ? false : this.subManagers.some(x => x.isPollingShared('unmoderated'));
const unmoderatedstream = this.cacheManager.modStreams.get('unmoderated');
if (isUnmoderatedShared && unmoderatedstream === undefined) {
const defaultUnmoderatedStream = new UnmoderatedStream(this.client, {
subreddit: 'mod',
limit: 100,
logger: this.logger,
label: 'Shared Polling'
});
// @ts-ignore
defaultUnmoderatedStream.on('error', this.createSharedStreamErrorListener('unmoderated'));
defaultUnmoderatedStream.on('listing', this.createSharedStreamListingListener('unmoderated'));
this.cacheManager.modStreams.set('unmoderated', defaultUnmoderatedStream);
} else if (!isUnmoderatedShared && unmoderatedstream !== undefined) {
unmoderatedstream.end('Determined no managers are listening on shared stream parsing');
}
const isModqueueShared = !this.sharedStreams.includes('modqueue') ? false : this.subManagers.some(x => x.isPollingShared('modqueue'));
const modqueuestream = this.cacheManager.modStreams.get('modqueue');
if (isModqueueShared && modqueuestream === undefined) {
const defaultModqueueStream = new ModQueueStream(this.client, {
subreddit: 'mod',
limit: 100,
logger: this.logger,
label: 'Shared Polling'
});
// @ts-ignore
defaultModqueueStream.on('error', this.createSharedStreamErrorListener('modqueue'));
defaultModqueueStream.on('listing', this.createSharedStreamListingListener('modqueue'));
this.cacheManager.modStreams.set('modqueue', defaultModqueueStream);
} else if (!isModqueueShared && modqueuestream !== undefined) {
modqueuestream.end('Determined no managers are listening on shared stream parsing');
}
}
async initManager(manager: Manager) {
try {
await manager.parseConfiguration('system', true, {suppressNotification: true, suppressChangeEvent: true});
} catch (err: any) {
if(err.logged !== true) {
const normalizedError = new ErrorWithCause(`Bot could not start manager because config was not valid`, {cause: err});
// @ts-ignore
this.logger.error(normalizedError, {subreddit: manager.subreddit.display_name_prefixed});
} else {
this.logger.error('Bot could not start manager because config was not valid', {subreddit: manager.subreddit.display_name_prefixed});
}
}
}
createManager(sub: Subreddit): Manager {
const manager = new Manager(sub, this.client, this.logger, this.cacheManager, {
dryRun: this.dryRun,
sharedStreams: this.sharedStreams,
wikiLocation: this.wikiLocation,
botName: this.botName as string,
maxWorkers: this.maxWorkers,
filterCriteriaDefaults: this.filterCriteriaDefaults,
});
// all errors from managers will count towards bot-level retry count
manager.on('error', async (err) => await this.panicOnRetries(err));
manager.on('configChange', async () => {
this.parseSharedStreams();
await this.runSharedStreams(false);
});
return manager;
}
// if the cumulative error count exceeds the configured threshold then stop ALL managers as there is most likely something very bad happening
async panicOnRetries(err: any) {
if(!await this.managerRetryHandler(err)) {
this.logger.warn('Bot detected too many errors from managers within a short time. Stopping all managers and will try to restart on next heartbeat.');
for(const m of this.subManagers) {
await m.stop('system',{reason: 'Bot detected too many errors from all managers. Stopping all managers as a failsafe.'});
}
subSchedule.push(manager);
}
this.subManagers = subSchedule;
}
async destroy(causedBy: Invokee) {
@@ -357,14 +515,49 @@ class Bot {
this.logger.info('Bot is stopped.');
}
async runModStreams(notify = false) {
async checkModInvites() {
const subs: string[] = await this.cacheManager.getPendingSubredditInvites();
for (const name of subs) {
try {
// @ts-ignore
await this.client.getSubreddit(name).acceptModeratorInvite();
this.logger.info(`Accepted moderator invite for r/${name}!`);
await this.cacheManager.deletePendingSubredditInvite(name);
// @ts-ignore
const sub = await this.client.getSubreddit(name);
this.logger.info(`Attempting to add manager for r/${name}`);
try {
const manager = this.createManager(sub);
this.logger.info(`Starting manager for r/${name}`);
this.subManagers.push(manager);
await this.initManager(manager);
await manager.start('system', {reason: 'Caused by creation due to moderator invite'});
await this.runSharedStreams();
} catch (err: any) {
if (!(err instanceof LoggedError)) {
this.logger.error(err);
}
}
} catch (err: any) {
if (err.message.includes('NO_INVITE_FOUND')) {
this.logger.warn(`No pending moderation invite for r/${name} was found`);
} else if (isStatusError(err) && err.statusCode === 403) {
this.logger.error(`Error occurred while checking r/${name} for a pending moderation invite. It is likely that this bot does not have the 'modself' oauth permission. Error: ${err.message}`);
} else {
this.logger.error(`Error occurred while checking r/${name} for a pending moderation invite. Error: ${err.message}`);
}
}
}
}
async runSharedStreams(notify = false) {
for(const [k,v] of this.cacheManager.modStreams) {
if(!v.running && this.subManagers.some(x => x.modStreamCallbacks.get(k) !== undefined)) {
if(!v.running && this.subManagers.some(x => x.sharedStreamCallbacks.get(k) !== undefined)) {
v.startInterval();
this.logger.info(`Starting default ${k.toUpperCase()} mod stream`);
this.logger.info(`Starting ${k.toUpperCase()} shared polling`);
if(notify) {
for(const m of this.subManagers) {
if(m.modStreamCallbacks.size > 0) {
if(m.sharedStreamCallbacks.size > 0) {
await m.notificationManager.handle('runStateChanged', `${k.toUpperCase()} Polling Started`, 'Polling was successfully restarted on heartbeat.');
}
}
@@ -375,6 +568,8 @@ class Bot {
}
async runManagers(causedBy: Invokee = 'system') {
this.running = true;
if(this.subManagers.every(x => !x.validConfigLoaded)) {
this.logger.warn('All managers have invalid configs!');
this.error = 'All managers have invalid configs';
@@ -382,15 +577,15 @@ class Bot {
for (const manager of this.subManagers) {
if (manager.validConfigLoaded && manager.botState.state !== RUNNING) {
await manager.start(causedBy, {reason: 'Caused by application startup'});
await sleep(2000);
await sleep(this.stagger);
}
}
await this.runModStreams();
await this.runSharedStreams();
this.running = true;
this.nextNannyCheck = dayjs().add(10, 'second');
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
await this.checkModInvites();
await this.healthLoop();
}
@@ -404,15 +599,16 @@ class Bot {
try {
await this.runApiNanny();
this.nextNannyCheck = dayjs().add(10, 'second');
} catch (err) {
this.logger.info('Delaying next nanny check for 1 minute due to emitted error');
this.nextNannyCheck = dayjs().add(120, 'second');
} catch (err: any) {
this.logger.info('Delaying next nanny check for 4 minutes due to emitted error');
this.nextNannyCheck = dayjs().add(240, 'second');
}
}
if(dayjs().isSameOrAfter(this.nextHeartbeat)) {
try {
await this.heartbeat();
} catch (err) {
await this.checkModInvites();
} catch (err: any) {
this.logger.error(`Error occurred during heartbeat check: ${err.message}`);
}
this.nextHeartbeat = dayjs().add(this.heartbeatInterval, 'second');
@@ -424,20 +620,39 @@ class Bot {
async heartbeat() {
const heartbeat = `HEARTBEAT -- API Remaining: ${this.client.ratelimitRemaining} | Usage Rolling Avg: ~${formatNumber(this.apiRollingAvg)}/s | Est Depletion: ${this.apiEstDepletion === undefined ? 'N/A' : this.apiEstDepletion.humanize()} (${formatNumber(this.depletedInSecs, {toFixed: 0})} seconds)`
this.logger.info(heartbeat);
// run sanity check to see if there is a service issue
try {
await this.testClient(false);
} catch (err: any) {
throw new SimpleError(`Something isn't right! This could be a Reddit API issue (service is down? buggy??) or an issue with the Bot account. Will not run heartbeat operations and will wait until next heartbeat (${dayjs.duration(this.nextHeartbeat.diff(dayjs())).humanize()}) to try again`);
}
let startedAny = false;
for (const s of this.subManagers) {
if(s.botState.state === STOPPED && s.botState.causedBy === USER) {
this.logger.debug('Skipping config check/restart on heartbeat due to previously being stopped by user', {subreddit: s.displayLabel});
continue;
}
try {
// ensure calls to wiki page are also staggered so we aren't hitting api hard when bot has a ton of subreddits to check
await sleep(this.stagger);
const newConfig = await s.parseConfiguration();
if(newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM))
{
await s.startQueue('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running queue'});
}
if(newConfig || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM))
{
await s.startEvents('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running events'});
const willStart = newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM) || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM);
if(willStart) {
// stagger restart
if (startedAny) {
await sleep(this.stagger);
}
startedAny = true;
if(newConfig || (s.queueState.state !== RUNNING && s.queueState.causedBy === SYSTEM))
{
await s.startQueue('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running queue'});
}
if(newConfig || (s.eventsState.state !== RUNNING && s.eventsState.causedBy === SYSTEM))
{
await s.startEvents('system', {reason: newConfig ? 'Config updated on heartbeat triggered reload' : 'Heartbeat detected non-running events'});
}
}
if(s.botState.state !== RUNNING && s.eventsState.state === RUNNING && s.queueState.state === RUNNING) {
s.botState = {
@@ -445,10 +660,12 @@ class Bot {
causedBy: 'system',
}
}
} catch (err) {
this.logger.info('Stopping event polling to prevent activity processing queue from backing up. Will be restarted when config update succeeds.')
await s.stopEvents('system', {reason: 'Invalid config will cause events to pile up in queue. Will be restarted when config update succeeds (next heartbeat).'});
if(!(err instanceof LoggedError)) {
} catch (err: any) {
if(s.eventsState.state === RUNNING) {
this.logger.info('Stopping event polling to prevent activity processing queue from backing up. Will be restarted when config update succeeds.')
await s.stopEvents('system', {reason: 'Invalid config will cause events to pile up in queue. Will be restarted when config update succeeds (next heartbeat).'});
}
if(err.logged !== true) {
this.logger.error(err, {subreddit: s.displayLabel});
}
if(this.nextHeartbeat !== undefined) {
@@ -456,7 +673,7 @@ class Bot {
}
}
}
await this.runModStreams(true);
await this.runSharedStreams(true);
}
async runApiNanny() {
@@ -472,7 +689,10 @@ class Bot {
// @ts-ignore
await this.client.getMe();
shouldRetry = false;
} catch (err) {
} catch (err: any) {
if(isRateLimitError(err)) {
throw err;
}
shouldRetry = await this.nannyRetryHandler(err);
if (!shouldRetry) {
throw err;
@@ -521,6 +741,10 @@ class Bot {
m.notificationManager.handle('runStateChanged', 'Hard Limit Triggered', `Hard Limit of ${this.hardLimit} hit (API Remaining: ${this.client.ratelimitRemaining}). Subreddit event polling has been paused.`, 'system', 'warn');
}
for(const [k,v] of this.cacheManager.modStreams) {
v.end('Hard limit cutoff');
}
this.nannyMode = 'hard';
return;
}
@@ -587,10 +811,11 @@ class Bot {
await m.startEvents('system', {reason: 'API Nanny has been turned off due to better API conditions'});
}
}
await this.runSharedStreams(true);
this.nannyMode = undefined;
}
} catch (err) {
} catch (err: any) {
this.logger.error(`Error occurred during nanny loop: ${err.message}`);
throw err;
}
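
A hedged sketch of the bot-level polling options the shared-stream refactor consumes: `shared` (replacing the old `sharedMod` boolean) lists which poll sources use one combined stream, and `stagger` spaces out both item dispatch and manager startup. The import path is assumed.

```typescript
import {PollOn} from './Common/interfaces'; // path assumed

// Sketch only: share comment and submission polling across subreddits, stagger work by 2 seconds.
const polling: {shared: PollOn[], stagger: number} = {
    shared: ['newComm', 'newSub'],
    stagger: 2000,
};
```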

View File

@@ -28,8 +28,11 @@ import * as RuleSchema from '../Schema/Rule.json';
import * as RuleSetSchema from '../Schema/RuleSet.json';
import * as ActionSchema from '../Schema/Action.json';
import {ActionObjectJson, RuleJson, RuleObjectJson, ActionJson as ActionTypeJson} from "../Common/types";
import {SubredditResources} from "../Subreddit/SubredditResources";
import {Author, AuthorCriteria, AuthorOptions} from "../Author/Author";
import {checkAuthorFilter, SubredditResources} from "../Subreddit/SubredditResources";
import {Author, AuthorCriteria, AuthorOptions} from '..';
import {ExtendedSnoowrap} from '../Utils/SnoowrapClients';
import {isRateLimitError} from "../Utils/Errors";
import {ErrorWithCause} from "pony-cause";
const checkLogName = truncateStringToLength(25);
@@ -42,15 +45,12 @@ export abstract class Check implements ICheck {
rules: Array<RuleSet | Rule> = [];
logger: Logger;
itemIs: TypedActivityStates;
authorIs: {
include: AuthorCriteria[],
exclude: AuthorCriteria[]
};
authorIs: AuthorOptions;
cacheUserResult: Required<UserResultCacheOptions>;
dryRun?: boolean;
notifyOnTrigger: boolean;
resources: SubredditResources;
client: Snoowrap;
client: ExtendedSnoowrap;
constructor(options: CheckOptions) {
const {
@@ -68,6 +68,7 @@ export abstract class Check implements ICheck {
itemIs = [],
authorIs: {
include = [],
excludeCondition,
exclude = [],
} = {},
dryRun,
@@ -88,6 +89,7 @@ export abstract class Check implements ICheck {
this.condition = condition;
this.itemIs = itemIs;
this.authorIs = {
excludeCondition,
exclude: exclude.map(x => new Author(x)),
include: include.map(x => new Author(x)),
}
@@ -158,7 +160,7 @@ export abstract class Check implements ICheck {
runStats.push(`${this.actions.length} Actions`);
// not sure if this should be info or verbose
this.logger.info(`=${this.enabled ? 'Enabled' : 'Disabled'}= ${type.toUpperCase()} (${this.condition})${this.notifyOnTrigger ? ' ||Notify on Trigger|| ' : ''} => ${runStats.join(' | ')}${this.description !== undefined ? ` => ${this.description}` : ''}`);
if (this.rules.length === 0 && this.itemIs.length === 0 && this.authorIs.exclude.length === 0 && this.authorIs.include.length === 0) {
if (this.rules.length === 0 && this.itemIs.length === 0 && this.authorIs.exclude?.length === 0 && this.authorIs.include?.length === 0) {
this.logger.warn('No rules, item tests, or author test found -- this check will ALWAYS PASS!');
}
let ruleSetIndex = 1;
@@ -201,30 +203,9 @@ export abstract class Check implements ICheck {
this.logger.verbose(`${FAIL} => Item did not pass 'itemIs' test`);
return [false, allRuleResults];
}
let authorPass = null;
if (this.authorIs.include !== undefined && this.authorIs.include.length > 0) {
for (const auth of this.authorIs.include) {
if (await this.resources.testAuthorCriteria(item, auth)) {
authorPass = true;
break;
}
}
if (!authorPass) {
this.logger.verbose(`${FAIL} => Inclusive author criteria not matched`);
return Promise.resolve([false, allRuleResults]);
}
}
if (authorPass === null && this.authorIs.exclude !== undefined && this.authorIs.exclude.length > 0) {
for (const auth of this.authorIs.exclude) {
if (await this.resources.testAuthorCriteria(item, auth, false)) {
authorPass = true;
break;
}
}
if (!authorPass) {
this.logger.verbose(`${FAIL} => Exclusive author criteria not matched`);
return Promise.resolve([false, allRuleResults]);
}
const [authFilterResult, authFilterType] = await checkAuthorFilter(item, this.authorIs, this.resources, this.logger);
if(!authFilterResult) {
return Promise.resolve([false, allRuleResults]);
}
if (this.rules.length === 0) {
@@ -268,10 +249,8 @@ export abstract class Check implements ICheck {
// otherwise AND and did not return already so all passed
this.logger.info(`${PASS} => Rules: ${resultsSummary(allResults, this.condition)}`);
return [true, allRuleResults];
} catch (e) {
e.logged = true;
this.logger.warn(`Running rules failed due to uncaught exception`, e);
throw e;
} catch (e: any) {
throw new ErrorWithCause('Running rules failed due to error', {cause: e});
}
}
@@ -345,13 +324,13 @@ export interface ICheck extends JoinCondition, ChecksActivityState {
}
export interface CheckOptions extends ICheck {
rules: Array<IRuleSet | IRule>
actions: ActionConfig[]
logger: Logger
subredditName: string
notifyOnTrigger?: boolean
resources: SubredditResources
client: Snoowrap
rules: Array<IRuleSet | IRule>;
actions: ActionConfig[];
logger: Logger;
subredditName: string;
notifyOnTrigger?: boolean;
resources: SubredditResources;
client: ExtendedSnoowrap;
cacheUserResult?: UserResultCacheOptions;
}
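
The consolidated filter call above returns a tuple so callers only need to log which side of the filter failed. A minimal sketch of that calling pattern; the helper's signature is inferred from its call sites and declared here as an assumption rather than imported.

```typescript
// Sketch only: the shape of the consolidated author-filter call used by Checks and Actions.
declare function checkAuthorFilter(
    item: any,
    authorIs: any,
    resources: any,
    logger: any
): Promise<[boolean, string | undefined]>;

async function passesAuthorFilter(item: any, authorIs: any, resources: any, logger: any): Promise<boolean> {
    const [passed, filterType] = await checkAuthorFilter(item, authorIs, resources, logger);
    if (!passed) {
        logger.verbose(`${filterType} author criteria not matched`);
    }
    return passed;
}
```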

View File

@@ -0,0 +1,27 @@
import {ConfigFormat} from "../types";
export interface ConfigDocumentInterface<DocumentType> {
format: ConfigFormat;
parsed: DocumentType
//parsingError: Error | string;
raw: string;
location?: string;
toString(): string;
toJS(): object;
}
abstract class AbstractConfigDocument<DocumentType> implements ConfigDocumentInterface<DocumentType> {
public abstract format: ConfigFormat;
public abstract parsed: DocumentType;
//public abstract parsingError: Error | string;
constructor(public raw: string, public location?: string) {
}
public abstract toString(): string;
public abstract toJS(): object;
}
export default AbstractConfigDocument;

View File

@@ -0,0 +1,30 @@
import AbstractConfigDocument from "./AbstractConfigDocument";
import {stringify, parse} from 'comment-json';
import JSON5 from 'json5';
import {ConfigFormat} from "../types";
import {OperatorJsonConfig} from "../interfaces";
class JsonConfigDocument extends AbstractConfigDocument<OperatorJsonConfig> {
public parsed: OperatorJsonConfig;
protected cleanParsed: OperatorJsonConfig;
public format: ConfigFormat;
public constructor(raw: string, location?: string) {
super(raw, location);
this.parsed = parse(raw);
this.cleanParsed = JSON5.parse(raw);
this.format = 'json';
}
public toJS(): OperatorJsonConfig {
return this.cleanParsed;
}
public toString(): string {
return stringify(this.parsed, null, 1);
}
}
export default JsonConfigDocument;

View File

@@ -0,0 +1,54 @@
import YamlConfigDocument from "../YamlConfigDocument";
import JsonConfigDocument from "../JsonConfigDocument";
import {YAMLMap, YAMLSeq} from "yaml";
import {BotInstanceJsonConfig, OperatorJsonConfig} from "../../interfaces";
import {assign} from 'comment-json';
export interface OperatorConfigDocumentInterface {
addBot(botData: BotInstanceJsonConfig): void;
toJS(): OperatorJsonConfig;
}
export class YamlOperatorConfigDocument extends YamlConfigDocument implements OperatorConfigDocumentInterface {
addBot(botData: BotInstanceJsonConfig) {
const bots = this.parsed.get('bots') as YAMLSeq;
if (bots === undefined) {
this.parsed.add({key: 'bots', value: [botData]});
} else if (botData.name !== undefined) {
// overwrite if we find an existing
const existingIndex = bots.items.findIndex(x => (x as YAMLMap).get('name') === botData.name);
if (existingIndex !== -1) {
this.parsed.setIn(['bots', existingIndex], botData);
} else {
this.parsed.addIn(['bots'], botData);
}
} else {
this.parsed.addIn(['bots'], botData);
}
}
toJS(): OperatorJsonConfig {
return super.toJS();
}
}
export class JsonOperatorConfigDocument extends JsonConfigDocument implements OperatorConfigDocumentInterface {
addBot(botData: BotInstanceJsonConfig) {
if (this.parsed.bots === undefined) {
this.parsed.bots = [botData];
} else if (botData.name !== undefined) {
const existingIndex = this.parsed.bots.findIndex(x => x.name === botData.name);
if (existingIndex !== -1) {
this.parsed.bots[existingIndex] = assign(this.parsed.bots[existingIndex], botData);
} else {
this.parsed.bots.push(botData);
}
} else {
this.parsed.bots.push(botData);
}
}
toJS(): OperatorJsonConfig {
return super.toJS();
}
}
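As a rough usage sketch (not part of the diff) of how these document classes fit together -- the sample YAML, file path, and bot fields below are illustrative:

import {YamlOperatorConfigDocument} from "./Common/Config/Operator";

// parse an existing YAML operator config, add or overwrite a bot entry by name,
// then serialize it back out; the underlying yaml Document keeps comments/formatting
const doc = new YamlOperatorConfigDocument('bots:\n  - name: u/MyBot\n', '/opt/app/config.yaml');
doc.addBot({name: 'u/MyBot', subreddits: {names: ['mealtimevideos']}});
const updatedYaml = doc.toString(); // write back to disk if the location is writeable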

View File

@@ -0,0 +1,24 @@
import AbstractConfigDocument from "./AbstractConfigDocument";
import {Document, parseDocument} from 'yaml';
import {ConfigFormat} from "../types";
class YamlConfigDocument extends AbstractConfigDocument<Document> {
public parsed: Document;
public format: ConfigFormat;
public constructor(raw: string, location?: string) {
super(raw, location);
this.parsed = parseDocument(raw);
this.format = 'yaml';
}
public toJS(): object {
return this.parsed.toJS();
}
public toString(): string {
return this.parsed.toString();
}
}
export default YamlConfigDocument;

View File

@@ -3,9 +3,9 @@ import {Submission} from "snoowrap/dist/objects";
import {URL} from "url";
import {absPercentDifference, getSharpAsync, isValidImageURL} from "../util";
import sizeOf from "image-size";
import SimpleError from "../Utils/SimpleError";
import {Sharp} from "sharp";
import {blockhash} from "./blockhash/blockhash";
import {SimpleError} from "../Utils/Errors";
export interface ImageDataOptions {
width?: number,
@@ -83,7 +83,7 @@ class ImageData {
}
} catch (err) {
} catch (err: any) {
if(!(err instanceof SimpleError)) {
throw new Error(`Error occurred while fetching response from URL: ${err.message}`);
} else {

View File

@@ -1,7 +1,7 @@
import {HistoricalStats} from "./interfaces";
import {HistoricalStats, FilterCriteriaDefaults} from "./interfaces";
export const cacheOptDefaults = {ttl: 60, max: 500, checkPeriod: 600};
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600};
export const cacheTTLDefaults = {authorTTL: 60, userNotesTTL: 300, wikiTTL: 300, submissionTTL: 60, commentTTL: 60, filterCriteriaTTL: 60, subredditTTL: 600, selfTTL: 60};
export const historicalDefaults: HistoricalStats = {
eventsCheckedTotal: 0,
eventsActionedTotal: 0,
@@ -29,3 +29,13 @@ export const createHistoricalDefaults = (): HistoricalStats => {
actionsRun: new Map(),
};
}
export const filterCriteriaDefault: FilterCriteriaDefaults = {
authorIs: {
exclude: [
{
isMod: true
}
]
}
}

View File

@@ -5,6 +5,17 @@ import Poll from "snoostorm/out/util/Poll";
import Snoowrap from "snoowrap";
import {RuleResult} from "../Rule";
import {IncomingMessage} from "http";
import Submission from "snoowrap/dist/objects/Submission";
import Comment from "snoowrap/dist/objects/Comment";
import RedditUser from "snoowrap/dist/objects/RedditUser";
import {AuthorCriteria, AuthorOptions} from "../Author/Author";
import {ConfigFormat} from "./types";
import AbstractConfigDocument, {ConfigDocumentInterface} from "./Config/AbstractConfigDocument";
import {Document as YamlDocument} from 'yaml';
import {JsonOperatorConfigDocument, YamlOperatorConfigDocument} from "./Config/Operator";
import {ConsoleTransportOptions} from "winston/lib/winston/transports";
import {DailyRotateFileTransportOptions} from "winston-daily-rotate-file";
import {DuplexTransportOptions} from "winston-duplex/dist/DuplexTransport";
/**
* An ISO 8601 Duration
@@ -486,38 +497,6 @@ export type PollOn = 'unmoderated' | 'modqueue' | 'newSub' | 'newComm';
export interface PollingOptionsStrong extends PollingOptions {
limit: number,
interval: number,
clearProcessed: ClearProcessedOptions
}
/**
* For very long-running, high-volume subreddits, clearing the list of processed activities helps manage memory bloat
*
* All of these options have default values based on the limit and/or interval set for polling options on each subreddit stream. They only need to be modified if the defaults are not sufficient.
*
* If both `after` and `size` are defined, whichever is hit first will trigger the list to clear. `after` will be reset after every clear.
* */
export interface ClearProcessedOptions {
/**
* An interval the processed list should be cleared after.
*
* * EX `9 days`
* * EX `3 months`
* * EX `5 minutes`
* @pattern ^\s*(?<time>\d+)\s*(?<unit>days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)\s*$
* */
after?: string,
/**
* Number of activities found in processed list after which the list should be cleared.
*
* Defaults to the `limit` value from `PollingOptions`
* */
size?: number,
/**
* The number of activities to retain in processed list after clearing.
*
* Defaults to `limit` value from `PollingOptions`
* */
retain?: number,
}
export interface PollingDefaults {
@@ -591,8 +570,6 @@ export interface PollingOptions extends PollingDefaults {
*
* */
pollOn: 'unmoderated' | 'modqueue' | 'newSub' | 'newComm'
clearProcessed?: ClearProcessedOptions
}
export interface TTLConfig {
@@ -670,6 +647,24 @@ export interface TTLConfig {
* @default 60
* */
filterCriteriaTTL?: number | boolean;
/**
* Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling
*
* This is useful to prevent the bot from checking Activities it *just* worked on, or that are a product of its checks. Examples:
*
* * Ignore comments created through an Action
* * Ignore Activity polled from modqueue that the bot just reported
*
* This value should be at least as long as the longest polling interval for modqueue/newComm
*
* * If `0` or `true` will cache indefinitely (not recommended)
* * If `false` will not cache
*
* @examples [50]
* @default 50
* */
selfTTL?: number | boolean
}
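For reference, a minimal caching block showing where the new `selfTTL` sits next to the existing TTLs -- the values are illustrative, only the property names come from the TTLConfig interface above:

// ignore activities the bot itself just created or acted on for 60 seconds
const caching = {
    selfTTL: 60,           // should be at least as long as the longest modqueue/newComm polling interval
    filterCriteriaTTL: 60,
    authorTTL: 60,
    wikiTTL: 300,
};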
export interface CacheConfig extends TTLConfig {
@@ -832,6 +827,15 @@ export interface ManagerOptions {
nickname?: string
notifications?: NotificationConfig
credentials?: ThirdPartyCredentialsJsonConfig
/**
* Set the default filter criteria for all checks. If this property is specified it will override any defaults passed from the bot's config
*
* Default behavior is to exclude all mods and automoderator from checks
* */
filterCriteriaDefaults?: FilterCriteriaDefaults
}
/**
@@ -907,6 +911,20 @@ export interface ActivityState {
distinguished?: boolean
approved?: boolean
score?: CompareValue
/**
* A string containing a comparison operator and a value to compare against
*
* The syntax is `(< OR > OR <= OR >=) <number>`
*
* * EX `> 2` => greater than 2 total reports
*
* Defaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:
*
* * EX `> 3 mod` => greater than 3 mod reports
* EX `>= 1 user` => at least 1 user report
*
* @pattern ^\s*(>|>=|<|<=)\s*(\d+)\s*(%?)(.*)$
* */
reports?: CompareValue
age?: DurationComparor
}
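A short sketch of the `reports` syntax described above, as it might appear in an item filter (the numbers are illustrative):

// more than 2 total reports of any kind
const anyReports = {reports: '> 2'};
// at least 1 user report (suffix selects the report type)
const userReported = {reports: '>= 1 user'};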
@@ -928,8 +946,9 @@ export interface SubmissionState extends ActivityState {
* */
title?: string
link_flair_text?: string
link_flair_css_class?: string
link_flair_text?: string | string[]
link_flair_css_class?: string | string[]
flairTemplate?: string | string[]
}
// properties calculated/derived by CM -- not provided as plain values by reddit
@@ -948,6 +967,14 @@ export interface CommentState extends ActivityState {
* A list of SubmissionState attributes to test the Submission this comment is in
* */
submissionState?: SubmissionState[]
/**
* The (nested) level of a comment.
*
* 0 means the comment is at top-level (replying directly to the submission)
* a non-zero value of N means the comment has N parent comments
* */
depth?: DurationComparor
}
/**
@@ -979,6 +1006,8 @@ export interface SubredditState {
* A friendly description of what this State is trying to parse
* */
stateDescription?: string
isUserProfile?: boolean
}
export interface StrongSubredditState extends SubredditState {
@@ -1006,6 +1035,28 @@ export const STOPPED = 'stopped';
export const RUNNING = 'running';
export const PAUSED = 'paused';
export interface SearchAndReplaceRegExp {
/**
* The search value to test for
*
* Can be a normal string (converted to a case-sensitive literal) or a valid regular expression
*
* EX `["find this string", "/some string*\/ig"]`
*
* @examples ["find this string", "/some string*\/ig"]
* */
search: string
/**
* The replacement string/value to use when search is found
*
* This can be a literal string like `'replace with this'`, an empty string to remove the search value (`''`), or a special regex value
*
* See replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace
* */
replace: string
}
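To make the search-and-replace behavior concrete, here is a sketch of a transformation list and one way an entry could be applied -- the `toRegex` helper is hypothetical and only stands in for however the search value is turned into a RegExp:

const transformations = [
    // regex-format search (see the examples above) that strips a trailing marketing suffix
    {search: '/\\s*\\|\\s*buy now.*$/i', replace: ''},
    // plain string search, treated as a case-sensitive literal
    {search: 'click here', replace: ''},
];

// hypothetical application: literal searches become escaped RegExps, regex strings are parsed as-is
const toRegex = (search: string): RegExp =>
    search.startsWith('/')
        ? new RegExp(search.slice(1, search.lastIndexOf('/')), search.slice(search.lastIndexOf('/') + 1))
        : new RegExp(search.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'));

const cleaned = transformations.reduce((acc, t) => acc.replace(toRegex(t.search), t.replace), 'Some Title | buy now at example.com');
// cleaned === 'Some Title'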
export interface NamedGroup {
[name: string]: string
}
@@ -1023,6 +1074,92 @@ export interface RegExResult {
}
type LogLevel = "error" | "warn" | "info" | "verbose" | "debug";
export type LogConsoleOptions = Pick<ConsoleTransportOptions, 'silent' | 'eol' | 'stderrLevels' | 'consoleWarnLevels'> & {
level?: LogLevel
}
export type LogFileOptions = Omit<DailyRotateFileTransportOptions, 'stream' | 'handleRejections' | 'options' | 'handleExceptions' | 'format' | 'log' | 'logv' | 'close' | 'dirname'> & {
level?: LogLevel
/**
* The absolute path to a directory where rotating log files should be stored.
*
* * If not present or `null` or `false` no log files will be created
* * If `true` logs will be stored at `[working directory]/logs`
*
* * ENV => `LOG_DIR`
* * ARG => `--logDir [dir]`
*
* @examples ["/var/log/contextmod"]
* */
dirname?: string | boolean | null
}
// export type StrongFileOptions = LogFileOptions & {
// dirname?: string
// }
export type LogStreamOptions = Omit<DuplexTransportOptions, 'name' | 'stream' | 'handleRejections' | 'handleExceptions' | 'format' | 'log' | 'logv' | 'close'> & {
level?: LogLevel
}
export interface LoggingOptions {
/**
* The minimum log level to output. The log level set will output logs at its level **and all levels above it:**
*
* * `error`
* * `warn`
* * `info`
* * `verbose`
* * `debug`
*
* Note: `verbose` will display *a lot* of information on the status/results of rules/checks/actions as they run, which is very useful for testing configurations. Once your bot is stable, changing the level to `info` will reduce log noise.
*
* * ENV => `LOG_LEVEL`
* * ARG => `--logLevel <level>`
*
* @default "verbose"
* @examples ["verbose"]
* */
level?: LogLevel,
/**
* **DEPRECATED** - Use `file.dirname` instead
* The absolute path to a directory where rotating log files should be stored.
*
* * If not present or `null` or `false` no log files will be created
* * If `true` logs will be stored at `[working directory]/logs`
*
* * ENV => `LOG_DIR`
* * ARG => `--logDir [dir]`
*
* @examples ["/var/log/contextmod"]
* @deprecated
* @see logging.file.dirname
* */
path?: string | boolean | null
/**
* Options for Rotating File logging
* */
file?: LogFileOptions
/**
* Options for logging to api/web
* */
stream?: LogStreamOptions
/**
* Options for logging to console
* */
console?: LogConsoleOptions
}
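A sketch of a logging block using the per-transport options above (paths and levels are illustrative):

const logging = {
    level: 'verbose',                       // base minimum level
    file: {
        level: 'warn',                      // only warnings and errors go to rotating files
        dirname: '/var/log/contextmod',
    },
    stream: {
        level: 'verbose',                   // what the api/web interface receives
    },
    console: {
        level: 'debug',
    },
};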
export type StrongLoggingOptions = Required<Pick<LoggingOptions, 'stream' | 'console' | 'file'>> & {
level?: LogLevel
};
export type LoggerFactoryOptions = StrongLoggingOptions & {
additionalTransports?: any[]
defaultLabel?: string
}
/**
* Available cache providers
* */
@@ -1038,6 +1175,7 @@ export type StrongCache = {
submissionTTL: number | boolean,
commentTTL: number | boolean,
subredditTTL: number | boolean,
selfTTL: number | boolean,
filterCriteriaTTL: number | boolean,
provider: CacheOptions
actionedEventsMax?: number,
@@ -1150,6 +1288,7 @@ export interface Notifier {
export interface ManagerStateChangeOption {
reason?: string
suppressNotification?: boolean
suppressChangeEvent?: boolean
}
/**
@@ -1255,6 +1394,53 @@ export interface WebCredentials {
redirectUri?: string,
}
export interface SnoowrapOptions {
/**
* Proxy all requests to Reddit's API through this endpoint
*
* * ENV => `PROXY`
* * ARG => `--proxy <proxyEndpoint>`
*
* @examples ["http://localhost:4443"]
* */
proxy?: string,
/**
* Manually set the debug status for snoowrap
*
* When snoowrap has `debug: true` it will log the http status response of reddit api requests at the `debug` level
*
* * Set to `true` to always output
* * Set to `false` to never output
*
* If not present or `null` will be set based on `logLevel`
*
* * ENV => `SNOO_DEBUG`
* * ARG => `--snooDebug`
* */
debug?: boolean,
}
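For example, a top-level snoowrap block shared by every bot, with one bot opting out by supplying an empty object (per the bot-level note further down) -- the bot names are illustrative, the proxy value is taken from the @examples tag above:

const operatorConfig = {
    snoowrap: {
        proxy: 'http://localhost:4443',
        debug: true,
    },
    bots: [
        {name: 'u/FirstBot'},                // inherits the operator-level snoowrap settings
        {name: 'u/SecondBot', snoowrap: {}}, // empty object ignores the operator-level settings
    ],
};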
export type FilterCriteriaDefaultBehavior = 'replace' | 'merge';
export interface FilterCriteriaDefaults {
itemIs?: TypedActivityStates
/**
* Determine how itemIs defaults behave when itemIs is present on the check
*
* * merge => adds defaults to check's itemIs
* * replace => check itemIs will replace defaults (no defaults used)
* */
itemIsBehavior?: FilterCriteriaDefaultBehavior
/**
* Determine how authorIs defaults behave when authorIs is present on the check
*
* * merge => merges defaults with check's authorIs
* * replace => check authorIs will replace defaults (no defaults used)
* */
authorIs?: AuthorOptions
authorIsBehavior?: FilterCriteriaDefaultBehavior
}
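A sketch of how these defaults might be declared, reusing the built-in mod exclusion shown in defaults.ts above (the behaviors chosen here are illustrative):

const filterCriteriaDefaults = {
    authorIs: {exclude: [{isMod: true}]}, // mirrors the shipped default
    authorIsBehavior: 'merge',            // a check's own authorIs is merged with this
    itemIs: [],
    itemIsBehavior: 'replace',            // any itemIs on a check replaces the default outright
};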
/**
* The configuration for an **individual reddit account** ContextMod will run as a bot.
*
@@ -1264,7 +1450,7 @@ export interface WebCredentials {
*
* */
export interface BotInstanceJsonConfig {
credentials?: RedditCredentials
credentials?: BotCredentialsJsonConfig | RedditCredentials
/*
* The name to display for the bot. If not specified will use the name of the reddit account IE `u/TheBotName`
* */
@@ -1275,33 +1461,20 @@ export interface BotInstanceJsonConfig {
notifications?: NotificationConfig
/**
* Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior
* Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior.
*
* Overrides any defaults provided at top-level operator config.
*
* Set to an empty object to "ignore" any top-level config
* */
snoowrap?: {
/**
* Proxy all requests to Reddit's API through this endpoint
*
* * ENV => `PROXY`
* * ARG => `--proxy <proxyEndpoint>`
*
* @examples ["http://localhost:4443"]
* */
proxy?: string,
/**
* Manually set the debug status for snoowrap
*
* When snoowrap has `debug: true` it will log the http status response of reddit api requests at the `debug` level
*
* * Set to `true` to always output
* * Set to `false` to never output
*
* If not present or `null` will be set based on `logLevel`
*
* * ENV => `SNOO_DEBUG`
* * ARG => `--snooDebug`
* */
debug?: boolean,
}
snoowrap?: SnoowrapOptions
/**
* Define the default behavior for all filter criteria on all checks in all subreddits
*
* Defaults to exclude mods and automoderator from checks
* */
filterCriteriaDefaults?: FilterCriteriaDefaults
/**
* Settings related to bot behavior for subreddits it is managing
@@ -1370,18 +1543,31 @@ export interface BotInstanceJsonConfig {
* */
polling?: PollingDefaults & {
/**
* If set to `true` all subreddits polling unmoderated/modqueue with default polling settings will share a request to "r/mod"
* otherwise each subreddit will poll its own mod view
* DEPRECATED: See `shared`
*
* Using the ENV or ARG will set `unmoderated` and `modqueue` on `shared`
*
* * ENV => `SHARE_MOD`
* * ARG => `--shareMod`
*
* @default false
* @deprecated
* */
sharedMod?: boolean,
/**
* If sharing a mod stream stagger pushing relevant Activities to individual subreddits.
* Set which polling sources should be shared among subreddits using default polling settings for that source
*
* For `unmoderated` and `modqueue` the bot will poll on **r/mod** for new activities
* For `newSub` and `newComm` all subreddits sharing the source will be combined into a single poll, like **r/subreddit1+subreddit2/new**
*
* If set to `true` all polling sources will be shared, otherwise specify which sources should be shared as a list
*
* */
shared?: PollOn[] | true,
/**
* If sharing a stream, stagger pushing relevant Activities to individual subreddits.
*
* Useful when running many subreddits and rules are potentially cpu/memory/traffic heavy -- allows spreading out load
* */
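For instance, a bot's polling block could share only the mod-oriented sources (the limit/interval shown are the documented defaults, the rest is illustrative):

const polling = {
    shared: ['unmoderated', 'modqueue'],  // same effect as the deprecated sharedMod/SHARE_MOD flag
    // shared: true,                      // alternatively, share every polling source
    limit: 100,
    interval: 30,
};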
@@ -1484,38 +1670,7 @@ export interface OperatorJsonConfig {
/**
* Settings to configure global logging defaults
* */
logging?: {
/**
* The minimum log level to output. The log level set will output logs at its level **and all levels above it:**
*
* * `error`
* * `warn`
* * `info`
* * `verbose`
* * `debug`
*
* Note: `verbose` will display *a lot* of information on the status/results of rules/checks/actions as they run, which is very useful for testing configurations. Once your bot is stable, changing the level to `info` will reduce log noise.
*
* * ENV => `LOG_LEVEL`
* * ARG => `--logLevel <level>`
*
* @default "verbose"
* @examples ["verbose"]
* */
level?: LogLevel,
/**
* The absolute path to a directory where rotating log files should be stored.
*
* * If not present or `null` no log files will be created
* * If `true` logs will be stored at `[working directory]/logs`
*
* * ENV => `LOG_DIR`
* * ARG => `--logDir [dir]`
*
* @examples ["/var/log/contextmod"]
* */
path?: string,
},
logging?: LoggingOptions,
/**
* Settings to configure the default caching behavior globally
@@ -1524,6 +1679,11 @@ export interface OperatorJsonConfig {
* */
caching?: OperatorCacheConfig
/**
* Set global snoowrap options as well as default snoowrap config for all bots that don't specify their own
* */
snoowrap?: SnoowrapOptions
bots?: BotInstanceJsonConfig[]
/**
@@ -1644,6 +1804,8 @@ export interface OperatorJsonConfig {
* */
friendly?: string,
}
credentials?: ThirdPartyCredentialsJsonConfig
}
export interface RequiredOperatorRedditCredentials extends RedditCredentials {
@@ -1657,8 +1819,23 @@ export interface RequiredWebRedditCredentials extends RedditCredentials {
redirectUri: string
}
export interface ThirdPartyCredentialsJsonConfig {
youtube?: {
apiKey: string
}
[key: string]: any
}
export interface BotCredentialsJsonConfig extends ThirdPartyCredentialsJsonConfig {
reddit: RedditCredentials
}
export interface BotCredentialsConfig extends ThirdPartyCredentialsJsonConfig {
reddit: RequiredOperatorRedditCredentials
}
export interface BotInstanceConfig extends BotInstanceJsonConfig {
credentials: RequiredOperatorRedditCredentials
credentials: BotCredentialsJsonConfig
snoowrap: {
proxy?: string,
debug?: boolean,
@@ -1671,7 +1848,7 @@ export interface BotInstanceConfig extends BotInstanceJsonConfig {
heartbeatInterval: number,
},
polling: {
sharedMod: boolean,
shared: PollOn[],
stagger?: number,
limit: number,
interval: number,
@@ -1693,10 +1870,7 @@ export interface OperatorConfig extends OperatorJsonConfig {
display?: string,
},
notifications?: NotificationConfig
logging: {
level: LogLevel,
path?: string,
},
logging: StrongLoggingOptions,
caching: StrongCache,
web: {
port: number,
@@ -1720,6 +1894,16 @@ export interface OperatorConfig extends OperatorJsonConfig {
friendly?: string,
}
bots: BotInstanceConfig[]
credentials: ThirdPartyCredentialsJsonConfig
}
export interface OperatorFileConfig {
document: YamlOperatorConfigDocument | JsonOperatorConfigDocument
isWriteable?: boolean
}
export interface OperatorConfigWithFileContext extends OperatorConfig {
fileConfig: OperatorFileConfig
}
//export type OperatorConfig = Required<OperatorJsonConfig>;
@@ -1749,20 +1933,18 @@ export interface LogInfo {
bot?: string
}
export interface ActionResult {
export interface ActionResult extends ActionProcessResult {
kind: string,
name: string,
run: boolean,
runReason?: string,
dryRun: boolean,
success: boolean,
result?: string,
}
export interface ActionProcessResult {
success: boolean,
dryRun: boolean,
result?: string
touchedEntities?: (Submission | Comment | RedditUser | string)[]
}
export interface ActionedEvent {
@@ -1791,14 +1973,6 @@ export interface RedditEntity {
type: RedditEntityType
}
export interface StatusCodeError extends Error {
name: 'StatusCodeError',
statusCode: number,
message: string,
response: IncomingMessage,
error: Error
}
export interface HistoricalStatsDisplay extends HistoricalStats {
checksRunTotal: number
checksFromCacheTotal: number
@@ -1878,3 +2052,104 @@ export interface HistoricalStatUpdateData {
rulesCachedTotal: number
rulesTriggered: string[] | string
}
export type SearchFacetType = 'title' | 'url' | 'duplicates' | 'crossposts' | 'external';
export interface RepostItem {
value: string
createdOn?: number
source: string
sourceUrl?: string
score?: number
id: string
itemType: string
acquisitionType: SearchFacetType | 'comment'
sourceObj?: any
reqSameness?: number
}
export interface RepostItemResult extends RepostItem {
sameness: number
}
export interface StringComparisonOptions {
lengthWeight?: number,
transforms?: ((str: string) => string)[]
}
export interface FilterCriteriaPropertyResult<T> {
property: keyof T
expected: (string | boolean | number)[]
found?: string | boolean | number | null
passed?: null | boolean
reason?: string
behavior: FilterBehavior
}
export interface FilterCriteriaResult<T> {
behavior: FilterBehavior
criteria: T//AuthorCriteria | TypedActivityStates
propertyResults: FilterCriteriaPropertyResult<T>[]
passed: boolean
}
export type FilterBehavior = 'include' | 'exclude'
export interface FilterResult<T> {
criteriaResults: FilterCriteriaResult<T>[]
join: JoinOperands
passed: boolean
}
export interface TextTransformOptions {
/**
* A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.
*
* * If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text
* * If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text
* */
transformations?: SearchAndReplaceRegExp[]
/**
* Specify a separate set of transformations for the activity text (submission title or comment)
*
* To perform no transformations when `transformations` is defined set this to an empty array (`[]`)
* */
transformationsActivity?: SearchAndReplaceRegExp[]
}
export interface TextMatchOptions {
/**
* The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match
*
* Note: Setting this to 0 will cause every candidate to be considered a match -- useful if you want to match whenever the URL has been reposted anywhere
*
* Defaults to `85` (85%)
*
* @default 85
* @example [85]
* */
matchScore?: number
/**
* The minimum number of words the activity being checked must contain for this rule to run
*
* If the word count is below the minimum the rule fails
*
* Defaults to 2
*
* @default 2
* @example [2]
* */
minWordCount?: number
/**
* Should text matching be case sensitive?
*
* Defaults to false
*
* @default false
* @example [false]
**/
caseSensitive?: boolean
}
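Putting the two option sets together, a sketch of how they might be filled in (the thresholds are the documented defaults; the transformation is illustrative):

const textOptions = {
    // TextTransformOptions: normalize text before comparing
    transformations: [
        {search: '/\\[.*?\\]/g', replace: ''},  // e.g. strip bracketed tags like "[OC]"
    ],
    // TextMatchOptions
    matchScore: 85,       // candidate must be at least 85% similar to count as a match
    minWordCount: 2,
    caseSensitive: false,
};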

View File

@@ -3,6 +3,7 @@ import {RepeatActivityJSONConfig} from "../Rule/RepeatActivityRule";
import {AuthorRuleJSONConfig} from "../Rule/AuthorRule";
import {AttributionJSONConfig} from "../Rule/AttributionRule";
import {FlairActionJson} from "../Action/SubmissionAction/FlairAction";
import {UserFlairActionJson} from "../Action/UserFlairAction";
import {CommentActionJson} from "../Action/CommentAction";
import {ReportActionJson} from "../Action/ReportAction";
import {LockActionJson} from "../Action/LockAction";
@@ -13,9 +14,19 @@ import {ApproveActionJson} from "../Action/ApproveAction";
import {BanActionJson} from "../Action/BanAction";
import {RegexRuleJSONConfig} from "../Rule/RegexRule";
import {MessageActionJson} from "../Action/MessageAction";
import {RepostRuleJSONConfig} from "../Rule/RepostRule";
export type RuleJson = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | string;
export type RuleJson = RecentActivityRuleJSONConfig | RepeatActivityJSONConfig | AuthorRuleJSONConfig | AttributionJSONConfig | HistoryJSONConfig | RegexRuleJSONConfig | RepostRuleJSONConfig | string;
export type RuleObjectJson = Exclude<RuleJson, string>
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | string;
export type ActionJson = CommentActionJson | FlairActionJson | ReportActionJson | LockActionJson | RemoveActionJson | ApproveActionJson | BanActionJson | UserNoteActionJson | MessageActionJson | UserFlairActionJson | string;
export type ActionObjectJson = Exclude<ActionJson, string>;
// borrowed from https://github.com/jabacchetta/set-random-interval/blob/master/src/index.ts
export type SetRandomInterval = (
intervalFunction: () => void,
minDelay: number,
maxDelay: number,
) => { clear: () => void };
export type ConfigFormat = 'json' | 'yaml';

src/Common/typings/support.d.ts (vendored, new file, 26 lines)
View File

@@ -0,0 +1,26 @@
declare module 'snoowrap/dist/errors' {
export interface InvalidUserError extends Error {
}
export interface NoCredentialsError extends Error {
}
export interface InvalidMethodCallError extends Error {
}
export interface RequestError extends Error {
statusCode: number,
response: http.IncomingMessage
error: Error
}
export interface StatusCodeError extends RequestError {
name: 'StatusCodeError',
}
export interface RateLimitError extends RequestError {
name: 'RateLimitError',
}
}
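With this declaration file in place, the error shapes can be imported for type-checking elsewhere; a minimal sketch (the narrowing logic itself is illustrative):

import {StatusCodeError} from 'snoowrap/dist/errors';

// narrow a caught error by the `name` discriminant declared above
const isStatusCodeError = (err: any): err is StatusCodeError => err != null && err.name === 'StatusCodeError';

const describeError = (err: any): string =>
    isStatusCodeError(err) ? `Reddit responded with HTTP ${err.statusCode}` : (err?.message ?? 'unknown error');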

View File

@@ -1,12 +1,12 @@
import {Logger} from "winston";
import {
buildCacheOptionsFromProvider, buildCachePrefix,
createAjvFactory,
createAjvFactory, fileOrDirectoryIsWriteable,
mergeArr,
normalizeName,
overwriteMerge,
parseBool, randomId,
readConfigFile,
parseBool, parseFromJsonOrYamlToObject, randomId,
readConfigFile, removeFromSourceIfKeysExistsInDestination,
removeUndefinedKeys
} from "./util";
import {CommentCheck} from "./Check/CommentCheck";
@@ -31,19 +31,34 @@ import {
CacheOptions,
BotInstanceJsonConfig,
BotInstanceConfig,
RequiredWebRedditCredentials
RequiredWebRedditCredentials,
RedditCredentials,
BotCredentialsJsonConfig,
BotCredentialsConfig,
FilterCriteriaDefaults, TypedActivityStates, OperatorFileConfig
} from "./Common/interfaces";
import {isRuleSetJSON, RuleSetJson, RuleSetObjectJson} from "./Rule/RuleSet";
import deepEqual from "fast-deep-equal";
import {ActionJson, ActionObjectJson, RuleJson, RuleObjectJson} from "./Common/types";
import {ActionJson, ActionObjectJson, ConfigFormat, RuleJson, RuleObjectJson} from "./Common/types";
import {isActionJson} from "./Action";
import {getLogger} from "./Utils/loggerFactory";
import {GetEnvVars} from 'env-cmd';
import {operatorConfig} from "./Utils/CommandConfig";
import merge from 'deepmerge';
import * as process from "process";
import {cacheOptDefaults, cacheTTLDefaults} from "./Common/defaults";
import {cacheOptDefaults, cacheTTLDefaults, filterCriteriaDefault} from "./Common/defaults";
import objectHash from "object-hash";
import {AuthorCriteria, AuthorOptions} from "./Author/Author";
import path from 'path';
import {
JsonOperatorConfigDocument,
OperatorConfigDocumentInterface,
YamlOperatorConfigDocument
} from "./Common/Config/Operator";
import {ConfigDocumentInterface} from "./Common/Config/AbstractConfigDocument";
import {Document as YamlDocument} from "yaml";
import {SimpleError} from "./Utils/Errors";
import {ErrorWithCause} from "pony-cause";
export interface ConfigBuilderOptions {
logger: Logger,
@@ -115,22 +130,51 @@ export class ConfigBuilder {
return validConfig as JSONConfig;
}
parseToStructured(config: JSONConfig): CheckStructuredJson[] {
parseToStructured(config: JSONConfig, filterCriteriaDefaultsFromBot?: FilterCriteriaDefaults): CheckStructuredJson[] {
let namedRules: Map<string, RuleObjectJson> = new Map();
let namedActions: Map<string, ActionObjectJson> = new Map();
const {checks = []} = config;
const {checks = [], filterCriteriaDefaults} = config;
for (const c of checks) {
const {rules = []} = c;
namedRules = extractNamedRules(rules, namedRules);
namedActions = extractNamedActions(c.actions, namedActions);
}
const filterDefs = filterCriteriaDefaults ?? filterCriteriaDefaultsFromBot;
const {
authorIsBehavior = 'merge',
itemIsBehavior = 'merge',
authorIs: authorIsDefault = {},
itemIs: itemIsDefault = []
} = filterDefs || {};
const structuredChecks: CheckStructuredJson[] = [];
for (const c of checks) {
const {rules = []} = c;
const {rules = [], authorIs = {}, itemIs = []} = c;
const strongRules = insertNamedRules(rules, namedRules);
const strongActions = insertNamedActions(c.actions, namedActions);
const strongCheck = {...c, rules: strongRules, actions: strongActions} as CheckStructuredJson;
let derivedAuthorIs: AuthorOptions = authorIsDefault;
if (authorIsBehavior === 'merge') {
derivedAuthorIs = merge.all([authorIs, authorIsDefault], {arrayMerge: removeFromSourceIfKeysExistsInDestination});
} else if (Object.keys(authorIs).length > 0) {
derivedAuthorIs = authorIs;
}
let derivedItemIs: TypedActivityStates = itemIsDefault;
if (itemIsBehavior === 'merge') {
derivedItemIs = [...itemIs, ...itemIsDefault];
} else if (itemIs.length > 0) {
derivedItemIs = itemIs;
}
const strongCheck = {
...c,
authorIs: derivedAuthorIs,
itemIs: derivedItemIs,
rules: strongRules,
actions: strongActions
} as CheckStructuredJson;
structuredChecks.push(strongCheck);
}
@@ -146,10 +190,6 @@ export const buildPollingOptions = (values: (string | PollingOptions)[]): Pollin
pollOn: v as PollOn,
interval: DEFAULT_POLLING_INTERVAL,
limit: DEFAULT_POLLING_LIMIT,
clearProcessed: {
size: DEFAULT_POLLING_LIMIT,
retain: DEFAULT_POLLING_LIMIT,
}
});
} else {
const {
@@ -157,14 +197,12 @@ export const buildPollingOptions = (values: (string | PollingOptions)[]): Pollin
interval = DEFAULT_POLLING_INTERVAL,
limit = DEFAULT_POLLING_LIMIT,
delayUntil,
clearProcessed = {size: limit, retain: limit},
} = v;
opts.push({
pollOn: p as PollOn,
interval,
limit,
delayUntil,
clearProcessed
});
}
}
@@ -281,8 +319,6 @@ export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfi
heartbeat,
hardLimit,
authorTTL,
snooProxy,
snooDebug,
sharedMod,
caching,
} = args || {};
@@ -294,10 +330,6 @@ export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfi
accessToken,
refreshToken,
},
snoowrap: {
proxy: snooProxy,
debug: snooDebug,
},
subreddits: {
names: subreddits,
wikiConfig,
@@ -305,7 +337,7 @@ export const parseDefaultBotInstanceFromArgs = (args: any): BotInstanceJsonConfi
heartbeatInterval: heartbeat,
},
polling: {
sharedMod,
shared: sharedMod ? ['unmoderated', 'modqueue'] : undefined,
},
nanny: {
softLimit,
@@ -330,6 +362,8 @@ export const parseOpConfigFromArgs = (args: any): OperatorJsonConfig => {
mode,
caching,
authorTTL,
snooProxy,
snooDebug,
} = args || {};
const data = {
@@ -340,12 +374,25 @@ export const parseOpConfigFromArgs = (args: any): OperatorJsonConfig => {
},
logging: {
level: logLevel,
path: logDir === true ? `${process.cwd()}/logs` : undefined,
file: {
level: logLevel,
dirname: logDir,
},
stream: {
level: logLevel,
},
console: {
level: logLevel,
}
},
caching: {
provider: caching,
authorTTL
},
snoowrap: {
proxy: snooProxy,
debug: snooDebug,
},
web: {
enabled: web,
port,
@@ -387,10 +434,13 @@ const parseListFromEnv = (val: string | undefined) => {
export const parseDefaultBotInstanceFromEnv = (): BotInstanceJsonConfig => {
const data = {
credentials: {
clientId: process.env.CLIENT_ID,
clientSecret: process.env.CLIENT_SECRET,
accessToken: process.env.ACCESS_TOKEN,
refreshToken: process.env.REFRESH_TOKEN,
reddit: {
clientId: process.env.CLIENT_ID,
clientSecret: process.env.CLIENT_SECRET,
accessToken: process.env.ACCESS_TOKEN,
refreshToken: process.env.REFRESH_TOKEN,
},
youtube: process.env.YOUTUBE_API_KEY
},
subreddits: {
names: parseListFromEnv(process.env.SUBREDDITS),
@@ -398,12 +448,8 @@ export const parseDefaultBotInstanceFromEnv = (): BotInstanceJsonConfig => {
dryRun: parseBool(process.env.DRYRUN, undefined),
heartbeatInterval: process.env.HEARTBEAT !== undefined ? parseInt(process.env.HEARTBEAT) : undefined,
},
snoowrap: {
proxy: process.env.PROXY,
debug: parseBool(process.env.SNOO_DEBUG, undefined),
},
polling: {
sharedMod: parseBool(process.env.SHARE_MOD),
shared: parseBool(process.env.SHARE_MOD) ? ['unmoderated', 'modqueue'] : undefined,
},
nanny: {
softLimit: process.env.SOFT_LIMIT !== undefined ? parseInt(process.env.SOFT_LIMIT) : undefined,
@@ -421,9 +467,17 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
display: process.env.OPERATOR_DISPLAY
},
logging: {
// @ts-ignore
level: process.env.LOG_LEVEL,
path: process.env.LOG_DIR === 'true' ? `${process.cwd()}/logs` : undefined,
file: {
level: process.env.LOG_LEVEL,
dirname: process.env.LOG_DIR,
},
stream: {
level: process.env.LOG_LEVEL,
},
console: {
level: process.env.LOG_LEVEL,
}
},
caching: {
provider: {
@@ -432,6 +486,10 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
},
authorTTL: process.env.AUTHOR_TTL !== undefined ? parseInt(process.env.AUTHOR_TTL) : undefined
},
snoowrap: {
proxy: process.env.PROXY,
debug: parseBool(process.env.SNOO_DEBUG, undefined),
},
web: {
port: process.env.PORT !== undefined ? parseInt(process.env.PORT) : undefined,
session: {
@@ -443,6 +501,11 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
clientSecret: process.env.CLIENT_SECRET,
redirectUri: process.env.REDIRECT_URI,
},
},
credentials: {
youtube: {
apiKey: process.env.YOUTUBE_API_KEY
}
}
}
@@ -455,12 +518,26 @@ export const parseOpConfigFromEnv = (): OperatorJsonConfig => {
// Actual ENVs (from environment)
// json config
// args from cli
export const parseOperatorConfigFromSources = async (args: any): Promise<OperatorJsonConfig> => {
const {logLevel = process.env.LOG_LEVEL, logDir = process.env.LOG_DIR || false} = args || {};
export const parseOperatorConfigFromSources = async (args: any): Promise<[OperatorJsonConfig, OperatorFileConfig]> => {
const {logLevel = process.env.LOG_LEVEL ?? 'debug', logDir = process.env.LOG_DIR} = args || {};
const envPath = process.env.OPERATOR_ENV;
const initLoggerOptions = {
level: logLevel,
console: {
level: logLevel
},
file: {
level: logLevel,
dirname: logDir,
},
stream: {
level: logLevel
}
}
// create a pre config logger to help with debugging
const initLogger = getLogger({logLevel, logDir: logDir === true ? `${process.cwd()}/logs` : logDir}, 'init');
// default to debug if nothing is provided
const initLogger = getLogger(initLoggerOptions, 'init');
try {
const vars = await GetEnvVars({
@@ -476,7 +553,7 @@ export const parseOperatorConfigFromSources = async (args: any): Promise<Operato
process.env[k] = v;
}
}
} catch (err) {
} catch (err: any) {
let msg = 'No .env file found at default location (./env)';
if (envPath !== undefined) {
msg = `${msg} or OPERATOR_ENV path (${envPath})`;
@@ -486,24 +563,90 @@ export const parseOperatorConfigFromSources = async (args: any): Promise<Operato
//swallow silently for now 😬
}
const {operatorConfig = process.env.OPERATOR_CONFIG} = args;
const {operatorConfig = (process.env.OPERATOR_CONFIG ?? path.resolve(__dirname, '../config.yaml'))} = args;
let configFromFile: OperatorJsonConfig = {};
if (operatorConfig !== undefined) {
let rawConfig;
try {
rawConfig = await readConfigFile(operatorConfig, {log: initLogger}) as object;
} catch (err) {
initLogger.error('Cannot continue app startup because operator config file was not parseable.');
err.logged = true;
throw err;
let fileConfigFormat: ConfigFormat | undefined = undefined;
let fileConfig: object = {};
let rawConfig: string = '';
let configDoc: YamlOperatorConfigDocument | JsonOperatorConfigDocument;
let writeable = false;
try {
writeable = await fileOrDirectoryIsWriteable(operatorConfig);
} catch (e) {
initLogger.warn(`Issue while parsing operator config file location: ${e} \n This is only a problem if you do not have a config file but are planning on adding bots interactively.`);
}
try {
const [rawConfigValue, format] = await readConfigFile(operatorConfig, {log: initLogger});
rawConfig = rawConfigValue ?? '';
fileConfigFormat = format as ConfigFormat;
} catch (err: any) {
const {code} = err;
if (code === 'ENOENT') {
initLogger.warn('No operator config file found but will continue');
if (err.extension !== undefined) {
fileConfigFormat = err.extension
}
} else {
throw new ErrorWithCause('Cannot continue app startup because operator config file exists but was not parseable.', {cause: err});
}
}
const [format, doc, jsonErr, yamlErr] = parseFromJsonOrYamlToObject(rawConfig, {
location: operatorConfig,
jsonDocFunc: (content, location) => new JsonOperatorConfigDocument(content, location),
yamlDocFunc: (content, location) => new YamlOperatorConfigDocument(content, location)
});
if (format !== undefined && fileConfigFormat === undefined) {
fileConfigFormat = 'yaml';
}
if (doc === undefined && rawConfig !== '') {
initLogger.error(`Could not parse file contents at ${operatorConfig} as JSON or YAML (likely it is ${fileConfigFormat}):`);
initLogger.error(jsonErr);
initLogger.error(yamlErr);
throw new SimpleError(`Could not parse file contents at ${operatorConfig} as JSON or YAML`);
} else if (doc === undefined && rawConfig === '') {
// create an empty doc
if(fileConfigFormat === 'json') {
configDoc = new JsonOperatorConfigDocument('{}', operatorConfig);
} else {
configDoc = new YamlOperatorConfigDocument('', operatorConfig);
configDoc.parsed = new YamlDocument({});
}
configFromFile = {};
} else {
configDoc = doc as (YamlOperatorConfigDocument | JsonOperatorConfigDocument);
try {
configFromFile = validateJson(rawConfig, operatorSchema, initLogger) as OperatorJsonConfig;
} catch (err) {
configFromFile = validateJson(configDoc.toJS(), operatorSchema, initLogger) as OperatorJsonConfig;
const {
bots = [],
logging: {
path = undefined
} = {}
} = configFromFile || {};
if(path !== undefined) {
initLogger.warn(`'path' property in top-level 'logging' object is DEPRECATED and will be removed in next minor version. Use 'logging.file.dirname' instead`);
}
for (const b of bots) {
const {
polling: {
sharedMod
} = {}
} = b;
if (sharedMod !== undefined) {
initLogger.warn(`'sharedMod' bot config property is DEPRECATED and will be removed in next minor version. Use 'shared' property instead (see docs)`);
break;
}
}
} catch (err: any) {
initLogger.error('Cannot continue app startup because operator config file was not valid.');
throw err;
}
}
const opConfigFromArgs = parseOpConfigFromArgs(args);
const opConfigFromEnv = parseOpConfigFromEnv();
@@ -530,7 +673,10 @@ export const parseOperatorConfigFromSources = async (args: any): Promise<Operato
botInstances = botInstancesFromFile.map(x => merge.all([defaultBotInstance, x], {arrayMerge: overwriteMerge}));
}
return removeUndefinedKeys({...mergedConfig, bots: botInstances}) as OperatorJsonConfig;
return [removeUndefinedKeys({...mergedConfig, bots: botInstances}) as OperatorJsonConfig, {
document: configDoc,
isWriteable: writeable
}];
}
export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): OperatorConfig => {
@@ -543,6 +689,9 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
logging: {
level = 'verbose',
path,
file = {},
console = {},
stream = {},
} = {},
caching: opCache,
web: {
@@ -560,11 +709,13 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
credentials: webCredentials,
operators,
} = {},
snoowrap: snoowrapOp = {},
api: {
port: apiPort = 8095,
secret: apiSecret = randomId(),
friendly,
} = {},
credentials = {},
bots = [],
} = data;
@@ -614,130 +765,14 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
}
}
let hydratedBots: BotInstanceConfig[] = bots.map(x => {
const {
name: botName,
polling: {
sharedMod = false,
stagger,
limit = 100,
interval = 30,
} = {},
queue: {
maxWorkers = 1,
} = {},
caching,
nanny: {
softLimit = 250,
hardLimit = 50
} = {},
snoowrap = {},
credentials: {
clientId: ci,
clientSecret: cs,
...restCred
} = {},
subreddits: {
names = [],
exclude = [],
wikiConfig = 'botconfig/contextbot',
dryRun,
heartbeatInterval = 300,
} = {},
} = x;
let botCache: StrongCache;
let botActionedEventsDefault: number;
if (caching === undefined) {
botCache = {
...cacheTTLDefaults,
actionedEventsDefault: opActionedEventsDefault,
actionedEventsMax: opActionedEventsMax,
provider: {...defaultProvider}
};
} else {
const {
provider,
actionedEventsMax = opActionedEventsMax,
actionedEventsDefault = opActionedEventsDefault,
...restConfig
} = caching;
botActionedEventsDefault = actionedEventsDefault;
if (actionedEventsMax !== undefined) {
botActionedEventsDefault = Math.min(actionedEventsDefault, actionedEventsMax);
}
if (typeof provider === 'string') {
botCache = {
...cacheTTLDefaults,
...restConfig,
actionedEventsDefault: botActionedEventsDefault,
provider: {
store: provider as CacheProvider,
...cacheOptDefaults
}
}
} else {
const {ttl = 60, max = 500, store = 'memory', ...rest} = provider || {};
botCache = {
...cacheTTLDefaults,
...restConfig,
actionedEventsDefault: botActionedEventsDefault,
actionedEventsMax,
provider: {
store,
...cacheOptDefaults,
...rest,
},
}
}
}
const botCreds = {
clientId: (ci as string),
clientSecret: (cs as string),
...restCred,
};
if (botCache.provider.prefix === undefined || botCache.provider.prefix === defaultProvider.prefix) {
// need to provide unique prefix to bot
botCache.provider.prefix = buildCachePrefix([botCache.provider.prefix, 'bot', (botName || objectHash.sha1(botCreds))]);
}
return {
name: botName,
snoowrap,
subreddits: {
names,
exclude,
wikiConfig,
heartbeatInterval,
dryRun,
},
credentials: botCreds,
caching: botCache,
polling: {
sharedMod,
stagger,
limit,
interval,
},
queue: {
maxWorkers,
},
nanny: {
softLimit,
hardLimit
}
}
});
const defaultOperators = typeof name === 'string' ? [name] : name;
const {
dirname = path,
...fileRest
} = file;
const config: OperatorConfig = {
mode,
operator: {
@@ -746,7 +781,19 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
},
logging: {
level,
path
file: {
level: level,
dirname,
...fileRest,
},
stream: {
level: level,
...stream,
},
console: {
level: level,
...console,
}
},
caching: cache,
web: {
@@ -772,8 +819,175 @@ export const buildOperatorConfigWithDefaults = (data: OperatorJsonConfig): Opera
secret: apiSecret,
friendly
},
bots: hydratedBots,
bots: [],
credentials,
};
config.bots = bots.map(x => buildBotConfig(x, config));
return config;
}
export const buildBotConfig = (data: BotInstanceJsonConfig, opConfig: OperatorConfig): BotInstanceConfig => {
const {
snoowrap: snoowrapOp,
caching: {
actionedEventsMax: opActionedEventsMax,
actionedEventsDefault: opActionedEventsDefault = 25,
provider: defaultProvider,
} = {}
} = opConfig;
const {
name: botName,
filterCriteriaDefaults = filterCriteriaDefault,
polling: {
sharedMod,
shared = [],
stagger,
limit = 100,
interval = 30,
} = {},
queue: {
maxWorkers = 1,
} = {},
caching,
nanny: {
softLimit = 250,
hardLimit = 50
} = {},
snoowrap = snoowrapOp,
credentials = {},
subreddits: {
names = [],
exclude = [],
wikiConfig = 'botconfig/contextbot',
dryRun,
heartbeatInterval = 300,
} = {},
} = data;
let botCache: StrongCache;
let botActionedEventsDefault: number;
if (caching === undefined) {
botCache = {
...cacheTTLDefaults,
actionedEventsDefault: opActionedEventsDefault,
actionedEventsMax: opActionedEventsMax,
provider: {...defaultProvider as CacheOptions}
};
} else {
const {
provider,
actionedEventsMax = opActionedEventsMax,
actionedEventsDefault = opActionedEventsDefault,
...restConfig
} = caching;
botActionedEventsDefault = actionedEventsDefault;
if (actionedEventsMax !== undefined) {
botActionedEventsDefault = Math.min(actionedEventsDefault, actionedEventsMax);
}
if (typeof provider === 'string') {
botCache = {
...cacheTTLDefaults,
...restConfig,
actionedEventsDefault: botActionedEventsDefault,
provider: {
store: provider as CacheProvider,
...cacheOptDefaults
}
}
} else {
const {ttl = 60, max = 500, store = 'memory', ...rest} = provider || {};
botCache = {
...cacheTTLDefaults,
...restConfig,
actionedEventsDefault: botActionedEventsDefault,
actionedEventsMax,
provider: {
store,
...cacheOptDefaults,
...rest,
},
}
}
}
let botCreds: BotCredentialsConfig;
if ((credentials as any).clientId !== undefined) {
const creds = credentials as RedditCredentials;
const {
clientId: ci,
clientSecret: cs,
...restCred
} = creds;
botCreds = {
reddit: {
clientId: (ci as string),
clientSecret: (cs as string),
...restCred,
}
}
} else {
const creds = credentials as BotCredentialsJsonConfig;
const {
reddit: {
clientId: ci,
clientSecret: cs,
...restRedditCreds
},
...rest
} = creds;
botCreds = {
reddit: {
clientId: (ci as string),
clientSecret: (cs as string),
...restRedditCreds,
},
...rest
}
}
if (botCache.provider.prefix === undefined || botCache.provider.prefix === (defaultProvider as CacheOptions).prefix) {
// need to provide unique prefix to bot
botCache.provider.prefix = buildCachePrefix([botCache.provider.prefix, 'bot', (botName || objectHash.sha1(botCreds))]);
}
let realShared = shared === true ? ['unmoderated', 'modqueue', 'newComm', 'newSub'] : shared;
if (sharedMod === true) {
realShared.push('unmoderated');
realShared.push('modqueue');
}
return {
name: botName,
snoowrap: snoowrap || {},
filterCriteriaDefaults,
subreddits: {
names,
exclude,
wikiConfig,
heartbeatInterval,
dryRun,
},
credentials: botCreds,
caching: botCache,
polling: {
shared: [...new Set(realShared)] as PollOn[],
stagger,
limit,
interval,
},
queue: {
maxWorkers,
},
nanny: {
softLimit,
hardLimit
}
}
}

View File

@@ -14,8 +14,8 @@ import {
PASS
} from "../util";
import { Comment } from "snoowrap/dist/objects";
import SimpleError from "../Utils/SimpleError";
import as from "async";
import {SimpleError} from "../Utils/Errors";
export interface AttributionCriteria {

View File

@@ -2,6 +2,7 @@ import {Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
import {Comment} from "snoowrap";
import Submission from "snoowrap/dist/objects/Submission";
import {Author, AuthorCriteria} from "../Author/Author";
import {checkAuthorFilter} from "../Subreddit/SubredditResources";
/**
* Checks the author of the Activity against AuthorCriteria. This differs from a Rule's AuthorOptions as this is a full Rule and will only pass/fail, not skip.
@@ -59,20 +60,8 @@ export class AuthorRule extends Rule {
}
protected async process(item: Comment | Submission): Promise<[boolean, RuleResult]> {
if (this.include.length > 0) {
for (const auth of this.include) {
if (await this.resources.testAuthorCriteria(item, auth)) {
return Promise.resolve([true, this.getResult(true)]);
}
}
return Promise.resolve([false, this.getResult(false)]);
}
for (const auth of this.exclude) {
if (await this.resources.testAuthorCriteria(item, auth, false)) {
return Promise.resolve([true, this.getResult(true)]);
}
}
return Promise.resolve([false, this.getResult(false)]);
const [result, filterType] = await checkAuthorFilter(item, {include: this.include, exclude: this.exclude}, this.resources, this.logger);
return Promise.resolve([result, this.getResult(result)]);
}
}

View File

@@ -1,6 +1,7 @@
import {Rule, RuleJSONConfig, RuleOptions, RulePremise, RuleResult} from "./index";
import {Comment, VoteableContent} from "snoowrap";
import {VoteableContent} from "snoowrap";
import Submission from "snoowrap/dist/objects/Submission";
import Comment from "snoowrap/dist/objects/Comment";
import as from 'async';
import pMap from 'p-map';
// @ts-ignore
@@ -23,7 +24,7 @@ import {
parseSubredditName,
parseUsableLinkIdentifier,
PASS, sleep,
toStrongSubredditState
toStrongSubredditState, windowToActivityWindowCriteria
} from "../util";
import {
ActivityWindow,
@@ -43,7 +44,7 @@ const parseLink = parseUsableLinkIdentifier();
export class RecentActivityRule extends Rule {
window: ActivityWindowType;
thresholds: ActivityThreshold[];
useSubmissionAsReference: boolean;
useSubmissionAsReference: boolean | undefined;
imageDetection: StrongImageDetection
lookAt?: 'comments' | 'submissions';
@@ -51,7 +52,7 @@ export class RecentActivityRule extends Rule {
super(options);
const {
window = 15,
useSubmissionAsReference = true,
useSubmissionAsReference,
imageDetection,
lookAt,
} = options || {};
@@ -115,20 +116,53 @@ export class RecentActivityRule extends Rule {
async process(item: Submission | Comment): Promise<[boolean, RuleResult]> {
let activities;
// ACID is a bitch
// reddit may not return the activity being checked in the author's recent history due to availability/consistency issues or *something*
// so make sure we add it in if config is checking the same type and it isn't included
// TODO refactor this for SubredditState everywhere branch
let shouldIncludeSelf = true;
const strongWindow = windowToActivityWindowCriteria(this.window);
const {
subreddits: {
include = [],
exclude = []
} = {}
} = strongWindow;
if (include.length > 0 && !include.some(x => x.toLocaleLowerCase() === item.subreddit.display_name.toLocaleLowerCase())) {
shouldIncludeSelf = false;
} else if (exclude.length > 0 && exclude.some(x => x.toLocaleLowerCase() === item.subreddit.display_name.toLocaleLowerCase())) {
shouldIncludeSelf = false;
}
switch (this.lookAt) {
case 'comments':
activities = await this.resources.getAuthorComments(item.author, {window: this.window});
if (shouldIncludeSelf && item instanceof Comment && !activities.some(x => x.name === item.name)) {
activities.unshift(item);
}
break;
case 'submissions':
activities = await this.resources.getAuthorSubmissions(item.author, {window: this.window});
if (shouldIncludeSelf && item instanceof Submission && !activities.some(x => x.name === item.name)) {
activities.unshift(item);
}
break;
default:
activities = await this.resources.getAuthorActivities(item.author, {window: this.window});
if (shouldIncludeSelf && !activities.some(x => x.name === item.name)) {
activities.unshift(item);
}
break;
}
let viableActivity = activities;
if (this.useSubmissionAsReference) {
// if config does not specify reference then we set the default based on whether the item is a submission or not
// -- this is essentially the same as defaulting reference to true BUT eliminates noisy "can't use comment as reference" log statement when item is a comment
let inferredSubmissionAsRef = this.useSubmissionAsReference;
if(inferredSubmissionAsRef === undefined) {
inferredSubmissionAsRef = isSubmission(item);
}
if (inferredSubmissionAsRef) {
if (!asSubmission(item)) {
this.logger.warn('Cannot use post as reference because triggered item is not a Submission');
} else if (item.is_self) {
@@ -165,7 +199,7 @@ export class RecentActivityRule extends Rule {
// if (referenceImage.preferredResolution !== undefined) {
// await (referenceImage.getSimilarResolutionVariant(...referenceImage.preferredResolution) as ImageData).sharp();
// }
} catch (err) {
} catch (err: any) {
this.logger.verbose(err.message);
}
}
@@ -241,11 +275,11 @@ export class RecentActivityRule extends Rule {
if (sameImage) {
return x;
}
} catch (err) {
} catch (err: any) {
this.logger.warn(`Unexpected error encountered while pixel-comparing images, will skip comparison => ${err.message}`);
}
}
} catch (err) {
} catch (err: any) {
if(!err.message.includes('did not end with a valid image extension')) {
this.logger.warn(`Will not compare image from Submission ${x.id} due to error while parsing image URL => ${err.message}`);
}
@@ -310,34 +344,6 @@ export class RecentActivityRule extends Rule {
}
}
for (const activity of viableActivity) {
if (asSubmission(activity) && submissionState !== undefined) {
if (!(await this.resources.testItemCriteria(activity, [submissionState]))) {
continue;
}
} else if (commentState !== undefined) {
if (!(await this.resources.testItemCriteria(activity, [commentState]))) {
continue;
}
}
let inSubreddits = false;
for (const ss of subStates) {
const res = await this.resources.testSubredditCriteria(activity, ss);
if (res) {
inSubreddits = true;
break;
}
}
if (inSubreddits) {
currCount++;
combinedKarma += activity.score;
const pSub = getActivitySubredditName(activity);
if (!presentSubs.includes(pSub)) {
presentSubs.push(pSub);
}
}
}
const {operator, value, isPercent} = parseGenericValueOrPercentComparison(threshold);
let sum = {
subsWithActivity: presentSubs,
@@ -421,6 +427,7 @@ export class RecentActivityRule extends Rule {
threshold,
testValue,
karmaThreshold,
combinedKarma,
}
};
}
@@ -501,6 +508,16 @@ interface RecentActivityConfig extends ActivityWindow, ReferenceSubmission {
thresholds: ActivityThreshold[],
imageDetection?: ImageDetection
/**
* When the Activity is a submission, should we only include activities that are other submissions with the same content?
*
* * When the Activity is a submission this defaults to **true**
* * When the Activity is a comment it is ignored (not relevant)
*
* @default true
* */
useSubmissionAsReference?: boolean
}
export interface RecentActivityRuleOptions extends RecentActivityConfig, RuleOptions {

View File

@@ -11,7 +11,7 @@ import {
ActivityWindowType, JoinOperands,
} from "../Common/interfaces";
import dayjs from 'dayjs';
import SimpleError from "../Utils/SimpleError";
import {SimpleError} from "../Utils/Errors";
export interface RegexCriteria {
/**
@@ -152,7 +152,8 @@ export class RegexRule extends Rule {
}, []);
// check regex
const reg = parseStringToRegex(regex, 'g');
const regexContent = await this.resources.getContent(regex);
const reg = parseStringToRegex(regexContent, 'g');
if(reg === undefined) {
throw new SimpleError(`Value given for regex on Criteria ${name} was not valid: ${regex}`);
}
@@ -257,7 +258,7 @@ export class RegexRule extends Rule {
const critResults = {
criteria: {
name,
regex,
regex: regex !== regexContent ? `${regex} from ${regexContent}` : regex,
testOn,
matchThreshold,
activityMatchThreshold,
@@ -296,6 +297,7 @@ export class RegexRule extends Rule {
const logSummary: string[] = [];
let index = 0;
let matchSample = undefined;
for (const c of criteriaResults) {
index++;
let msg = `Criteria ${c.criteria.name || `#${index}`} ${triggeredIndicator(c.triggered)}`;
@@ -309,8 +311,8 @@ export class RegexRule extends Rule {
}
msg = `${msg} (Window: ${c.criteria.window})`;
if(c.matches.length > 0) {
let matchSample = `-- Matched Values: ${c.matches.slice(0, 3).map(x => `"${x}"`).join(', ')}${c.matches.length > 3 ? `, and ${c.matches.length - 3} more...` : ''}`;
logSummary.push(`${msg} ${matchSample}`);
matchSample = `${c.matches.slice(0, 3).map(x => `"${x}"`).join(', ')}${c.matches.length > 3 ? `, and ${c.matches.length - 3} more...` : ''}`;
logSummary.push(`${msg} -- Matched Values: ${matchSample}`);
} else {
logSummary.push(msg);
}
@@ -319,7 +321,7 @@ export class RegexRule extends Rule {
const result = `${triggeredIndicator(criteriaMet)} ${logSummary.join(' || ')}`;
this.logger.verbose(result);
return Promise.resolve([criteriaMet, this.getResult(criteriaMet, {result, data: criteriaResults})]);
return Promise.resolve([criteriaMet, this.getResult(criteriaMet, {result, data: {results: criteriaResults, matchSample }})]);
}
protected getMatchesFromActivity(a: (Submission | Comment), testOn: string[], reg: RegExp): string[] {

View File

@@ -1,17 +1,28 @@
import {Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
import {Comment} from "snoowrap";
import {
activityWindowText, asSubmission,
comparisonTextOp, FAIL, getActivitySubredditName, isExternalUrlSubmission, isRedditMedia,
parseGenericValueComparison, parseSubredditName,
parseUsableLinkIdentifier as linkParser, PASS, subredditStateIsNameOnly, toStrongSubredditState
activityWindowText,
asSubmission,
comparisonTextOp,
FAIL,
getActivitySubredditName,
isExternalUrlSubmission,
isRedditMedia,
parseGenericValueComparison,
parseSubredditName,
parseUsableLinkIdentifier as linkParser,
PASS,
searchAndReplace,
stringSameness,
subredditStateIsNameOnly,
toStrongSubredditState
} from "../util";
import {
ActivityWindow,
ActivityWindowType,
ReferenceSubmission,
ReferenceSubmission, SearchAndReplaceRegExp,
StrongSubredditState,
SubredditState
SubredditState, TextMatchOptions, TextTransformOptions
} from "../Common/interfaces";
import Submission from "snoowrap/dist/objects/Submission";
import dayjs from "dayjs";
@@ -29,27 +40,6 @@ interface RepeatActivityReducer {
allSets: RepeatActivityData[]
}
const getActivityIdentifier = (activity: (Submission | Comment), length = 200) => {
let identifier: string;
if (asSubmission(activity)) {
if (activity.is_self) {
identifier = `${activity.title}${activity.selftext.slice(0, length)}`;
} else if(isRedditMedia(activity)) {
identifier = activity.title;
} else {
identifier = parseUsableLinkIdentifier(activity.url) as string;
}
} else {
identifier = activity.body.slice(0, length);
}
return identifier;
}
const fuzzyOptions = {
includeScore: true,
distance: 15
};
export class RepeatActivityRule extends Rule {
threshold: string;
window: ActivityWindowType;
@@ -62,6 +52,9 @@ export class RepeatActivityRule extends Rule {
activityFilterFunc: (x: Submission|Comment) => Promise<boolean> = async (x) => true;
keepRemoved: boolean;
minWordCount: number;
transformations: SearchAndReplaceRegExp[]
caseSensitive: boolean
matchScore: number
constructor(options: RepeatActivityOptions) {
super(options);
@@ -75,7 +68,13 @@ export class RepeatActivityRule extends Rule {
include = [],
exclude = [],
keepRemoved = false,
transformations = [],
caseSensitive = true,
matchScore = 85,
} = options;
this.matchScore = matchScore;
this.transformations = transformations;
this.caseSensitive = caseSensitive;
this.minWordCount = minWordCount;
this.keepRemoved = keepRemoved;
this.threshold = threshold;
@@ -136,6 +135,37 @@ export class RepeatActivityRule extends Rule {
}
}
getActivityIdentifier(activity: (Submission | Comment), length = 200, transform = true) {
let identifier: string;
if (asSubmission(activity)) {
if (activity.is_self) {
identifier = `${activity.title}${activity.selftext.slice(0, length)}`;
} else if(isRedditMedia(activity)) {
identifier = activity.title;
} else {
identifier = parseUsableLinkIdentifier(activity.url) as string;
}
} else {
identifier = activity.body.slice(0, length);
}
if(!transform) {
return identifier;
}
// apply any transforms
if (this.transformations.length > 0) {
identifier = searchAndReplace(identifier, this.transformations);
}
// perform after transformations so as not to mess up regex's depending on case
if(!this.caseSensitive) {
identifier = identifier.toLowerCase();
}
return identifier;
}
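The normalization above boils down to: apply the configured transformations first, then lowercase. A minimal standalone sketch of that behavior (the SearchAndReplaceRegExp shape and the searchAndReplace helper from ../util are assumptions here, since their definitions are not part of this diff):
// Assumed shape -- the real SearchAndReplaceRegExp interface lives elsewhere in the repo
interface SearchAndReplaceSketch {
    search: RegExp;
    replace: string;
}
// Stand-in for the searchAndReplace util used above
const searchAndReplaceSketch = (text: string, ops: SearchAndReplaceSketch[]): string =>
    ops.reduce((acc, op) => acc.replace(op.search, op.replace), text);
const normalizeIdentifier = (identifier: string, transformations: SearchAndReplaceSketch[], caseSensitive: boolean): string => {
    let result = searchAndReplaceSketch(identifier, transformations);
    // lowercase AFTER transformations so case-dependent regexes still see the original casing
    if (!caseSensitive) {
        result = result.toLowerCase();
    }
    return result;
};
// normalizeIdentifier('Check https://youtu.be/abc123 NOW', [{search: /https?:\S+/g, replace: ''}], false)
// => 'check  now'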
async process(item: Submission|Comment): Promise<[boolean, RuleResult]> {
let referenceUrl;
if(asSubmission(item) && this.useSubmissionAsReference) {
@@ -162,9 +192,10 @@ export class RepeatActivityRule extends Rule {
const acc = await accProm;
const {openSets = [], allSets = []} = acc;
let identifier = getActivityIdentifier(activity);
let identifier = this.getActivityIdentifier(activity);
const isUrl = isExternalUrlSubmission(activity);
let fu = new Fuse([identifier], !isUrl ? fuzzyOptions : {...fuzzyOptions, distance: 5});
//let fu = new Fuse([identifier], !isUrl ? fuzzyOptions : {...fuzzyOptions, distance: 5});
const validSub = await this.activityFilterFunc(activity);
let minMet = identifier.length >= this.minWordCount;
@@ -174,12 +205,15 @@ export class RepeatActivityRule extends Rule {
let currIdentifierInOpen = false;
const bufferedActivities = this.gapAllowance === undefined || this.gapAllowance === 0 ? [] : activities.slice(Math.max(0, index - this.gapAllowance), Math.max(0, index));
for (const o of openSets) {
const res = fu.search(o.identifier);
const match = res.length > 0;
if (match && validSub && minMet) {
const strMatchResults = stringSameness(o.identifier, identifier);
if (strMatchResults.highScoreWeighted >= this.matchScore && minMet) {
updatedOpenSets.push({...o, sets: [...o.sets, activity]});
currIdentifierInOpen = true;
} else if (bufferedActivities.some(x => fu.search(getActivityIdentifier(x)).length > 0) && validSub && minMet) {
} else if (bufferedActivities.some(x => {
let buffIdentifier = this.getActivityIdentifier(x);
const buffMatch = stringSameness(identifier, buffIdentifier);
return buffMatch.highScoreWeighted >= this.matchScore;
}) && validSub && minMet) {
updatedOpenSets.push(o);
} else if(!currIdentifierInOpen && !isUrl) {
updatedAllSets.push(o);
@@ -193,15 +227,18 @@ export class RepeatActivityRule extends Rule {
// could be that a spammer is using different URLs for each submission but similar submission titles so search by title as well
const sub = activity as Submission;
identifier = sub.title;
fu = new Fuse([identifier], !isUrl ? fuzzyOptions : {...fuzzyOptions, distance: 5});
//fu = new Fuse([identifier], !isUrl ? fuzzyOptions : {...fuzzyOptions, distance: 5});
minMet = identifier.length >= this.minWordCount;
for (const o of openSets) {
const res = fu.search(o.identifier);
const match = res.length > 0;
if (match && validSub && minMet) {
const strMatchResults = stringSameness(o.identifier, identifier);
if (strMatchResults.highScoreWeighted >= this.matchScore && minMet) {
updatedOpenSets.push({...o, sets: [...o.sets, activity]});
currIdentifierInOpen = true;
} else if (bufferedActivities.some(x => fu.search(getActivityIdentifier(x)).length > 0) && validSub && minMet && !updatedOpenSets.includes(o)) {
} else if (bufferedActivities.some(x => {
let buffIdentifier = this.getActivityIdentifier(x);
const buffMatch = stringSameness(identifier, buffIdentifier);
return buffMatch.highScoreWeighted >= this.matchScore;
}) && validSub && minMet && !updatedOpenSets.includes(o)) {
updatedOpenSets.push(o);
} else if(!updatedAllSets.includes(o)) {
updatedAllSets.push(o);
@@ -232,7 +269,7 @@ export class RepeatActivityRule extends Rule {
let applicableGroupedActivities = identifierGroupedActivities;
if (this.useSubmissionAsReference) {
applicableGroupedActivities = new Map();
let identifier = getActivityIdentifier(item);
let identifier = this.getActivityIdentifier(item);
let referenceSubmissions = identifierGroupedActivities.get(identifier);
if(referenceSubmissions === undefined && isExternalUrlSubmission(item)) {
// if external url sub then try by title
@@ -240,7 +277,7 @@ export class RepeatActivityRule extends Rule {
referenceSubmissions = identifierGroupedActivities.get(identifier);
if(referenceSubmissions === undefined) {
// didn't get by title so go back to url since that's the default
identifier = getActivityIdentifier(item);
identifier = this.getActivityIdentifier(item);
}
}
@@ -265,7 +302,7 @@ export class RepeatActivityRule extends Rule {
};
for (let set of value) {
const test = comparisonTextOp(set.length, operator, thresholdValue);
const md = set.map((x: (Comment | Submission)) => `[${asSubmission(x) ? x.title : getActivityIdentifier(x, 50)}](https://reddit.com${x.permalink}) in ${x.subreddit_name_prefixed} on ${dayjs(x.created_utc * 1000).utc().format()}`);
const md = set.map((x: (Comment | Submission)) => `[${asSubmission(x) ? x.title : this.getActivityIdentifier(x, 50)}](https://reddit.com${x.permalink}) in ${x.subreddit_name_prefixed} on ${dayjs(x.created_utc * 1000).utc().format()}`);
summaryData.sets.push(set);
summaryData.largestTrigger = Math.max(summaryData.largestTrigger, set.length);
@@ -325,7 +362,7 @@ interface SummaryData {
triggeringSetsMarkdown: string[]
}
interface RepeatActivityConfig extends ActivityWindow, ReferenceSubmission {
interface RepeatActivityConfig extends ActivityWindow, ReferenceSubmission, TextMatchOptions {
/**
* The number of repeat submissions that will trigger the rule
* @default ">= 5"
@@ -383,18 +420,9 @@ interface RepeatActivityConfig extends ActivityWindow, ReferenceSubmission {
keepRemoved?: boolean
/**
* For activities that are text-based this is the minimum number of words required for the activity to be considered for a repeat
*
* EX if `minimumWordCount=5` and a comment is `what about you` then it is ignored because `3 is less than 5`
*
* **For self-text submissions** -- title + body text
*
* **For comments** -- body text
*
* @default 1
* @example [1]
* A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.
* */
minWordCount?: number,
transformations?: SearchAndReplaceRegExp[]
}
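For reference, a hypothetical repeatActivity configuration exercising the new text-matching options could look like the following sketch (the exact SearchAndReplaceRegExp fields are an assumption, not shown in this diff):
const repeatRuleExample = {
    kind: 'repeatActivity',
    threshold: '>= 5',
    window: 100,
    matchScore: 85,        // weighted sameness (%) required to treat two activities as repeats
    caseSensitive: false,  // lowercasing is applied after transformations
    minWordCount: 5,
    transformations: [
        // hypothetical transform: strip raw URLs before comparing text
        { search: 'https?:\\/\\/\\S+', replace: '' }
    ]
};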
export interface RepeatActivityOptions extends RepeatActivityConfig, RuleOptions {

844
src/Rule/RepostRule.ts Normal file
View File

@@ -0,0 +1,844 @@
import {Rule, RuleJSONConfig, RuleOptions, RuleResult} from "./index";
import {Listing, SearchOptions} from "snoowrap";
import Submission from "snoowrap/dist/objects/Submission";
import Comment from "snoowrap/dist/objects/Comment";
import {
compareDurationValue,
comparisonTextOp,
FAIL, formatNumber,
isRepostItemResult, parseDurationComparison, parseGenericValueComparison,
parseUsableLinkIdentifier,
PASS, searchAndReplace, stringSameness, triggeredIndicator, windowToActivityWindowCriteria, wordCount
} from "../util";
import {
ActivityWindow,
ActivityWindowType,
CompareValue, DurationComparor,
JoinOperands,
RepostItem,
RepostItemResult,
SearchAndReplaceRegExp,
SearchFacetType, TextMatchOptions, TextTransformOptions,
} from "../Common/interfaces";
import objectHash from "object-hash";
import {getActivities, getAttributionIdentifier} from "../Utils/SnoowrapUtils";
import Fuse from "fuse.js";
import leven from "leven";
import {YoutubeClient, commentsAsRepostItems} from "../Utils/ThirdParty/YoutubeClient";
import dayjs from "dayjs";
import {rest} from "lodash";
const parseYtIdentifier = parseUsableLinkIdentifier();
export interface SearchFacetJSONConfig extends TextMatchOptions, TextTransformOptions, ActivityWindow {
kind: SearchFacetType | SearchFacetType[]
}
export interface SearchFacet extends SearchFacetJSONConfig {
kind: SearchFacetType
}
export type TimeBasedSelector = "newest" | "oldest" | "any" | "all";
export interface OccurredAt {
/**
* Which repost to test on
*
* * `newest` -- only the most recent repost found must pass `condition`
* * `oldest` -- only the earliest repost found must pass `condition`
* * `any` -- ANY repost passing `condition` will cause this criteria to be true
* * `all` -- ALL reposts must pass `condition` for this criteria to be true
* */
"testOn": TimeBasedSelector,
"condition": DurationComparor
}
export interface OccurrenceTests {
count?: {
condition?: JoinOperands
/**
* An array of strings containing a comparison operator and the number of repost occurrences to compare against
*
* Examples:
*
* * `">= 7"` -- TRUE if 7 or more reposts were found
* * `"< 1"` -- TRUE if less than 0 reposts were found
* */
test: CompareValue[]
}
/**
* Test the time the reposts occurred at
* */
time?: {
/**
* How to test all the specified comparisons
*
* * AND -- All criteria must be true
* * OR -- Any criteria must be true
*
* Defaults to AND
*
* @default AND
* @example ["AND", "OR"]
* */
condition?: JoinOperands
/**
* An array of time-based conditions to test against found reposts (test when a repost was made)
* */
test: OccurredAt[]
}
}
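A possible OccurrenceTests value, assuming CompareValue and DurationComparor accept comparison strings like the examples in the comments above; it passes only if 3 or more reposts were found AND the newest repost is less than 30 days old:
const occurrenceExample: OccurrenceTests = {
    count: {
        condition: 'AND',
        test: ['>= 3']
    },
    time: {
        condition: 'AND',
        test: [
            { testOn: 'newest', condition: '< 30 days' }
        ]
    }
};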
/**
* A set of criteria used to find reposts
*
* Contains options and conditions used to define how candidate reposts are retrieved and if they are a match.
*
* */
export interface RepostCriteria extends ActivityWindow, TextMatchOptions, TextTransformOptions {
/**
* Define how to find candidate reposts
*
* * **title** -- search reddit for submissions with the same title
* * **url** -- search reddit for submissions with the same url
* * **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)
* */
searchOn?: (SearchFacetType | SearchFacetJSONConfig)[]
/**
* A set of comparisons to test against the number of reposts found
*
* If not specified the default is "AND [occurrences] > 0" IE any repost found makes this test pass
* */
occurrences?: {
/**
* How to test all the specified comparisons
*
* * AND -- All criteria must be true
* * OR -- Any criteria must be true
*
* Defaults to AND
*
* @default AND
* @example ["AND", "OR"]
* */
condition?: JoinOperands
criteria?: OccurrenceTests[]
}
/**
* Test the time the reposts occurred at
* */
occurredAt?: {
/**
* How to test all the specified comparisons
*
* * AND -- All criteria must be true
* * OR -- Any criteria must be true
*
* Defaults to AND
*
* @default AND
* @example ["AND", "OR"]
* */
condition?: JoinOperands
/**
* An array of time-based conditions to test against found reposts (test when a repost was made)
* */
criteria: OccurredAt[]
}
/**
* The maximum number of comments/submissions to check
*
* In both cases this list is gathered by sorting all submissions, or all comments from all submissions, by number of votes and taking the "top" maximum specified
*
* For comment checks this is the number of comments cached
*
* @default 50
* @example [50]
* */
maxRedditItems?: number
/**
* The maximum number of external items (youtube comments) to check (and cache for comment checks)
*
* @default 50
* @example [50]
* */
maxExternalItems?: number
}
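One plausible RepostCriteria, assuming the interface above (matchScore, caseSensitive, minWordCount, and transformations come in via TextMatchOptions/TextTransformOptions, per the generated schema further down): look for submissions with the same title or url within the last 90 days and require at least one match.
const repostCriteriaExample: RepostCriteria = {
    searchOn: ['title', 'url', 'duplicates'],
    matchScore: 90,
    maxRedditItems: 50,
    maxExternalItems: 50,
    window: '90 days',
    occurrences: {
        condition: 'AND',
        criteria: [
            { count: { test: ['> 0'] } }
        ]
    }
};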
export interface CriteriaResult {
passed: boolean
conditionsSummary: string
items: RepostItemResult[]
}
const parentSubmissionSearchFacetDefaults = {
title: {
matchScore: 85,
minWordCount: 3
},
url: {
matchScore: 0, // when looking for submissions to find repost comments on automatically include any with exact same url
},
duplicates: {
matchScore: 0, // when looking for submissions to find repost comments on automatically include any that reddit thinks are duplicates
},
crossposts: {
matchScore: 0, // when looking for submissions to find repost comments on automatically include any that reddit thinks are crossposts
},
external: {}
}
const isSearchFacetType = (val: any): val is SearchFacetType => {
if (typeof val === 'string') {
return ['title', 'url', 'duplicates', 'crossposts', 'external'].includes(val);
}
return false;
}
const generateSearchFacet = (val: SearchFacetType | SearchFacetJSONConfig): SearchFacet[] => {
let facets: SearchFacet[] = [];
if (isSearchFacetType(val)) {
facets.push({
kind: val
});
} else if (Array.isArray(val.kind)) {
// concat returns a new array, so the result must be assigned back
facets = facets.concat(val.kind.map(x => ({...val, kind: x})));
} else {
facets.push(val as SearchFacet);
}
return facets.map(x => {
return {
...parentSubmissionSearchFacetDefaults[x.kind],
...x,
}
});
}
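A quick usage sketch of the facet expansion above; the result shape, with the per-kind defaults merged in, is shown in the trailing comment.
const expanded = [
    ...generateSearchFacet('title'),
    ...generateSearchFacet({ kind: ['url', 'duplicates'], matchScore: 50 })
];
// expanded => [
//   { kind: 'title', matchScore: 85, minWordCount: 3 },
//   { kind: 'url', matchScore: 50 },
//   { kind: 'duplicates', matchScore: 50 }
// ]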
export class RepostRule extends Rule {
criteria: RepostCriteria[]
condition: JoinOperands;
submission?: Submission;
constructor(options: RepostRuleOptions) {
super(options);
const {
criteria = [{}],
condition = 'OR'
} = options || {};
if (criteria.length < 1) {
throw new Error('Must provide at least one RepostCriteria');
}
this.criteria = criteria;
this.condition = condition;
}
getKind(): string {
return 'Repost';
}
protected getSpecificPremise(): object {
return {
criteria: this.criteria,
condition: this.condition
}
}
// @ts-ignore
protected async getSubmission(item: Submission | Comment) {
if (item instanceof Comment) {
// @ts-ignore
return await this.client.getSubmission(item.link_id).fetch();
}
return item;
}
protected async process(item: Submission | Comment): Promise<[boolean, RuleResult]> {
let criteriaResults: CriteriaResult[] = [];
let ytClient: YoutubeClient | undefined = undefined;
let criteriaMatchedResults: RepostItemResult[] = [];
let totalSubs = 0;
let totalCommentSubs = 0;
let totalComments = 0;
let totalExternal = new Map<string,number>();
let fromCache = false;
let andFail = false;
for (const rCriteria of this.criteria) {
criteriaMatchedResults = [];
const {
searchOn = (item instanceof Submission ? ['title', 'url', 'duplicates', 'crossposts'] : ['external', 'title', 'url', 'duplicates', 'crossposts']),
//criteria = {},
maxRedditItems = 50,
maxExternalItems = 50,
window = 20,
...restCriteria
} = rCriteria;
const searchFacets = searchOn.map(x => generateSearchFacet(x)).flat(1) as SearchFacet[];
const includeCrossposts = searchFacets.some(x => x.kind === 'crossposts');
// in getDuplicate() options add "crossposts_only=1" to get only crossposts https://www.reddit.com/r/redditdev/comments/b4t5g4/get_all_the_subreddits_that_a_post_has_been/
// if a submission is a crosspost it has "crosspost_parent" attribute https://www.reddit.com/r/redditdev/comments/l46y2l/check_if_post_is_a_crosspost/
const strongWindow = windowToActivityWindowCriteria(window);
const candidateHash = `repostItems-${item instanceof Submission ? item.id : item.link_id}-${objectHash.sha1({
window,
searchOn
})}`;
let items: (RepostItem|RepostItemResult)[] = [];
let cacheRes = undefined;
if (item instanceof Comment) {
cacheRes = await this.resources.cache.get(candidateHash) as ((RepostItem|RepostItemResult)[] | undefined | null);
}
if (cacheRes === undefined || cacheRes === null) {
const sub = await this.getSubmission(item);
let dups: (Submission[] | undefined) = undefined;
for (const sf of searchFacets) {
const {
matchScore = 85,
minWordCount = 3,
transformations = [],
} = sf;
if (sf.kind === 'external') {
const attribution = getAttributionIdentifier(sub);
switch (attribution.provider) {
case 'YouTube':
const ytCreds = this.resources.getThirdPartyCredentials('youtube')
if (ytCreds === undefined) {
throw new Error('Cannot extract comments from Youtube because a Youtube Data API key was not provided in configuration');
}
if (ytClient === undefined) {
ytClient = new YoutubeClient(ytCreds.apiKey);
}
const ytComments = commentsAsRepostItems(await ytClient.getVideoTopComments(sub.url, maxExternalItems));
items = items.concat(ytComments)
totalExternal.set('Youtube comments', (totalExternal.get('Youtube comments') ?? 0) + ytComments.length);
break;
default:
if (attribution.provider === undefined) {
this.logger.debug('Unable to determine external provider');
continue;
} else {
this.logger.debug(`External parsing of ${attribution.provider} is not supported yet.`);
continue;
}
}
} else {
let subs: Submission[];
if (['title', 'url'].includes(sf.kind)) {
let query: string;
let searchFunc: (limit: number) => Promise<Listing<Submission | Comment>>;
if (sf.kind === 'title') {
query = (await this.getSubmission(item)).title;
searchFunc = (limit: number) => {
let opts: SearchOptions = {
query,
limit,
sort: 'relevance'
};
if (strongWindow.subreddits?.include !== undefined && strongWindow.subreddits?.include.length > 0) {
opts.restrictSr = true;
opts.subreddit = strongWindow.subreddits?.include.join('+');
}
return this.client.search(opts);
}
} else {
const attr = getAttributionIdentifier(sub);
if (attr.provider === 'YouTube') {
const ytId = parseYtIdentifier(sub.url);
query = `url:https://youtu.be/${ytId}`;
} else {
query = `url:${sub.url}`;
}
searchFunc = (limit: number) => {
let opts: SearchOptions = {
query,
limit,
sort: 'top'
};
if (strongWindow.subreddits?.include !== undefined && strongWindow.subreddits?.include.length > 0) {
opts.restrictSr = true;
opts.subreddit = strongWindow.subreddits?.include.join('+');
}
return this.client.search(opts);
}
}
subs = await getActivities(searchFunc, {window: strongWindow}) as Submission[];
} else {
if (dups === undefined) {
let searchFunc: (limit: number) => Promise<Listing<Submission | Comment>> = (limit: number) => {
// this does not work correctly
// see https://github.com/not-an-aardvark/snoowrap/issues/320
// searchFunc = (limit: number) => {
// return sub.getDuplicates({crossposts_only: 0, limit});
// };
return this.client.oauthRequest({
uri: `duplicates/${sub.id}`,
qs: {
limit,
}
}).then(x => {
return Promise.resolve(x.comments) as Promise<Listing<Submission>>
});
};
subs = await getActivities(searchFunc, {window: strongWindow}) as Submission[];
dups = subs;
} else {
subs = dups;
}
if (sf.kind === 'duplicates') {
// @ts-ignore
subs = subs.filter(x => x.crosspost_parent === undefined)
} else {
// @ts-ignore
subs = subs.filter(x => x.crosspost_parent !== undefined && x.crosspost_parent === sub.id)
}
}
// filter by minimum word count
subs = subs.filter(x => wordCount(x.title) > minWordCount);
items = items.concat(subs.map(x => ({
value: searchAndReplace(x.title, transformations),
createdOn: x.created,
source: 'reddit',
sourceUrl: x.permalink,
id: x.id,
score: x.score,
itemType: 'submission',
acquisitionType: sf.kind,
sourceObj: x,
reqSameness: matchScore,
})));
}
}
if (!includeCrossposts) {
const sub = await this.getSubmission(item);
// remove submissions if they are official crossposts of the submission being checked and searchOn did not include 'crossposts'
items = items.filter(x => x.itemType !== 'submission' || !(x.sourceObj.crosspost_parent !== undefined && x.sourceObj.crosspost_parent === sub.id))
}
let sourceTitle = searchAndReplace(sub.title, restCriteria.transformationsActivity ?? []);
// do submission scoring BEFORE pruning duplicates bc...
// might end up in a situation where we get same submission for both title and url
// -- url is always a repost but title is not guaranteed and we if remove the url item but not the title we could potentially filter the title submission out and miss this repost
items = items.reduce((acc: (RepostItem|RepostItemResult)[], x) => {
if(x.itemType === 'submission') {
totalSubs++;
const sf = searchFacets.find(y => y.kind === x.acquisitionType) as SearchFacet;
let cleanTitle = x.value;
if (!(sf.caseSensitive ?? false)) {
cleanTitle = cleanTitle.toLowerCase();
}
const strMatchResults = stringSameness(sourceTitle, cleanTitle);
if(strMatchResults.highScoreWeighted >= (x.reqSameness as number)) {
return acc.concat({
...x,
sameness: Math.min(strMatchResults.highScoreWeighted, 100),
});
}
return acc;
}
return acc.concat(x);
}, []);
// now remove duplicate submissions
items = items.reduce((acc: RepostItem[], curr) => {
if(curr.itemType !== 'submission') {
return acc.concat(curr);
}
const subId = curr.sourceObj.id;
if (sub.id !== subId && !acc.some(x => x.itemType === 'submission' && x.sourceObj.id === subId)) {
return acc.concat(curr);
}
return acc;
}, []);
if (item instanceof Comment) {
// we need to gather comments from submissions
// first cut down the number of submissions to retrieve because we don't care about having ALL submissions,
// just most popular comments (which will be in the most popular submissions)
let subs = items.filter(x => x.itemType === 'submission').map(x => x.sourceObj) as Submission[];
totalCommentSubs += subs.length;
const nonSubItems = items.filter(x => x.itemType !== 'submission' && wordCount(x.value) > (restCriteria.minWordCount ?? 3));
subs.sort((a, b) => a.score - b.score).reverse();
// take top 10 submissions
subs = subs.slice(0, 10);
let comments: Comment[] = [];
for (const sub of subs) {
const commFunc = (limit: number) => {
return this.client.oauthRequest({
uri: `${sub.subreddit_name_prefixed}/comments/${sub.id}`,
// get ONLY top-level comments, sorted by Top
qs: {
sort: 'top',
depth: 0,
limit,
}
}).then(x => {
return x.comments as Promise<Listing<Comment>>
});
}
// and return the top 20 most popular
const subComments = await getActivities(commFunc, {window: {count: 20}, skipReplies: true}) as Listing<Comment>;
comments = comments.concat(subComments);
}
// sort by highest scores
comments.sort((a, b) => a.score - b.score).reverse();
// filter out all comments with fewer words than required (prevent false negatives)
comments = comments.filter(x => wordCount(x.body) > (restCriteria.minWordCount ?? 3));
totalComments += Math.min(comments.length, maxRedditItems);
// and take the user-defined maximum number of items
items = nonSubItems.concat(comments.slice(0, maxRedditItems).map(x => ({
value: searchAndReplace(x.body, restCriteria.transformations ?? []),
createdOn: x.created,
source: 'reddit',
id: x.id,
sourceUrl: x.permalink,
score: x.score,
itemType: 'comment',
acquisitionType: 'comment'
})));
}
// cache items for 20 minutes
await this.resources.cache.set(candidateHash, items, {ttl: 1200});
} else {
items = cacheRes;
totalExternal = items.reduce((acc, curr) => {
if(curr.acquisitionType === 'external') {
acc.set(`${curr.source} comments`, (acc.get(`${curr.source} comments`) ?? 0 ) + 1);
return acc;
}
return acc;
}, new Map<string, number>());
//totalSubs = items.filter(x => x.itemType === 'submission').length;
//totalCommentSubs = totalSubs;
totalComments = items.filter(x => x.itemType === 'comment' && x.source === 'reddit').length;
fromCache = true;
}
const {
matchScore = 85,
caseSensitive = false,
transformations = [],
transformationsActivity = transformations,
occurrences = {
condition: 'AND',
criteria: [
{
count: {
test: ['> 0']
}
}
]
},
} = restCriteria;
if(item instanceof Submission) {
// we've already done difference calculations in the searchFacet phase
// and when the check is for a sub it means we are only checking if the submissions has been reposted which means either:
// * very similar title (default sameness of 85% or more)
// * duplicate/same URL -- which is a repost, duh
// so just add all items to critMatches at this point
criteriaMatchedResults = criteriaMatchedResults.concat(items.filter(x => "sameness" in x) as RepostItemResult[]);
} else {
let sourceContent = searchAndReplace(item.body, transformationsActivity);
if (!caseSensitive) {
sourceContent = sourceContent.toLowerCase();
}
for (const i of items) {
const itemContent = !caseSensitive ? i.value.toLowerCase() : i.value;
const strMatchResults = stringSameness(sourceContent, itemContent);
if(strMatchResults.highScoreWeighted >= matchScore) {
criteriaMatchedResults.push({
...i,
// @ts-ignore
reqSameness: matchScore,
sameness: Math.min(strMatchResults.highScoreWeighted, 100)
});
}
}
}
// now do occurrence and time tests
const {
condition: occCondition = 'AND',
criteria: occCriteria = [
{
count: {
test: ['> 0']
}
}
]
} = occurrences;
let orPass = false;
let occurrenceReason = null;
for(const occurrenceTest of occCriteria) {
const {
count:{
condition: oCondition = 'AND',
test: oCriteria = []
} = {},
time: {
condition: tCondition = 'AND',
test: tCriteria = [],
} = {}
} = occurrenceTest;
let conditionFailSummaries = [];
const passedConditions = [];
const failedConditions = [];
for (const oc of oCriteria) {
const ocCompare = parseGenericValueComparison(oc);
const ocMatch = comparisonTextOp(criteriaMatchedResults.length, ocCompare.operator, ocCompare.value);
if (ocMatch) {
passedConditions.push(oc);
} else {
failedConditions.push(oc);
if (oCondition === 'AND') {
conditionFailSummaries.push(`(AND) ${oc} occurrences was not true`);
break;
}
}
}
if (passedConditions.length === 0 && oCriteria.length > 0) {
conditionFailSummaries.push('(OR) No occurrence tests passed');
}
const existingPassed = passedConditions.length;
if (conditionFailSummaries.length === 0) {
const timeAwareReposts = [...criteriaMatchedResults].filter(x => x.createdOn !== undefined).sort((a, b) => (a.createdOn as number) - (b.createdOn as number));
for (const tc of tCriteria) {
let toTest: RepostItemResult[] = [];
const durationCompare = parseDurationComparison(tc.condition);
switch (tc.testOn) {
case 'newest':
case 'oldest':
if (tc.testOn === 'newest') {
toTest = timeAwareReposts.slice(-1);
} else {
toTest = timeAwareReposts.slice(0, 1);
}
break;
case 'any':
case 'all':
toTest = timeAwareReposts;
break;
}
const timePass = tc.testOn === 'any' ? toTest.some(x => compareDurationValue(durationCompare, dayjs.unix(x.createdOn as number))) : toTest.every(x => compareDurationValue(durationCompare, dayjs.unix(x.createdOn as number)));
if (timePass) {
passedConditions.push(tc.condition);
} else {
failedConditions.push(tc.condition);
if (tCondition === 'AND') {
conditionFailSummaries.push(`(AND) ${tc.condition} was not true`);
break;
}
}
}
if (tCriteria.length > 0 && passedConditions.length === existingPassed) {
conditionFailSummaries.push('(OR) No time-based tests passed');
}
}
if(conditionFailSummaries.length !== 0 && occCondition === 'AND') {
// failed occurrence tests (high-level)
occurrenceReason = conditionFailSummaries.join(' | ');
break;
}
if(passedConditions.length > 0 && occCondition === 'OR') {
occurrenceReason = passedConditions.join(' | ');
orPass = true;
break;
}
}
let passed = occCriteria.length === 0;
if(occCriteria.length > 0) {
if(occCondition === 'OR') {
passed = orPass;
occurrenceReason = occurrenceReason === null ? 'No occurrence test sets passed' : occurrenceReason;
} else if(occCondition === 'AND') {
passed = occurrenceReason === null;
occurrenceReason = occurrenceReason === null ? 'All tests passed' : occurrenceReason;
}
//passed = (occCondition === 'OR' && orPass) || (occurrenceFailureReason === null && occCondition === 'AND')
}
const results = {
passed,
conditionsSummary: occurrenceReason as string,
items: criteriaMatchedResults
};
criteriaResults.push(results)
// condition short-circuiting: a failed criteria ends an AND rule, a passed criteria ends an OR rule
if(!results.passed && this.condition === 'AND') {
andFail = true;
break;
} else if(results.passed && this.condition === 'OR') {
break;
}
}
// get all repost items for stats and SCIENCE
const repostItemResults = [...criteriaResults
// only want reposts from criteria that passed
.filter(x => x.passed).map(x => x.items)
.flat()
// make sure we are only accumulating unique reposts
.reduce((acc, curr) => {
const hash = `${curr.source}-${curr.itemType}-${curr.id}`;
if (!acc.has(hash)) {
acc.set(hash, curr);
}
return acc;
}, new Map<string, RepostItemResult>()).values()];
repostItemResults.sort((a, b) => a.sameness - b.sameness).reverse();
const foundRepost = criteriaResults.length > 0;
let avgSameness = null;
let closestSummary = null;
let closestSameness = null;
let searchCandidateSummary = '';
if(item instanceof Comment) {
searchCandidateSummary = `Searched top ${totalComments} comments in top 10 ${fromCache ? '' : `of ${totalCommentSubs} `}most popular submissions`;
if(totalExternal.size > 0) {
searchCandidateSummary += ", ";
const extSumm: string[] = [];
totalExternal.forEach((v, k) => {
extSumm.push(`${v} ${k}`);
});
searchCandidateSummary += extSumm.join(', ');
}
} else {
searchCandidateSummary = `Searched ${totalSubs} submissions`
}
let summary = `${searchCandidateSummary} and found ${repostItemResults.length} reposts.`;
if(repostItemResults.length > 0) {
// average sameness across the repost items found
avgSameness = formatNumber(repostItemResults.reduce((acc, curr) => acc + curr.sameness, 0) / repostItemResults.length);
const closest = repostItemResults[0];
summary += ` --- Closest Match => >> ${closest.value} << from ${closest.source} (${closest.sourceUrl}) with ${formatNumber(closest.sameness)}% sameness.`
closestSummary = `matched a ${closest.itemType} from ${closest.source}`;
closestSameness = closest.sameness;
if(criteriaResults.length > 1) {
summary += ` Avg ${formatNumber(avgSameness)}%`;
}
}
let passed;
if(this.condition === 'AND') {
const failedCrit = criteriaResults.find(x => !x.passed);
if(failedCrit !== undefined) {
summary += ` BUT a criteria failed >> ${failedCrit.conditionsSummary} << and rule has AND condition.`;
passed = false;
} else {
passed = true;
}
} else {
const passedCrit = criteriaResults.find(x => x.passed);
if(passedCrit === undefined) {
summary += ` BUT all criteria failed`;
passed = false;
} else {
passed = true;
}
}
const result = `${passed ? PASS : FAIL} ${summary}`;
this.logger.verbose(result);
return [passed, this.getResult(passed, {
result,
data: {
allResults: criteriaResults,
closestSameness: passed ? formatNumber(closestSameness as number) : undefined,
closestSummary: passed ? closestSummary : undefined,
}
})];
}
}
interface RepostConfig {
/**
* A list of criteria used to find repost candidates and the conditions under which they count as a match
* @minItems 1
* @examples [{"searchOn": ["title", "url"], "occurrences": {"criteria": [{"count": {"test": ["> 0"]}}]}}]
* */
criteria?: RepostCriteria[]
/**
* * If `OR` then any set of Criteria that pass will trigger the Rule
* * If `AND` then all Criteria sets must pass to trigger the Rule
*
* @default "OR"
* */
condition?: 'AND' | 'OR'
}
export interface RepostRuleOptions extends RepostConfig, RuleOptions {
}
/**
* Search for reposts of a Submission or Comment
*
* For submissions the title or URL can be searched and matched against
* * For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against
*
* */
export interface RepostRuleJSONConfig extends RepostConfig, RuleJSONConfig {
/**
* @examples ["repost"]
* */
kind: 'repost'
}
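A hypothetical repost rule configuration matching this shape (RuleJSONConfig may carry additional optional fields such as name/itemIs/authorIs that are omitted here):
const repostRuleExample: RepostRuleJSONConfig = {
    kind: 'repost',
    condition: 'OR',
    criteria: [
        {
            searchOn: ['title', 'url', 'duplicates'],
            matchScore: 85,
            window: '90 days',
            occurrences: {
                criteria: [
                    { count: { test: ['> 0'] } }
                ]
            }
        }
    ]
};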

View File

@@ -8,6 +8,7 @@ import HistoryRule, {HistoryJSONConfig} from "./HistoryRule";
import RegexRule, {RegexRuleJSONConfig} from "./RegexRule";
import {SubredditResources} from "../Subreddit/SubredditResources";
import Snoowrap from "snoowrap";
import {RepostRule, RepostRuleJSONConfig} from "./RepostRule";
export function ruleFactory
(config: RuleJSONConfig, logger: Logger, subredditName: string, resources: SubredditResources, client: Snoowrap): Rule {
@@ -31,6 +32,9 @@ export function ruleFactory
case 'regex':
cfg = config as RegexRuleJSONConfig;
return new RegexRule({...cfg, logger, subredditName, resources, client});
case 'repost':
cfg = config as RepostRuleJSONConfig;
return new RepostRule({...cfg, logger, subredditName, resources, client});
default:
throw new Error('rule "kind" was not recognized.');
}
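A sketch of how the new kind flows through the factory; logger, resources, and client stand in for real instances and are only assumed here.
declare const logger: Logger;
declare const resources: SubredditResources;
declare const client: Snoowrap;
const repostCfg: RepostRuleJSONConfig = { kind: 'repost', criteria: [{ searchOn: ['title'] }] };
const repostRule = ruleFactory(repostCfg, logger, 'mySubreddit', resources, client);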

View File

@@ -2,7 +2,7 @@ import Snoowrap, {Comment} from "snoowrap";
import Submission from "snoowrap/dist/objects/Submission";
import {Logger} from "winston";
import {findResultByPremise, mergeArr} from "../util";
import {SubredditResources} from "../Subreddit/SubredditResources";
import {checkAuthorFilter, SubredditResources} from "../Subreddit/SubredditResources";
import {ChecksActivityState, TypedActivityStates} from "../Common/interfaces";
import Author, {AuthorOptions} from "../Author/Author";
@@ -65,6 +65,7 @@ export abstract class Rule implements IRule, Triggerable {
name = this.getKind(),
logger,
authorIs: {
excludeCondition = 'OR',
include = [],
exclude = [],
} = {},
@@ -78,6 +79,7 @@ export abstract class Rule implements IRule, Triggerable {
this.client = client;
this.authorIs = {
excludeCondition,
exclude: exclude.map(x => new Author(x)),
include: include.map(x => new Author(x)),
}
@@ -99,31 +101,18 @@ export abstract class Rule implements IRule, Triggerable {
this.logger.verbose(`(Skipped) Item did not pass 'itemIs' test`);
return Promise.resolve([null, this.getResult(null, {result: `Item did not pass 'itemIs' test`})]);
}
if (this.authorIs.include !== undefined && this.authorIs.include.length > 0) {
for (const auth of this.authorIs.include) {
if (await this.resources.testAuthorCriteria(item, auth)) {
return this.process(item);
}
}
this.logger.verbose('(Skipped) Inclusive author criteria not matched');
return Promise.resolve([null, this.getResult(null, {result: 'Inclusive author criteria not matched'})]);
const [authFilterResult, authFilterType] = await checkAuthorFilter(item, this.authorIs, this.resources, this.logger);
if(!authFilterResult) {
this.logger.verbose(`(Skipped) ${authFilterType} Author criteria not matched`);
return Promise.resolve([null, this.getResult(null, {result: `${authFilterType} author criteria not matched`})]);
}
if (this.authorIs.exclude !== undefined && this.authorIs.exclude.length > 0) {
for (const auth of this.authorIs.exclude) {
if (await this.resources.testAuthorCriteria(item, auth, false)) {
return this.process(item);
}
}
this.logger.verbose('(Skipped) Exclusive author criteria not matched');
return Promise.resolve([null, this.getResult(null, {result: 'Exclusive author criteria not matched'})]);
}
} catch (err) {
} catch (err: any) {
this.logger.error('Error occurred during Rule pre-process checks');
throw err;
}
try {
return this.process(item);
} catch (err) {
} catch (err: any) {
this.logger.error('Error occurred while processing rule');
throw err;
}
@@ -240,6 +229,6 @@ export interface RuleJSONConfig extends IRule {
* The kind of rule to run
* @examples ["recentActivity", "repeatActivity", "author", "attribution", "history"]
*/
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex'
kind: 'recentActivity' | 'repeatActivity' | 'author' | 'attribution' | 'history' | 'regex' | 'repost'
}

View File

@@ -50,24 +50,52 @@
]
},
"flairCssClass": {
"description": "A list of (user) flair css class values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair css class (or list of) from the subreddit to match against",
"examples": [
"red"
]
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"items": {
"type": "string"
},
"type": "array"
"description": "A (user) flair template id (or list of) from the subreddit to match against"
},
"flairText": {
"description": "A list of (user) flair text values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair text value (or list of) from the subreddit to match against",
"examples": [
"Approved"
],
"items": {
"type": "string"
},
"type": "array"
]
},
"isMod": {
"description": "Is the author a moderator?",
@@ -131,12 +159,21 @@
],
"properties": {
"exclude": {
"description": "Only runs if `include` is not present. Will \"pass\" if any of set of the AuthorCriteria **does not** pass",
"description": "Only runs if `include` is not present. Each AuthorCriteria is comprised of conditions that the Author being checked must \"not\" pass. See excludeCondition for set behavior\n\nEX: `isMod: true, name: Automoderator` => Will pass if the Author IS NOT a mod and IS NOT named Automoderator",
"items": {
"$ref": "#/definitions/AuthorCriteria"
},
"type": "array"
},
"excludeCondition": {
"default": "OR",
"description": "* OR => if ANY exclude condition \"does not\" pass then the exclude test passes\n* AND => if ALL exclude conditions \"do not\" pass then the exclude test passes\n\nDefaults to OR",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"include": {
"description": "Will \"pass\" if any set of AuthorCriteria passes",
"items": {
@@ -167,6 +204,11 @@
"deleted": {
"type": "boolean"
},
"depth": {
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"distinguished": {
"type": "boolean"
},
@@ -184,7 +226,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
@@ -235,14 +277,47 @@
"filtered": {
"type": "boolean"
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"is_self": {
"type": "boolean"
},
"link_flair_css_class": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"link_flair_text": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"locked": {
"type": "boolean"
@@ -258,7 +333,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
@@ -386,6 +461,7 @@
"message",
"remove",
"report",
"userflair",
"usernote"
],
"type": "string"

View File

@@ -222,6 +222,17 @@
],
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
"type": "string"
},
"targets": {
"description": "Specify which Activities to approve\n\nThis setting is only applicable if the Activity being acted on is a **comment**. On a **submission** the setting does nothing\n\n* self => approve activity being checked (comment)\n* parent => approve parent (submission) of activity being checked (comment)",
"items": {
"enum": [
"parent",
"self"
],
"type": "string"
},
"type": "array"
}
},
"required": [
@@ -235,8 +246,7 @@
"default": "undefined",
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)",
"examples": [
[
]
[]
],
"items": {
"enum": [
@@ -269,8 +279,7 @@
},
"domains": {
"default": [
[
]
[]
],
"description": "A list of domains whose Activities will be tested against `threshold`.\n\nThe values are tested as partial strings so you do not need to include full URLs, just the part that matters.\n\nEX `[\"youtube\"]` will match submissions with the domain `https://youtube.com/c/aChannel`\nEX `[\"youtube.com/c/bChannel\"]` will NOT match submissions with the domain `https://youtube.com/c/aChannel`\n\nIf you wish to aggregate on self-posts for a subreddit use the syntax `self.[subreddit]` EX `self.AskReddit`\n\n**If this Rule is part of a Check for a Submission and you wish to aggregate on the domain of the Submission use the special string `AGG:SELF`**\n\nIf nothing is specified or list is empty (default) aggregate using `aggregateOn`",
"items": {
@@ -505,24 +514,52 @@
]
},
"flairCssClass": {
"description": "A list of (user) flair css class values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair css class (or list of) from the subreddit to match against",
"examples": [
"red"
]
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"items": {
"type": "string"
},
"type": "array"
"description": "A (user) flair template id (or list of) from the subreddit to match against"
},
"flairText": {
"description": "A list of (user) flair text values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair text value (or list of) from the subreddit to match against",
"examples": [
"Approved"
],
"items": {
"type": "string"
},
"type": "array"
]
},
"isMod": {
"description": "Is the author a moderator?",
@@ -586,12 +623,21 @@
],
"properties": {
"exclude": {
"description": "Only runs if `include` is not present. Will \"pass\" if any of set of the AuthorCriteria **does not** pass",
"description": "Only runs if `include` is not present. Each AuthorCriteria is comprised of conditions that the Author being checked must \"not\" pass. See excludeCondition for set behavior\n\nEX: `isMod: true, name: Automoderator` => Will pass if the Author IS NOT a mod and IS NOT named Automoderator",
"items": {
"$ref": "#/definitions/AuthorCriteria"
},
"type": "array"
},
"excludeCondition": {
"default": "OR",
"description": "* OR => if ANY exclude condition \"does not\" pass then the exclude test passes\n* AND => if ALL exclude conditions \"do not\" pass then the exclude test passes\n\nDefaults to OR",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"include": {
"description": "Will \"pass\" if any set of AuthorCriteria passes",
"items": {
@@ -856,6 +902,17 @@
],
"description": "The cache provider and, optionally, a custom configuration for that provider\n\nIf not present or `null` provider will be `memory`.\n\nTo specify another `provider` but use its default configuration set this property to a string of one of the available providers: `memory`, `redis`, or `none`"
},
"selfTTL": {
"default": 50,
"description": "Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling\n\nThis is useful to prevent the bot from checking Activities it *just* worked on or a product of the checks. Examples:\n\n* Ignore comments created through an Action\n* Ignore Activity polled from modqueue that the bot just reported\n\nThis value should be at least as long as the longest polling interval for modqueue/newComm\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
"examples": [
50
],
"type": [
"number",
"boolean"
]
},
"submissionTTL": {
"default": 60,
"description": "Amount of time, in seconds, a submission should be cached\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
@@ -904,8 +961,7 @@
"type": "object"
},
"CacheOptions": {
"additionalProperties": {
},
"additionalProperties": {},
"description": "Configure granular settings for a cache provider with this object",
"properties": {
"auth_pass": {
@@ -970,25 +1026,6 @@
],
"type": "string"
},
"ClearProcessedOptions": {
"description": "For very long-running, high-volume subreddits clearing the list of processed activities helps manage memory bloat\n\nAll of these options have default values based on the limit and/or interval set for polling options on each subreddit stream. They only need to modified if the defaults are not sufficient.\n\nIf both `after` and `size` are defined whichever is hit first will trigger the list to clear. `after` will be reset after ever clear.",
"properties": {
"after": {
"description": "An interval the processed list should be cleared after.\n\n* EX `9 days`\n* EX `3 months`\n* EX `5 minutes`",
"pattern": "^\\s*(?<time>\\d+)\\s*(?<unit>days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)\\s*$",
"type": "string"
},
"retain": {
"description": "The number of activities to retain in processed list after clearing.\n\nDefaults to `limit` value from `PollingOptions`",
"type": "number"
},
"size": {
"description": "Number of activities found in processed list after which the list should be cleared.\n\nDefaults to the `limit` value from `PollingOptions`",
"type": "number"
}
},
"type": "object"
},
"CommentActionJson": {
"description": "Reply to the Activity. For a submission the reply will be a top-level comment.",
"properties": {
@@ -1118,6 +1155,9 @@
{
"$ref": "#/definitions/FlairActionJson"
},
{
"$ref": "#/definitions/UserFlairActionJson"
},
{
"$ref": "#/definitions/CommentActionJson"
},
@@ -1256,6 +1296,9 @@
{
"$ref": "#/definitions/RegexRuleJSONConfig"
},
{
"$ref": "#/definitions/RepostRuleJSONConfig"
},
{
"$ref": "#/definitions/RuleSetJson"
},
@@ -1294,6 +1337,11 @@
"deleted": {
"type": "boolean"
},
"depth": {
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"distinguished": {
"type": "boolean"
},
@@ -1311,7 +1359,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
@@ -1414,6 +1462,61 @@
},
"type": "object"
},
"FilterCriteriaDefaults": {
"properties": {
"authorIs": {
"$ref": "#/definitions/AuthorOptions",
"description": "Determine how authorIs defaults behave when authorIs is present on the check\n\n* merge => merges defaults with check's authorIs\n* replace => check authorIs will replace defaults (no defaults used)",
"examples": [
{
"include": [
{
"flairText": [
"Contributor",
"Veteran"
]
},
{
"isMod": true
}
]
}
]
},
"authorIsBehavior": {
"enum": [
"merge",
"replace"
],
"type": "string"
},
"itemIs": {
"anyOf": [
{
"items": {
"$ref": "#/definitions/SubmissionState"
},
"type": "array"
},
{
"items": {
"$ref": "#/definitions/CommentState"
},
"type": "array"
}
]
},
"itemIsBehavior": {
"description": "Determine how itemIs defaults behave when itemIs is present on the check\n\n* merge => adds defaults to check's itemIs\n* replace => check itemIs will replace defaults (no defaults used)",
"enum": [
"merge",
"replace"
],
"type": "string"
}
},
"type": "object"
},
"FlairActionJson": {
"description": "Flair the Submission",
"properties": {
@@ -1457,6 +1560,10 @@
],
"type": "boolean"
},
"flair_template_id": {
"description": "Flair template ID to assign",
"type": "string"
},
"itemIs": {
"anyOf": [
{
@@ -2004,6 +2111,76 @@
],
"type": "object"
},
"OccurredAt": {
"properties": {
"condition": {
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"testOn": {
"$ref": "#/definitions/TimeBasedSelector",
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
}
},
"required": [
"condition",
"testOn"
],
"type": "object"
},
"OccurrenceTests": {
"properties": {
"count": {
"properties": {
"condition": {
"enum": [
"AND",
"OR"
],
"type": "string"
},
"test": {
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"test"
],
"type": "object"
},
"time": {
"description": "Test the time the reposts occurred at",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"test": {
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
"items": {
"$ref": "#/definitions/OccurredAt"
},
"type": "array"
}
},
"required": [
"test"
],
"type": "object"
}
},
"type": "object"
},
"PollingOptions": {
"description": "A configuration for where, how, and when to poll Reddit for Activities to process",
"examples": [
@@ -2014,10 +2191,6 @@
}
],
"properties": {
"clearProcessed": {
"$ref": "#/definitions/ClearProcessedOptions",
"description": "For very long-running, high-volume subreddits clearing the list of processed activities helps manage memory bloat\n\nAll of these options have default values based on the limit and/or interval set for polling options on each subreddit stream. They only need to modified if the defaults are not sufficient.\n\nIf both `after` and `size` are defined whichever is hit first will trigger the list to clear. `after` will be reset after ever clear."
},
"delayUntil": {
"description": "Delay processing Activity until it is `N` seconds old\n\nUseful if there are other bots that may process an Activity and you want this bot to run first/last/etc.\n\nIf the Activity is already `N` seconds old when it is initially retrieved no refresh of the Activity occurs (no API request is made) and it is immediately processed.",
"type": "number"
@@ -2137,7 +2310,7 @@
},
"useSubmissionAsReference": {
"default": true,
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
"type": "boolean"
},
"window": {
@@ -2462,6 +2635,9 @@
],
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
"type": "string"
},
"spam": {
"type": "boolean"
}
},
"required": [
@@ -2491,6 +2667,11 @@
}
]
},
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"exclude": {
"description": "If present, activities will be counted only if they are **NOT** found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
"examples": [
@@ -2581,9 +2762,14 @@
],
"type": "string"
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"minWordCount": {
"default": 1,
"description": "For activities that are text-based this is the minimum number of words required for the activity to be considered for a repeat\n\nEX if `minimumWordCount=5` and a comment is `what about you` then it is ignored because `3 is less than 5`\n\n**For self-text submissions** -- title + body text\n\n**For comments* -- body text",
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"name": {
@@ -2599,6 +2785,13 @@
"description": "The number of repeat submissions that will trigger the rule",
"type": "string"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"useSubmissionAsReference": {
"default": true,
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
@@ -2716,6 +2909,224 @@
],
"type": "object"
},
"RepostCriteria": {
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
"properties": {
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"maxExternalItems": {
"default": 50,
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
"type": "number"
},
"maxRedditItems": {
"default": 50,
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
"type": "number"
},
"minWordCount": {
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"occurredAt": {
"description": "Test the time the reposts occurred at",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
"items": {
"$ref": "#/definitions/OccurredAt"
},
"type": "array"
}
},
"required": [
"criteria"
],
"type": "object"
},
"occurrences": {
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"items": {
"$ref": "#/definitions/OccurrenceTests"
},
"type": "array"
}
},
"type": "object"
},
"searchOn": {
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
"items": {
"anyOf": [
{
"$ref": "#/definitions/SearchFacetJSONConfig"
},
{
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
}
]
},
"type": "array"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"transformationsActivity": {
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"window": {
"anyOf": [
{
"$ref": "#/definitions/ActivityWindowCriteria"
},
{
"$ref": "#/definitions/DurationObject"
},
{
"type": [
"string",
"number"
]
}
],
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
"examples": [
"90 days"
]
}
},
"type": "object"
},
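To make the shape above concrete, here is a rough sketch of how a `RepostCriteria` entry might look in a subreddit config. All values are hypothetical and only show how the properties defined above fit together (JSON does not allow comments, so the explanation stays here): it searches reddit by title and url, requires an 85% text match, and tests that at least 3 reposts were found within the last 90 days.

```json
{
  "searchOn": ["title", "url"],
  "matchScore": 85,
  "maxRedditItems": 50,
  "occurrences": {
    "condition": "AND",
    "criteria": [
      {
        "count": { "test": [">= 3"] }
      }
    ]
  },
  "transformations": [
    { "search": "/\\[oc\\]/i", "replace": "" }
  ],
  "window": "90 days"
}
```

Per the `occurrences` description above, omitting that property would instead make any found repost count as a pass.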
"RepostRuleJSONConfig": {
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
"properties": {
"authorIs": {
"$ref": "#/definitions/AuthorOptions",
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
"examples": [
{
"include": [
{
"flairText": [
"Contributor",
"Veteran"
]
},
{
"isMod": true
}
]
}
]
},
"condition": {
"default": "OR",
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
"examples": [
{
"matchThreshold": "> 3",
"regex": "/reddit/"
}
],
"items": {
"$ref": "#/definitions/RepostCriteria"
},
"minItems": 1,
"type": "array"
},
"itemIs": {
"anyOf": [
{
"items": {
"$ref": "#/definitions/SubmissionState"
},
"type": "array"
},
{
"items": {
"$ref": "#/definitions/CommentState"
},
"type": "array"
}
],
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
},
"kind": {
"description": "The kind of rule to run",
"enum": [
"repost"
],
"examples": [
"repost"
],
"type": "string"
},
"name": {
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
"examples": [
"myNewRule"
],
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
"type": "string"
}
},
"required": [
"kind"
],
"type": "object"
},
"RuleSetJson": {
"description": "A RuleSet is a \"nested\" set of `Rule` objects that can be used to create more complex AND/OR behavior. Think of the outcome of a `RuleSet` as the result of all of its run `Rule` objects (based on `condition`)",
"properties": {
@@ -2753,6 +3164,9 @@
{
"$ref": "#/definitions/RegexRuleJSONConfig"
},
{
"$ref": "#/definitions/RepostRuleJSONConfig"
},
{
"type": "string"
}
@@ -2767,6 +3181,111 @@
],
"type": "object"
},
"SearchAndReplaceRegExp": {
"properties": {
"replace": {
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
"type": "string"
},
"search": {
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
"examples": [
"find this string",
"/some string*/ig"
],
"type": "string"
}
},
"required": [
"replace",
"search"
],
"type": "object"
},
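As a small illustration of the `search`/`replace` pair defined above (both entries are hypothetical), the first uses a plain string treated as a case-sensitive literal and the second uses a regular expression with flags; an empty `replace` removes the matched text, as noted in the description:

```json
[
  { "search": "[OC]", "replace": "" },
  { "search": "/\\s+/g", "replace": " " }
]
```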
"SearchFacetJSONConfig": {
"properties": {
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"kind": {
"anyOf": [
{
"items": {
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
},
"type": "array"
},
{
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
}
]
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"minWordCount": {
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"transformationsActivity": {
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"window": {
"anyOf": [
{
"$ref": "#/definitions/ActivityWindowCriteria"
},
{
"$ref": "#/definitions/DurationObject"
},
{
"type": [
"string",
"number"
]
}
],
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
"examples": [
"90 days"
]
}
},
"required": [
"kind"
],
"type": "object"
},
"SubmissionCheckJson": {
"properties": {
"actions": {
@@ -2788,6 +3307,9 @@
{
"$ref": "#/definitions/FlairActionJson"
},
{
"$ref": "#/definitions/UserFlairActionJson"
},
{
"$ref": "#/definitions/CommentActionJson"
},
@@ -2926,6 +3448,9 @@
{
"$ref": "#/definitions/RegexRuleJSONConfig"
},
{
"$ref": "#/definitions/RepostRuleJSONConfig"
},
{
"$ref": "#/definitions/RuleSetJson"
},
@@ -2970,14 +3495,47 @@
"filtered": {
"type": "boolean"
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"is_self": {
"type": "boolean"
},
"link_flair_css_class": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"link_flair_text": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"locked": {
"type": "boolean"
@@ -2993,7 +3551,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
@@ -3026,6 +3584,9 @@
}
],
"properties": {
"isUserProfile": {
"type": "boolean"
},
"name": {
"anyOf": [
{
@@ -3056,6 +3617,121 @@
},
"type": "object"
},
"ThirdPartyCredentialsJsonConfig": {
"additionalProperties": {},
"properties": {
"youtube": {
"properties": {
"apiKey": {
"type": "string"
}
},
"required": [
"apiKey"
],
"type": "object"
}
},
"type": "object"
},
"TimeBasedSelector": {
"enum": [
"all",
"any",
"newest",
"oldest"
],
"type": "string"
},
"UserFlairActionJson": {
"description": "Flair the Submission",
"properties": {
"authorIs": {
"$ref": "#/definitions/AuthorOptions",
"description": "If present then these Author criteria are checked before running the Action. If criteria fails then the Action is not run.",
"examples": [
{
"include": [
{
"flairText": [
"Contributor",
"Veteran"
]
},
{
"isMod": true
}
]
}
]
},
"css": {
"description": "The text of the css class of the flair to apply",
"type": "string"
},
"dryRun": {
"default": false,
"description": "If `true` the Action will not make the API request to Reddit to perform its action.",
"examples": [
false,
true
],
"type": "boolean"
},
"enable": {
"default": true,
"description": "If set to `false` the Action will not be run",
"examples": [
true
],
"type": "boolean"
},
"flair_template_id": {
"description": "Flair template to pick.\n\n**Note:** If this template is used text/css are ignored",
"type": "string"
},
"itemIs": {
"anyOf": [
{
"items": {
"$ref": "#/definitions/SubmissionState"
},
"type": "array"
},
{
"items": {
"$ref": "#/definitions/CommentState"
},
"type": "array"
}
],
"description": "A list of criteria to test the state of the `Activity` against before running the Action.\n\nIf any set of criteria passes the Action will be run."
},
"kind": {
"description": "The type of action that will be performed",
"enum": [
"userflair"
],
"type": "string"
},
"name": {
"description": "An optional, but highly recommended, friendly name for this Action. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes",
"examples": [
"myDescriptiveAction"
],
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
"type": "string"
},
"text": {
"description": "The text of the flair to apply",
"type": "string"
}
},
"required": [
"kind"
],
"type": "object"
},
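A hypothetical `userflair` action as it might appear in a check's `actions` list, using the properties defined above; the flair text and css class are made-up values, and `dryRun` is enabled so no API call would be made. If `flair_template_id` were supplied instead, `text` and `css` would be ignored per the description above.

```json
{
  "kind": "userflair",
  "name": "flairRepeatPoster",
  "text": "Repeat Poster",
  "css": "warning",
  "dryRun": true
}
```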
"UserNoteActionJson": {
"description": "Add a Toolbox User Note to the Author of this Activity",
"properties": {
@@ -3239,6 +3915,9 @@
"minItems": 1,
"type": "array"
},
"credentials": {
"$ref": "#/definitions/ThirdPartyCredentialsJsonConfig"
},
"dryRun": {
"default": "undefined",
"description": "Use this option to override the `dryRun` setting for all `Checks`",
@@ -3248,6 +3927,10 @@
],
"type": "boolean"
},
"filterCriteriaDefaults": {
"$ref": "#/definitions/FilterCriteriaDefaults",
"description": "Set the default filter criteria for all checks. If this property is specified it will override any defaults passed from the bot's config\n\nDefault behavior is to exclude all mods and automoderator from checks"
},
"footer": {
"anyOf": [
{

View File

@@ -1,6 +1,189 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"AuthorCriteria": {
"additionalProperties": false,
"description": "Criteria with which to test against the author of an Activity. The outcome of the test is based on:\n\n1. All present properties passing and\n2. If a property is a list then any value from the list matching",
"examples": [
{
"flairText": [
"Contributor",
"Veteran"
],
"isMod": true,
"name": [
"FoxxMD",
"AnotherUser"
]
}
],
"minProperties": 1,
"properties": {
"age": {
"description": "Test the age of the Author's account (when it was created) against this comparison\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>`\n\n* EX `> 100 days` => Passes if Author's account is older than 100 days\n* EX `<= 2 months` => Passes if Author's account is younger than or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days?|weeks?|months?|years?|hours?|minutes?|seconds?|milliseconds?)\\s*$",
"type": "string"
},
"commentKarma": {
"description": "A string containing a comparison operator and a value to compare karma against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign]`\n\n* EX `> 100` => greater than 100 comment karma\n* EX `<= 75%` => comment karma is less than or equal to 75% of **all karma**",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
"description": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "An (array of) string/regular expression to test contents of an Author's profile description against\n\nIf no flags are specified then the **insensitive** flag is used by default\n\nIf using an array then if **any** value in the array passes the description test passes",
"examples": [
[
"/test$/i",
"look for this string literal"
]
]
},
"flairCssClass": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair css class (or list of) from the subreddit to match against",
"examples": [
"red"
]
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair template id (or list of) from the subreddit to match against"
},
"flairText": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair text value (or list of) from the subreddit to match against",
"examples": [
"Approved"
]
},
"isMod": {
"description": "Is the author a moderator?",
"type": "boolean"
},
"linkKarma": {
"description": "A string containing a comparison operator and a value to compare link karma against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign]`\n\n* EX `> 100` => greater than 100 link karma\n* EX `<= 75%` => link karma is less than or equal to 75% of **all karma**",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
"name": {
"description": "A list of reddit usernames (case-insensitive) to match against. Do not include the \"u/\" prefix\n\n EX to match against /u/FoxxMD and /u/AnotherUser use [\"FoxxMD\",\"AnotherUser\"]",
"examples": [
"FoxxMD",
"AnotherUser"
],
"items": {
"type": "string"
},
"type": "array"
},
"shadowBanned": {
"description": "Is the author shadowbanned?\n\nThis is determined by trying to retrieve the author's profile. If a 404 is returned it is likely they are shadowbanned",
"type": "boolean"
},
"totalKarma": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
"userNotes": {
"description": "A list of UserNote properties to check against the User Notes attached to this Author in this Subreddit (must have Toolbox enabled and used User Notes at least once)",
"items": {
"$ref": "#/definitions/UserNoteCriteria"
},
"type": "array"
},
"verified": {
"description": "Does Author's account have a verified email?",
"type": "boolean"
}
},
"type": "object"
},
"AuthorOptions": {
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
"examples": [
{
"include": [
{
"flairText": [
"Contributor",
"Veteran"
]
},
{
"isMod": true
}
]
}
],
"properties": {
"exclude": {
"description": "Only runs if `include` is not present. Each AuthorCriteria is comprised of conditions that the Author being checked must \"not\" pass. See excludeCondition for set behavior\n\nEX: `isMod: true, name: Automoderator` => Will pass if the Author IS NOT a mod and IS NOT named Automoderator",
"items": {
"$ref": "#/definitions/AuthorCriteria"
},
"type": "array"
},
"excludeCondition": {
"default": "OR",
"description": "* OR => if ANY exclude condition \"does not\" pass then the exclude test passes\n* AND => if ALL exclude conditions \"do not\" pass then the exclude test passes\n\nDefaults to OR",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"include": {
"description": "Will \"pass\" if any set of AuthorCriteria passes",
"items": {
"$ref": "#/definitions/AuthorCriteria"
},
"type": "array"
}
},
"type": "object"
},
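To make the `exclude`/`excludeCondition` behavior above concrete, a hypothetical `AuthorOptions` block that passes only when the author is not a moderator and is not AutoModerator (with `AND`, every exclude criteria must fail for the test to pass):

```json
{
  "excludeCondition": "AND",
  "exclude": [
    { "isMod": true },
    { "name": ["AutoModerator"] }
  ]
}
```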
"BotConnection": {
"description": "Configuration required to connect to a CM Server",
"properties": {
@@ -19,6 +202,28 @@
],
"type": "object"
},
"BotCredentialsJsonConfig": {
"properties": {
"reddit": {
"$ref": "#/definitions/RedditCredentials"
},
"youtube": {
"properties": {
"apiKey": {
"type": "string"
}
},
"required": [
"apiKey"
],
"type": "object"
}
},
"required": [
"reddit"
],
"type": "object"
},
"BotInstanceJsonConfig": {
"description": "The configuration for an **individual reddit account** ContextMod will run as a bot.\n\nMultiple bot configs may be specified (one per reddit account).\n\n**NOTE:** If `bots` is not specified in a `FILE` then a default `bot` is generated using `ENV/ARG` values IE `CLIENT_ID`, etc...but if `bots` IS specified the default is not generated.",
"properties": {
@@ -27,18 +232,19 @@
"description": "Settings to configure the default caching behavior for this bot\n\nEvery setting not specified will default to what is specified by the global operator caching config"
},
"credentials": {
"$ref": "#/definitions/RedditCredentials",
"description": "Credentials required for the bot to interact with Reddit's API\n\nThese credentials will provided to both the API and Web interface unless otherwise specified with the `web.credentials` property\n\nRefer to the [required credentials table](https://github.com/FoxxMD/context-mod/blob/master/docs/operatorConfiguration.md#minimum-required-configuration) to see what is necessary to run the bot.",
"examples": [
"anyOf": [
{
"accessToken": "p75_1c467b2",
"clientId": "f4b4df1_9oiu",
"clientSecret": "34v5q1c564_yt7",
"redirectUri": "http://localhost:8085/callback",
"refreshToken": "34_f1w1v4"
"$ref": "#/definitions/RedditCredentials"
},
{
"$ref": "#/definitions/BotCredentialsJsonConfig"
}
]
},
"filterCriteriaDefaults": {
"$ref": "#/definitions/FilterCriteriaDefaults",
"description": "Define the default behavior for all filter criteria on all checks in all subreddits\n\nDefaults to exclude mods and automoderator from checks"
},
"name": {
"type": "string"
},
@@ -75,13 +281,36 @@
},
{
"properties": {
"shared": {
"anyOf": [
{
"items": {
"enum": [
"modqueue",
"newComm",
"newSub",
"unmoderated"
],
"type": "string"
},
"type": "array"
},
{
"enum": [
true
],
"type": "boolean"
}
],
"description": "Set which polling sources should be shared among subreddits using default polling settings for that source\n\n* For `unmoderated and `modqueue` the bot will poll on **r/mod** for new activities\n* For `newSub` and `newComm` all subreddits sharing the source will be combined to poll like **r/subreddit1+subreddit2/new**\n\nIf set to `true` all polling sources will be shared, otherwise specify which sourcs should be shared as a list"
},
"sharedMod": {
"default": false,
"description": "If set to `true` all subreddits polling unmoderated/modqueue with default polling settings will share a request to \"r/mod\"\notherwise each subreddit will poll its own mod view\n\n* ENV => `SHARE_MOD`\n* ARG => `--shareMod`",
"description": "DEPRECATED: See `shared`\n\n Using the ENV or ARG will sett `unmoderated` and `modqueue` on `shared`\n\n* ENV => `SHARE_MOD`\n* ARG => `--shareMod`",
"type": "boolean"
},
"stagger": {
"description": "If sharing a mod stream stagger pushing relevant Activities to individual subreddits.\n\nUseful when running many subreddits and rules are potentially cpu/memory/traffic heavy -- allows spreading out load",
"description": "If sharing a stream staggers pushing relevant Activities to individual subreddits.\n\nUseful when running many subreddits and rules are potentially cpu/memory/traffic heavy -- allows spreading out load",
"type": "number"
}
},
@@ -105,21 +334,8 @@
"type": "object"
},
"snoowrap": {
"description": "Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior",
"properties": {
"debug": {
"description": "Manually set the debug status for snoowrap\n\nWhen snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level\n\n* Set to `true` to always output\n* Set to `false` to never output\n\nIf not present or `null` will be set based on `logLevel`\n\n* ENV => `SNOO_DEBUG`\n* ARG => `--snooDebug`",
"type": "boolean"
},
"proxy": {
"description": "Proxy all requests to Reddit's API through this endpoint\n\n* ENV => `PROXY`\n* ARG => `--proxy <proxyEndpoint>`",
"examples": [
"http://localhost:4443"
],
"type": "string"
}
},
"type": "object"
"$ref": "#/definitions/SnoowrapOptions",
"description": "Settings to control some [Snoowrap](https://github.com/not-an-aardvark/snoowrap) behavior.\n\nOverrides any defaults provided at top-level operator config.\n\nSet to an empty object to \"ignore\" any top-level config"
},
"subreddits": {
"description": "Settings related to bot behavior for subreddits it is managing",
@@ -181,8 +397,7 @@
"type": "object"
},
"CacheOptions": {
"additionalProperties": {
},
"additionalProperties": {},
"description": "Configure granular settings for a cache provider with this object",
"properties": {
"auth_pass": {
@@ -247,6 +462,73 @@
],
"type": "string"
},
"CommentState": {
"description": "Different attributes a `Comment` can be in. Only include a property if you want to check it.",
"examples": [
{
"op": true,
"removed": false
}
],
"properties": {
"age": {
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"approved": {
"type": "boolean"
},
"deleted": {
"type": "boolean"
},
"depth": {
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"distinguished": {
"type": "boolean"
},
"filtered": {
"type": "boolean"
},
"locked": {
"type": "boolean"
},
"op": {
"description": "Is this Comment Author also the Author of the Submission this comment is in?",
"type": "boolean"
},
"removed": {
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
"score": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
"spam": {
"type": "boolean"
},
"stickied": {
"type": "boolean"
},
"submissionState": {
"description": "A list of SubmissionState attributes to test the Submission this comment is in",
"items": {
"$ref": "#/definitions/SubmissionState"
},
"type": "array"
}
},
"type": "object"
},
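For reference, a hypothetical `itemIs` entry built from the `CommentState` properties above: a comment not written by the OP, not removed, newer than 30 minutes, and whose parent submission is not locked.

```json
{
  "op": false,
  "removed": false,
  "age": "< 30 minutes",
  "submissionState": [
    { "locked": false }
  ]
}
```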
"DiscordProviderConfig": {
"properties": {
"name": {
@@ -269,6 +551,172 @@
],
"type": "object"
},
"FilterCriteriaDefaults": {
"properties": {
"authorIs": {
"$ref": "#/definitions/AuthorOptions",
"description": "Determine how authorIs defaults behave when authorIs is present on the check\n\n* merge => merges defaults with check's authorIs\n* replace => check authorIs will replace defaults (no defaults used)",
"examples": [
{
"include": [
{
"flairText": [
"Contributor",
"Veteran"
]
},
{
"isMod": true
}
]
}
]
},
"authorIsBehavior": {
"enum": [
"merge",
"replace"
],
"type": "string"
},
"itemIs": {
"anyOf": [
{
"items": {
"$ref": "#/definitions/SubmissionState"
},
"type": "array"
},
{
"items": {
"$ref": "#/definitions/CommentState"
},
"type": "array"
}
]
},
"itemIsBehavior": {
"description": "Determine how itemIs defaults behave when itemIs is present on the check\n\n* merge => adds defaults to check's itemIs\n* replace => check itemIs will replace defaults (no defaults used)",
"enum": [
"merge",
"replace"
],
"type": "string"
}
},
"type": "object"
},
"LoggingOptions": {
"properties": {
"console": {
"allOf": [
{
"$ref": "#/definitions/Pick<Transports.ConsoleTransportOptions,\"silent\"|\"eol\"|\"stderrLevels\"|\"consoleWarnLevels\">"
},
{
"properties": {
"level": {
"enum": [
"debug",
"error",
"info",
"verbose",
"warn"
],
"type": "string"
}
},
"type": "object"
}
],
"description": "Options for logging to console"
},
"file": {
"allOf": [
{
"$ref": "#/definitions/Omit<DailyRotateFileTransportOptions,\"stream\"|\"dirname\"|\"options\"|\"handleRejections\"|\"format\"|\"handleExceptions\"|\"log\"|\"logv\"|\"close\">"
},
{
"properties": {
"dirname": {
"description": "The absolute path to a directory where rotating log files should be stored.\n\n* If not present or `null` or `false` no log files will be created\n* If `true` logs will be stored at `[working directory]/logs`\n\n* ENV => `LOG_DIR`\n* ARG => `--logDir [dir]`",
"examples": [
"/var/log/contextmod"
],
"type": [
"null",
"string",
"boolean"
]
},
"level": {
"enum": [
"debug",
"error",
"info",
"verbose",
"warn"
],
"type": "string"
}
},
"type": "object"
}
],
"description": "Options for Rotating File logging"
},
"level": {
"default": "verbose",
"description": "The minimum log level to output. The log level set will output logs at its level **and all levels above it:**\n\n * `error`\n * `warn`\n * `info`\n * `verbose`\n * `debug`\n\n Note: `verbose` will display *a lot* of information on the status/result of run rules/checks/actions etc. which is very useful for testing configurations. Once your bot is stable changing the level to `info` will reduce log noise.\n\n * ENV => `LOG_LEVEL`\n * ARG => `--logLevel <level>`",
"enum": [
"debug",
"error",
"info",
"verbose",
"warn"
],
"examples": [
"verbose"
],
"type": "string"
},
"path": {
"description": "**DEPRECATED** - Use `file.dirname` instead\nThe absolute path to a directory where rotating log files should be stored.\n\n* If not present or `null` or `false` no log files will be created\n* If `true` logs will be stored at `[working directory]/logs`\n\n* ENV => `LOG_DIR`\n* ARG => `--logDir [dir]`",
"examples": [
"/var/log/contextmod"
],
"type": [
"null",
"string",
"boolean"
]
},
"stream": {
"allOf": [
{
"$ref": "#/definitions/Omit<DuplexTransportOptions,\"name\"|\"stream\"|\"handleRejections\"|\"format\"|\"handleExceptions\"|\"log\"|\"logv\"|\"close\">"
},
{
"properties": {
"level": {
"enum": [
"debug",
"error",
"info",
"verbose",
"warn"
],
"type": "string"
}
},
"type": "object"
}
],
"description": "Options for logging to api/web"
}
},
"type": "object"
},
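A sketch of an operator `logging` block using the transport-specific options defined above; the directory, levels, and retention values are examples only:

```json
{
  "level": "verbose",
  "console": {
    "level": "debug"
  },
  "file": {
    "dirname": "/var/log/contextmod",
    "level": "info",
    "maxFiles": "14d",
    "zippedArchive": true
  }
}
```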
"NotificationConfig": {
"properties": {
"events": {
@@ -334,6 +782,90 @@
],
"type": "object"
},
"Omit<DailyRotateFileTransportOptions,\"stream\"|\"dirname\"|\"options\"|\"handleRejections\"|\"format\"|\"handleExceptions\"|\"log\"|\"logv\"|\"close\">": {
"properties": {
"auditFile": {
"description": "A string representing the name of the name of the audit file. (default: './hash-audit.json')",
"type": "string"
},
"createSymlink": {
"description": "Create a tailable symlink to the current active log file. (default: false)",
"type": "boolean"
},
"datePattern": {
"description": "A string representing the moment.js date format to be used for rotating. The meta characters used in this string will dictate the frequency of the file rotation. For example, if your datePattern is simply 'HH' you will end up with 24 log files that are picked up and appended to every day. (default 'YYYY-MM-DD')",
"type": "string"
},
"eol": {
"type": "string"
},
"extension": {
"description": "A string representing an extension to be added to the filename, if not included in the filename property. (default: '')",
"type": "string"
},
"filename": {
"description": "Filename to be used to log to. This filename can include the %DATE% placeholder which will include the formatted datePattern at that point in the filename. (default: 'winston.log.%DATE%)",
"type": "string"
},
"frequency": {
"description": "A string representing the frequency of rotation. (default: 'custom')",
"type": "string"
},
"json": {
"type": "boolean"
},
"level": {
"type": "string"
},
"maxFiles": {
"description": "Maximum number of logs to keep. If not set, no logs will be removed. This can be a number of files or number of days. If using days, add 'd' as the suffix. (default: null)",
"type": [
"string",
"number"
]
},
"maxSize": {
"description": "Maximum size of the file after which it will rotate. This can be a number of bytes, or units of kb, mb, and gb. If using the units, add 'k', 'm', or 'g' as the suffix. The units need to directly follow the number. (default: null)",
"type": [
"string",
"number"
]
},
"silent": {
"type": "boolean"
},
"symlinkName": {
"description": "The name of the tailable symlink. (default: 'current.log')",
"type": "string"
},
"utc": {
"description": "A boolean whether or not to generate file name from \"datePattern\" in UTC format. (default: false)",
"type": "boolean"
},
"zippedArchive": {
"description": "A boolean to define whether or not to gzip archived log files. (default 'false')",
"type": "boolean"
}
},
"type": "object"
},
"Omit<DuplexTransportOptions,\"name\"|\"stream\"|\"handleRejections\"|\"format\"|\"handleExceptions\"|\"log\"|\"logv\"|\"close\">": {
"properties": {
"dump": {
"type": "boolean"
},
"eol": {
"type": "string"
},
"level": {
"type": "string"
},
"silent": {
"type": "boolean"
}
},
"type": "object"
},
"OperatorCacheConfig": {
"properties": {
"actionedEventsDefault": {
@@ -395,6 +927,17 @@
],
"description": "The cache provider and, optionally, a custom configuration for that provider\n\nIf not present or `null` provider will be `memory`.\n\nTo specify another `provider` but use its default configuration set this property to a string of one of the available providers: `memory`, `redis`, or `none`"
},
"selfTTL": {
"default": 50,
"description": "Amount of time, in seconds, an Activity that the bot has acted on or created will be ignored if found during polling\n\nThis is useful to prevent the bot from checking Activities it *just* worked on or a product of the checks. Examples:\n\n* Ignore comments created through an Action\n* Ignore Activity polled from modqueue that the bot just reported\n\nThis value should be at least as long as the longest polling interval for modqueue/newComm\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
"examples": [
50
],
"type": [
"number",
"boolean"
]
},
"submissionTTL": {
"default": 60,
"description": "Amount of time, in seconds, a submission should be cached\n\n* If `0` or `true` will cache indefinitely (not recommended)\n* If `false` will not cache",
@@ -442,6 +985,29 @@
},
"type": "object"
},
"Pick<Transports.ConsoleTransportOptions,\"silent\"|\"eol\"|\"stderrLevels\"|\"consoleWarnLevels\">": {
"properties": {
"consoleWarnLevels": {
"items": {
"type": "string"
},
"type": "array"
},
"eol": {
"type": "string"
},
"silent": {
"type": "boolean"
},
"stderrLevels": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"PollingDefaults": {
"properties": {
"delayUntil": {
@@ -510,6 +1076,183 @@
},
"type": "object"
},
"SnoowrapOptions": {
"properties": {
"debug": {
"description": "Manually set the debug status for snoowrap\n\nWhen snoowrap has `debug: true` it will log the http status response of reddit api requests to at the `debug` level\n\n* Set to `true` to always output\n* Set to `false` to never output\n\nIf not present or `null` will be set based on `logLevel`\n\n* ENV => `SNOO_DEBUG`\n* ARG => `--snooDebug`",
"type": "boolean"
},
"proxy": {
"description": "Proxy all requests to Reddit's API through this endpoint\n\n* ENV => `PROXY`\n* ARG => `--proxy <proxyEndpoint>`",
"examples": [
"http://localhost:4443"
],
"type": "string"
}
},
"type": "object"
},
"SubmissionState": {
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
"examples": [
{
"over_18": true,
"removed": false
}
],
"properties": {
"age": {
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"approved": {
"type": "boolean"
},
"deleted": {
"type": "boolean"
},
"distinguished": {
"type": "boolean"
},
"filtered": {
"type": "boolean"
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"is_self": {
"type": "boolean"
},
"link_flair_css_class": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"link_flair_text": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"locked": {
"type": "boolean"
},
"over_18": {
"description": "NSFW",
"type": "boolean"
},
"pinned": {
"type": "boolean"
},
"removed": {
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
"score": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
"spam": {
"type": "boolean"
},
"spoiler": {
"type": "boolean"
},
"stickied": {
"type": "boolean"
},
"title": {
"description": "A valid regular expression to match against the title of the submission",
"type": "string"
}
},
"type": "object"
},
"ThirdPartyCredentialsJsonConfig": {
"additionalProperties": {},
"properties": {
"youtube": {
"properties": {
"apiKey": {
"type": "string"
}
},
"required": [
"apiKey"
],
"type": "object"
}
},
"type": "object"
},
"UserNoteCriteria": {
"properties": {
"count": {
"default": ">= 1",
"description": "Number of occurrences of this type. Ignored if `search` is `current`\n\nA string containing a comparison operator and/or a value to compare number of occurrences against\n\nThe syntax is `(< OR > OR <= OR >=) <number>[percent sign] [ascending|descending]`",
"examples": [
">= 1"
],
"pattern": "^\\s*(?<opStr>>|>=|<|<=)\\s*(?<value>\\d+)\\s*(?<percent>%?)\\s*(?<extra>asc.*|desc.*)*$",
"type": "string"
},
"search": {
"default": "current",
"description": "How to test the notes for this Author:\n\n### current\n\nOnly the most recent note is checked for `type`\n\n### total\n\nThe `count` comparison of `type` must be found within all notes\n\n* EX `count: > 3` => Must have more than 3 notes of `type`, total\n* EX `count: <= 25%` => Must have 25% or less of notes of `type`, total\n\n### consecutive\n\nThe `count` **number** of `type` notes must be found in a row.\n\nYou may also specify the time-based order in which to search the notes by specifying `ascending (asc)` or `descending (desc)` in the `count` value. Default is `descending`\n\n* EX `count: >= 3` => Must have 3 or more notes of `type` consecutively, in descending order\n* EX `count: < 2` => Must have less than 2 notes of `type` consecutively, in descending order\n* EX `count: > 4 asc` => Must have greater than 4 notes of `type` consecutively, in ascending order",
"enum": [
"consecutive",
"current",
"total"
],
"examples": [
"current"
],
"type": "string"
},
"type": {
"description": "User Note type key to search for",
"examples": [
"spamwarn"
],
"type": "string"
}
},
"required": [
"type"
],
"type": "object"
},
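An illustrative `UserNoteCriteria` entry, suitable for the `userNotes` list in `AuthorCriteria`; the note type reuses the schema's own `spamwarn` example while the count is hypothetical, requiring two or more notes of that type in total:

```json
{
  "type": "spamwarn",
  "search": "total",
  "count": ">= 2"
}
```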
"WebCredentials": {
"description": "Separate credentials for the web interface can be provided when also running the api.\n\nAll properties not specified will default to values given in ENV/ARG credential properties\n\nRefer to the [required credentials table](https://github.com/FoxxMD/context-mod/blob/master/docs/operatorConfiguration.md#minimum-required-configuration) to see what is necessary for the web interface.",
"examples": [
@@ -579,33 +1322,12 @@
"$ref": "#/definitions/OperatorCacheConfig",
"description": "Settings to configure the default caching behavior globally\n\nThese settings will be used by each bot, and subreddit, that does not specify their own"
},
"credentials": {
"$ref": "#/definitions/ThirdPartyCredentialsJsonConfig"
},
"logging": {
"description": "Settings to configure global logging defaults",
"properties": {
"level": {
"default": "verbose",
"description": "The minimum log level to output. The log level set will output logs at its level **and all levels above it:**\n\n * `error`\n * `warn`\n * `info`\n * `verbose`\n * `debug`\n\n Note: `verbose` will display *a lot* of information on the status/result of run rules/checks/actions etc. which is very useful for testing configurations. Once your bot is stable changing the level to `info` will reduce log noise.\n\n * ENV => `LOG_LEVEL`\n * ARG => `--logLevel <level>`",
"enum": [
"debug",
"error",
"info",
"verbose",
"warn"
],
"examples": [
"verbose"
],
"type": "string"
},
"path": {
"description": "The absolute path to a directory where rotating log files should be stored.\n\n* If not present or `null` no log files will be created\n* If `true` logs will be stored at `[working directory]/logs`\n\n* ENV => `LOG_DIR`\n* ARG => `--logDir [dir]`",
"examples": [
"/var/log/contextmod"
],
"type": "string"
}
},
"type": "object"
"$ref": "#/definitions/LoggingOptions",
"description": "Settings to configure global logging defaults"
},
"mode": {
"default": "all",
@@ -654,6 +1376,10 @@
},
"type": "object"
},
"snoowrap": {
"$ref": "#/definitions/SnoowrapOptions",
"description": "Set global snoowrap options as well as default snoowrap config for all bots that don't specify their own"
},
"web": {
"description": "Settings for the web interface",
"properties": {

View File

@@ -19,6 +19,9 @@
{
"$ref": "#/definitions/RegexRuleJSONConfig"
},
{
"$ref": "#/definitions/RepostRuleJSONConfig"
},
{
"type": "string"
}
@@ -181,8 +184,7 @@
"default": "undefined",
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)",
"examples": [
[
]
[]
],
"items": {
"enum": [
@@ -215,8 +217,7 @@
},
"domains": {
"default": [
[
]
[]
],
"description": "A list of domains whose Activities will be tested against `threshold`.\n\nThe values are tested as partial strings so you do not need to include full URLs, just the part that matters.\n\nEX `[\"youtube\"]` will match submissions with the domain `https://youtube.com/c/aChannel`\nEX `[\"youtube.com/c/bChannel\"]` will NOT match submissions with the domain `https://youtube.com/c/aChannel`\n\nIf you wish to aggregate on self-posts for a subreddit use the syntax `self.[subreddit]` EX `self.AskReddit`\n\n**If this Rule is part of a Check for a Submission and you wish to aggregate on the domain of the Submission use the special string `AGG:SELF`**\n\nIf nothing is specified or list is empty (default) aggregate using `aggregateOn`",
"items": {
@@ -451,24 +452,52 @@
]
},
"flairCssClass": {
"description": "A list of (user) flair css class values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair css class (or list of) from the subreddit to match against",
"examples": [
"red"
]
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"items": {
"type": "string"
},
"type": "array"
"description": "A (user) flair template id (or list of) from the subreddit to match against"
},
"flairText": {
"description": "A list of (user) flair text values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair text value (or list of) from the subreddit to match against",
"examples": [
"Approved"
],
"items": {
"type": "string"
},
"type": "array"
]
},
"isMod": {
"description": "Is the author a moderator?",
@@ -532,12 +561,21 @@
],
"properties": {
"exclude": {
"description": "Only runs if `include` is not present. Will \"pass\" if any of set of the AuthorCriteria **does not** pass",
"description": "Only runs if `include` is not present. Each AuthorCriteria is comprised of conditions that the Author being checked must \"not\" pass. See excludeCondition for set behavior\n\nEX: `isMod: true, name: Automoderator` => Will pass if the Author IS NOT a mod and IS NOT named Automoderator",
"items": {
"$ref": "#/definitions/AuthorCriteria"
},
"type": "array"
},
"excludeCondition": {
"default": "OR",
"description": "* OR => if ANY exclude condition \"does not\" pass then the exclude test passes\n* AND => if ALL exclude conditions \"do not\" pass then the exclude test passes\n\nDefaults to OR",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"include": {
"description": "Will \"pass\" if any set of AuthorCriteria passes",
"items": {
@@ -641,6 +679,11 @@
"deleted": {
"type": "boolean"
},
"depth": {
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"distinguished": {
"type": "boolean"
},
@@ -658,7 +701,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
@@ -987,6 +1030,76 @@
},
"type": "object"
},
"OccurredAt": {
"properties": {
"condition": {
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"testOn": {
"$ref": "#/definitions/TimeBasedSelector",
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
}
},
"required": [
"condition",
"testOn"
],
"type": "object"
},
"OccurrenceTests": {
"properties": {
"count": {
"properties": {
"condition": {
"enum": [
"AND",
"OR"
],
"type": "string"
},
"test": {
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"test"
],
"type": "object"
},
"time": {
"description": "Test the time the reposts occurred at",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"test": {
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
"items": {
"$ref": "#/definitions/OccurredAt"
},
"type": "array"
}
},
"required": [
"test"
],
"type": "object"
}
},
"type": "object"
},
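A rough sketch of an `OccurrenceTests` entry (hypothetical values) combining a count test (5 or more reposts found) with a time test (any repost occurred within the last 30 days), as it might appear under `occurrences.criteria` in `RepostCriteria`:

```json
{
  "count": {
    "test": [">= 5"]
  },
  "time": {
    "condition": "AND",
    "test": [
      { "condition": "< 30 days", "testOn": "any" }
    ]
  }
}
```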
"RecentActivityRuleJSONConfig": {
"description": "Checks a user's history for any Activity (Submission/Comment) in the subreddits specified in thresholds\n\nAvailable data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):\n\n```\nsummary => comma-deliminated list of subreddits that hit the threshold and their count EX subredditA(1), subredditB(4),...\nsubCount => Total number of subreddits that hit the threshold\ntotalCount => Total number of all activity occurrences in subreddits\n```",
"properties": {
@@ -1070,7 +1183,7 @@
},
"useSubmissionAsReference": {
"default": true,
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
"type": "boolean"
},
"window": {
@@ -1347,6 +1460,11 @@
}
]
},
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"exclude": {
"description": "If present, activities will be counted only if they are **NOT** found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
"examples": [
@@ -1437,9 +1555,14 @@
],
"type": "string"
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"minWordCount": {
"default": 1,
"description": "For activities that are text-based this is the minimum number of words required for the activity to be considered for a repeat\n\nEX if `minimumWordCount=5` and a comment is `what about you` then it is ignored because `3 is less than 5`\n\n**For self-text submissions** -- title + body text\n\n**For comments* -- body text",
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"name": {
@@ -1455,6 +1578,13 @@
"description": "The number of repeat submissions that will trigger the rule",
"type": "string"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"useSubmissionAsReference": {
"default": true,
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
@@ -1486,6 +1616,329 @@
],
"type": "object"
},
"RepostCriteria": {
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
"properties": {
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"maxExternalItems": {
"default": 50,
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
"type": "number"
},
"maxRedditItems": {
"default": 50,
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
"type": "number"
},
"minWordCount": {
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"occurredAt": {
"description": "Test the time the reposts occurred at",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
"items": {
"$ref": "#/definitions/OccurredAt"
},
"type": "array"
}
},
"required": [
"criteria"
],
"type": "object"
},
"occurrences": {
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"items": {
"$ref": "#/definitions/OccurrenceTests"
},
"type": "array"
}
},
"type": "object"
},
"searchOn": {
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
"items": {
"anyOf": [
{
"$ref": "#/definitions/SearchFacetJSONConfig"
},
{
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
}
]
},
"type": "array"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"transformationsActivity": {
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"window": {
"anyOf": [
{
"$ref": "#/definitions/ActivityWindowCriteria"
},
{
"$ref": "#/definitions/DurationObject"
},
{
"type": [
"string",
"number"
]
}
],
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
"examples": [
"90 days"
]
}
},
"type": "object"
},
"RepostRuleJSONConfig": {
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
"properties": {
"authorIs": {
"$ref": "#/definitions/AuthorOptions",
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
"examples": [
{
"include": [
{
"flairText": [
"Contributor",
"Veteran"
]
},
{
"isMod": true
}
]
}
]
},
"condition": {
"default": "OR",
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
"examples": [
{
"matchThreshold": "> 3",
"regex": "/reddit/"
}
],
"items": {
"$ref": "#/definitions/RepostCriteria"
},
"minItems": 1,
"type": "array"
},
"itemIs": {
"anyOf": [
{
"items": {
"$ref": "#/definitions/SubmissionState"
},
"type": "array"
},
{
"items": {
"$ref": "#/definitions/CommentState"
},
"type": "array"
}
],
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
},
"kind": {
"description": "The kind of rule to run",
"enum": [
"repost"
],
"examples": [
"repost"
],
"type": "string"
},
"name": {
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
"examples": [
"myNewRule"
],
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
"type": "string"
}
},
"required": [
"kind"
],
"type": "object"
},
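For orientation, a minimal sketch of a rule using the `RepostRuleJSONConfig`/`RepostCriteria` shapes defined above; the property names come from the schema while the values are hypothetical:

```typescript
// Illustrative only -- property names follow the schema above, values are hypothetical
const repostRule = {
    kind: 'repost',
    name: 'linkRepostCheck',
    criteria: [
        {
            searchOn: ['url', 'title'],   // how candidate reposts are gathered
            matchScore: 85,               // % similarity required to count a candidate as a match
            caseSensitive: false,
            window: '90 days'             // only consider candidates from the last 90 days
        }
    ]
};
```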
"SearchAndReplaceRegExp": {
"properties": {
"replace": {
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
"type": "string"
},
"search": {
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
"examples": [
"find this string",
"/some string*/ig"
],
"type": "string"
}
},
"required": [
"replace",
"search"
],
"type": "object"
},
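A small sketch of `transformations` entries using the `SearchAndReplaceRegExp` shape above: the first uses a plain (case-sensitive) literal, the second a regular expression with flags; both values are only illustrative:

```typescript
// Illustrative transformations -- shape follows SearchAndReplaceRegExp above
const transformations = [
    { search: '[OC]', replace: '' },          // strip a literal tag before comparing
    { search: '/\\s{2,}/g', replace: ' ' }    // collapse repeated whitespace via regex
];
```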
"SearchFacetJSONConfig": {
"properties": {
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"kind": {
"anyOf": [
{
"items": {
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
},
"type": "array"
},
{
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
}
]
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"minWordCount": {
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"transformationsActivity": {
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"window": {
"anyOf": [
{
"$ref": "#/definitions/ActivityWindowCriteria"
},
{
"$ref": "#/definitions/DurationObject"
},
{
"type": [
"string",
"number"
]
}
],
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
"examples": [
"90 days"
]
}
},
"required": [
"kind"
],
"type": "object"
},
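A `searchOn` entry can be one of the enum strings or a full `SearchFacetJSONConfig` object as defined above. A minimal sketch of the object form, with the alternative `window` value shapes noted in a comment; all values are hypothetical:

```typescript
// Illustrative SearchFacetJSONConfig entry for use inside searchOn
const titleFacet = {
    kind: 'title',
    matchScore: 90,       // require a closer title match than the 85% default
    minWordCount: 3,
    window: '90 days'     // also accepted: 100 (a count), {"days": 90, "minutes": 15}, or 'PT15M' (ISO 8601)
};
```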
"SubmissionState": {
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
"examples": [
@@ -1512,14 +1965,47 @@
"filtered": {
"type": "boolean"
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"is_self": {
"type": "boolean"
},
"link_flair_css_class": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"link_flair_text": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"locked": {
"type": "boolean"
@@ -1535,7 +2021,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
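The flair properties above now accept a single string or a list, and the `reports` comparison accepts a report-type suffix. A sketch of an `itemIs` entry exercising both; the values are made up:

```typescript
// Illustrative SubmissionState criteria -- flair fields accept string OR string[]
const itemIs = [
    {
        link_flair_text: ['Discussion', 'Question'],   // match either flair text
        is_self: true,
        reports: '>= 1 user'                           // at least 1 user report
    }
];
```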
@@ -1568,6 +2054,9 @@
}
],
"properties": {
"isUserProfile": {
"type": "boolean"
},
"name": {
"anyOf": [
{
@@ -1598,6 +2087,15 @@
},
"type": "object"
},
"TimeBasedSelector": {
"enum": [
"all",
"any",
"newest",
"oldest"
],
"type": "string"
},
"UserNoteCriteria": {
"properties": {
"count": {

View File

@@ -158,8 +158,7 @@
"default": "undefined",
"description": "This list determines which categories of domains should be aggregated on. All aggregated domains will be tested against `threshold`\n\n* If `media` is included then aggregate author's submission history which reddit recognizes as media (youtube, vimeo, etc.)\n* If `redditMedia` is included then aggregate on author's submissions history which are media hosted on reddit: galleries, videos, and images (i.redd.it / v.redd.it)\n* If `self` is included then aggregate on author's submission history which are self-post (`self.[subreddit]`) or domain is `reddit.com`\n* If `link` is included then aggregate author's submission history which is external links and not recognized as `media` by reddit\n\nIf nothing is specified or list is empty (default) rule will only aggregate on `link` and `media` (ignores reddit-hosted content and self-posts)",
"examples": [
[
]
[]
],
"items": {
"enum": [
@@ -192,8 +191,7 @@
},
"domains": {
"default": [
[
]
[]
],
"description": "A list of domains whose Activities will be tested against `threshold`.\n\nThe values are tested as partial strings so you do not need to include full URLs, just the part that matters.\n\nEX `[\"youtube\"]` will match submissions with the domain `https://youtube.com/c/aChannel`\nEX `[\"youtube.com/c/bChannel\"]` will NOT match submissions with the domain `https://youtube.com/c/aChannel`\n\nIf you wish to aggregate on self-posts for a subreddit use the syntax `self.[subreddit]` EX `self.AskReddit`\n\n**If this Rule is part of a Check for a Submission and you wish to aggregate on the domain of the Submission use the special string `AGG:SELF`**\n\nIf nothing is specified or list is empty (default) aggregate using `aggregateOn`",
"items": {
@@ -428,24 +426,52 @@
]
},
"flairCssClass": {
"description": "A list of (user) flair css class values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair css class (or list of) from the subreddit to match against",
"examples": [
"red"
]
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"items": {
"type": "string"
},
"type": "array"
"description": "A (user) flair template id (or list of) from the subreddit to match against"
},
"flairText": {
"description": "A list of (user) flair text values from the subreddit to match against",
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
],
"description": "A (user) flair text value (or list of) from the subreddit to match against",
"examples": [
"Approved"
],
"items": {
"type": "string"
},
"type": "array"
]
},
"isMod": {
"description": "Is the author a moderator?",
@@ -509,12 +535,21 @@
],
"properties": {
"exclude": {
"description": "Only runs if `include` is not present. Will \"pass\" if any of set of the AuthorCriteria **does not** pass",
"description": "Only runs if `include` is not present. Each AuthorCriteria is comprised of conditions that the Author being checked must \"not\" pass. See excludeCondition for set behavior\n\nEX: `isMod: true, name: Automoderator` => Will pass if the Author IS NOT a mod and IS NOT named Automoderator",
"items": {
"$ref": "#/definitions/AuthorCriteria"
},
"type": "array"
},
"excludeCondition": {
"default": "OR",
"description": "* OR => if ANY exclude condition \"does not\" pass then the exclude test passes\n* AND => if ALL exclude conditions \"do not\" pass then the exclude test passes\n\nDefaults to OR",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"include": {
"description": "Will \"pass\" if any set of AuthorCriteria passes",
"items": {
@@ -618,6 +653,11 @@
"deleted": {
"type": "boolean"
},
"depth": {
"description": "The (nested) level of a comment.\n\n* 0 mean the comment is at top-level (replying to submission)\n* non-zero, Nth value means the comment has N parent comments",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"distinguished": {
"type": "boolean"
},
@@ -635,7 +675,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
@@ -964,6 +1004,76 @@
},
"type": "object"
},
"OccurredAt": {
"properties": {
"condition": {
"description": "A duration and how to compare it against a value\n\nThe syntax is `(< OR > OR <= OR >=) <number> <unit>` EX `> 100 days`, `<= 2 months`\n\n* EX `> 100 days` => Passes if the date being compared is before 100 days ago\n* EX `<= 2 months` => Passes if the date being compared is after or equal to 2 months\n\nUnit must be one of [DayJS Duration units](https://day.js.org/docs/en/durations/creating)\n\n[See] https://regexr.com/609n8 for example",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(days|weeks|months|years|hours|minutes|seconds|milliseconds)\\s*$",
"type": "string"
},
"testOn": {
"$ref": "#/definitions/TimeBasedSelector",
"description": "Which repost to test on\n\n* `any` -- ANY repost passing `condition` will cause this criteria to be true\n* `all` -- ALL reposts must pass `condition` for this criteria to be true"
}
},
"required": [
"condition",
"testOn"
],
"type": "object"
},
"OccurrenceTests": {
"properties": {
"count": {
"properties": {
"condition": {
"enum": [
"AND",
"OR"
],
"type": "string"
},
"test": {
"description": "An array of strings containing a comparison operator and the number of repost occurrences to compare against\n\nExamples:\n\n* `\">= 7\"` -- TRUE if 7 or more reposts were found\n* `\"< 1\"` -- TRUE if less than 0 reposts were found",
"items": {
"type": "string"
},
"type": "array"
}
},
"required": [
"test"
],
"type": "object"
},
"time": {
"description": "Test the time the reposts occurred at",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"test": {
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
"items": {
"$ref": "#/definitions/OccurredAt"
},
"type": "array"
}
},
"required": [
"test"
],
"type": "object"
}
},
"type": "object"
},
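Putting the new `OccurredAt` and `OccurrenceTests` definitions together, an `occurrences` block could be written like the sketch below; the thresholds are hypothetical:

```typescript
// Illustrative occurrences block combining count and time conditions
const occurrences = {
    condition: 'AND',
    criteria: [
        {
            count: { test: ['>= 2'] },   // at least 2 reposts found
            time: {
                condition: 'AND',
                test: [
                    { condition: '< 30 days', testOn: 'any' }   // any repost made within the last 30 days
                ]
            }
        }
    ]
};
```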
"RecentActivityRuleJSONConfig": {
"description": "Checks a user's history for any Activity (Submission/Comment) in the subreddits specified in thresholds\n\nAvailable data for [Action templating](https://github.com/FoxxMD/context-mod#action-templating):\n\n```\nsummary => comma-deliminated list of subreddits that hit the threshold and their count EX subredditA(1), subredditB(4),...\nsubCount => Total number of subreddits that hit the threshold\ntotalCount => Total number of all activity occurrences in subreddits\n```",
"properties": {
@@ -1047,7 +1157,7 @@
},
"useSubmissionAsReference": {
"default": true,
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
"description": "When Activity is a submission should we only include activities that are other submissions with the same content?\n\n* When the Activity is a submission this defaults to **true**\n* When the Activity is a comment it is ignored (not relevant)",
"type": "boolean"
},
"window": {
@@ -1324,6 +1434,11 @@
}
]
},
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"exclude": {
"description": "If present, activities will be counted only if they are **NOT** found in this list of Subreddits\n\nEach value in the list can be either:\n\n * string (name of subreddit)\n * regular expression to run on the subreddit name\n * `SubredditState`\n\nEX `[\"mealtimevideos\",\"askscience\", \"/onlyfans*\\/i\", {\"over18\": true}]`",
"examples": [
@@ -1414,9 +1529,14 @@
],
"type": "string"
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"minWordCount": {
"default": 1,
"description": "For activities that are text-based this is the minimum number of words required for the activity to be considered for a repeat\n\nEX if `minimumWordCount=5` and a comment is `what about you` then it is ignored because `3 is less than 5`\n\n**For self-text submissions** -- title + body text\n\n**For comments* -- body text",
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"name": {
@@ -1432,6 +1552,13 @@
"description": "The number of repeat submissions that will trigger the rule",
"type": "string"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"useSubmissionAsReference": {
"default": true,
"description": "If activity is a Submission and is a link (not self-post) then only look at Submissions that contain this link, otherwise consider all activities.",
@@ -1463,6 +1590,329 @@
],
"type": "object"
},
"RepostCriteria": {
"description": "A set of criteria used to find reposts\n\nContains options and conditions used to define how candidate reposts are retrieved and if they are a match.",
"properties": {
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"maxExternalItems": {
"default": 50,
"description": "The maximum number of external items (youtube comments) to check (and cache for comment checks)",
"type": "number"
},
"maxRedditItems": {
"default": 50,
"description": "The maximum number of comments/submissions to check\n\nIn both cases this list is gathered from sorting all submissions or all comments from all submission by number of votes and taking the \"top\" maximum specified\n\nFor comment checks this is the number of comments cached",
"type": "number"
},
"minWordCount": {
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"occurredAt": {
"description": "Test the time the reposts occurred at",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"description": "An array of time-based conditions to test against found reposts (test when a repost was made)",
"items": {
"$ref": "#/definitions/OccurredAt"
},
"type": "array"
}
},
"required": [
"criteria"
],
"type": "object"
},
"occurrences": {
"description": "A set of comparisons to test against the number of reposts found\n\nIf not specified the default is \"AND [occurrences] > 0\" IE any reposts makes this test pass",
"properties": {
"condition": {
"default": "AND",
"description": "How to test all the specified comparisons\n\n* AND -- All criteria must be true\n* OR -- Any criteria must be true\n\nDefaults to AND",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"items": {
"$ref": "#/definitions/OccurrenceTests"
},
"type": "array"
}
},
"type": "object"
},
"searchOn": {
"description": "Define how to find candidate reposts\n\n* **title** -- search reddit for submissions with the same title\n* **url** -- search reddit for submissions with the same url\n* **external** -- WHEN ACTIVITY IS A COMMENT - tries to get comments from external source (youtube, twitter, etc...)",
"items": {
"anyOf": [
{
"$ref": "#/definitions/SearchFacetJSONConfig"
},
{
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
}
]
},
"type": "array"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"transformationsActivity": {
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"window": {
"anyOf": [
{
"$ref": "#/definitions/ActivityWindowCriteria"
},
{
"$ref": "#/definitions/DurationObject"
},
{
"type": [
"string",
"number"
]
}
],
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
"examples": [
"90 days"
]
}
},
"type": "object"
},
"RepostRuleJSONConfig": {
"description": "Search for reposts of a Submission or Comment\n\n* For submissions the title or URL can searched and matched against\n* For comments, candidate comments are gathered from similar reddit submissions and/or external sources (youtube, twitter, etc..) and then matched against",
"properties": {
"authorIs": {
"$ref": "#/definitions/AuthorOptions",
"description": "If present then these Author criteria are checked before running the rule. If criteria fails then the rule is skipped.",
"examples": [
{
"include": [
{
"flairText": [
"Contributor",
"Veteran"
]
},
{
"isMod": true
}
]
}
]
},
"condition": {
"default": "OR",
"description": "* If `OR` then any set of Criteria that pass will trigger the Rule\n* If `AND` then all Criteria sets must pass to trigger the Rule",
"enum": [
"AND",
"OR"
],
"type": "string"
},
"criteria": {
"description": "A list of Regular Expressions and conditions under which tested Activity(ies) are matched",
"examples": [
{
"matchThreshold": "> 3",
"regex": "/reddit/"
}
],
"items": {
"$ref": "#/definitions/RepostCriteria"
},
"minItems": 1,
"type": "array"
},
"itemIs": {
"anyOf": [
{
"items": {
"$ref": "#/definitions/SubmissionState"
},
"type": "array"
},
{
"items": {
"$ref": "#/definitions/CommentState"
},
"type": "array"
}
],
"description": "A list of criteria to test the state of the `Activity` against before running the Rule.\n\nIf any set of criteria passes the Rule will be run. If the criteria fails then the Rule is skipped."
},
"kind": {
"description": "The kind of rule to run",
"enum": [
"repost"
],
"examples": [
"repost"
],
"type": "string"
},
"name": {
"description": "An optional, but highly recommended, friendly name for this rule. If not present will default to `kind`.\n\nCan only contain letters, numbers, underscore, spaces, and dashes\n\nname is used to reference Rule result data during Action content templating. See CommentAction or ReportAction for more details.",
"examples": [
"myNewRule"
],
"pattern": "^[a-zA-Z]([\\w -]*[\\w])?$",
"type": "string"
}
},
"required": [
"kind"
],
"type": "object"
},
"SearchAndReplaceRegExp": {
"properties": {
"replace": {
"description": "The replacement string/value to use when search is found\n\nThis can be a literal string like `'replace with this`, an empty string to remove the search value (`''`), or a special regex value\n\nSee replacement here for more information: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace",
"type": "string"
},
"search": {
"description": "The search value to test for\n\nCan be a normal string (converted to a case-sensitive literal) or a valid regular expression\n\nEX `[\"find this string\", \"/some string*\\/ig\"]`",
"examples": [
"find this string",
"/some string*/ig"
],
"type": "string"
}
},
"required": [
"replace",
"search"
],
"type": "object"
},
"SearchFacetJSONConfig": {
"properties": {
"caseSensitive": {
"default": false,
"description": "Should text matching be case sensitive?\n\nDefaults to false",
"type": "boolean"
},
"kind": {
"anyOf": [
{
"items": {
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
},
"type": "array"
},
{
"enum": [
"crossposts",
"duplicates",
"external",
"title",
"url"
],
"type": "string"
}
]
},
"matchScore": {
"default": 85,
"description": "The percentage, as a whole number, of a repost title/comment that must match the title/comment being checked in order to consider both a match\n\nNote: Setting to 0 will make every candidate considered a match -- useful if you want to match if the URL has been reposted anywhere\n\nDefaults to `85` (85%)",
"type": "number"
},
"minWordCount": {
"default": 2,
"description": "The minimum number of words in the activity being checked for which this rule will run on\n\nIf the word count is below the minimum the rule fails\n\nDefaults to 2",
"type": "number"
},
"transformations": {
"description": "A set of search-and-replace operations to perform on text values before performing a match. Transformations are performed in the order they are defined.\n\n* If `transformationsActivity` IS NOT defined then these transformations will be performed on BOTH the activity text (submission title or comment) AND the repost candidate text\n* If `transformationsActivity` IS defined then these transformations are only performed on repost candidate text",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"transformationsActivity": {
"description": "Specify a separate set of transformations for the activity text (submission title or comment)\n\nTo perform no transformations when `transformations` is defined set this to an empty array (`[]`)",
"items": {
"$ref": "#/definitions/SearchAndReplaceRegExp"
},
"type": "array"
},
"window": {
"anyOf": [
{
"$ref": "#/definitions/ActivityWindowCriteria"
},
{
"$ref": "#/definitions/DurationObject"
},
{
"type": [
"string",
"number"
]
}
],
"description": "A value to define the range of Activities to retrieve.\n\nAcceptable values:\n\n**`ActivityWindowCriteria` object**\n\nAllows specify multiple range properties and more specific behavior\n\n**A `number` of Activities to retrieve**\n\n* EX `100` => 100 Activities\n\n*****\n\nAny of the below values that specify the amount of time to subtract from `NOW` to create a time range IE `NOW <---> [duration] ago`\n\nAcceptable values:\n\n**A `string` consisting of a value and a [Day.js](https://day.js.org/docs/en/durations/creating#list-of-all-available-units) time UNIT**\n\n* EX `9 days` => Range is `NOW <---> 9 days ago`\n\n**A [Day.js](https://day.js.org/docs/en/durations/creating) `object`**\n\n* EX `{\"days\": 90, \"minutes\": 15}` => Range is `NOW <---> 90 days and 15 minutes ago`\n\n**An [ISO 8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) `string`**\n\n* EX `PT15M` => 15 minutes => Range is `NOW <----> 15 minutes ago`",
"examples": [
"90 days"
]
}
},
"required": [
"kind"
],
"type": "object"
},
"SubmissionState": {
"description": "Different attributes a `Submission` can be in. Only include a property if you want to check it.",
"examples": [
@@ -1489,14 +1939,47 @@
"filtered": {
"type": "boolean"
},
"flairTemplate": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"is_self": {
"type": "boolean"
},
"link_flair_css_class": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"link_flair_text": {
"type": "string"
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "string"
}
]
},
"locked": {
"type": "boolean"
@@ -1512,7 +1995,7 @@
"type": "boolean"
},
"reports": {
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 100` => greater than 100",
"description": "A string containing a comparison operator and a value to compare against\n\nThe syntax is `(< OR > OR <= OR >=) <number>`\n\n* EX `> 2` => greater than 2 total reports\n\nDefaults to TOTAL reports on an Activity. Suffix the value with the report type to check that type:\n\n* EX `> 3 mod` => greater than 3 mod reports\n* EX `>= 1 user` => greater than 1 user report",
"pattern": "^\\s*(>|>=|<|<=)\\s*(\\d+)\\s*(%?)(.*)$",
"type": "string"
},
@@ -1545,6 +2028,9 @@
}
],
"properties": {
"isUserProfile": {
"type": "boolean"
},
"name": {
"anyOf": [
{
@@ -1575,6 +2061,15 @@
},
"type": "object"
},
"TimeBasedSelector": {
"enum": [
"all",
"any",
"newest",
"oldest"
],
"type": "string"
},
"UserNoteCriteria": {
"properties": {
"count": {
@@ -1649,6 +2144,9 @@
{
"$ref": "#/definitions/RegexRuleJSONConfig"
},
{
"$ref": "#/definitions/RepostRuleJSONConfig"
},
{
"type": "string"
}
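The final hunk in this file adds `RepostRuleJSONConfig` to the set of rule shapes a check may reference. A rough sketch of a check using it; the check-level fields (`name`, `kind`, `rules`, `actions`) follow ContextMod's usual check shape and are assumptions here, only the nested rule object comes from the schema in this diff:

```typescript
// Sketch only -- check-level fields are assumed, the nested rule follows RepostRuleJSONConfig
const repostCheck = {
    name: 'flagLinkReposts',
    kind: 'submission',
    rules: [
        { kind: 'repost', criteria: [{ searchOn: ['url'], window: '90 days' }] }
    ],
    actions: [
        { kind: 'report', content: 'Possible repost detected' }
    ]
};
```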

View File

@@ -1,24 +1,32 @@
import Snoowrap, {Comment, Subreddit} from "snoowrap";
import Snoowrap, {Comment, Subreddit, WikiPage} from "snoowrap";
import {Logger} from "winston";
import {SubmissionCheck} from "../Check/SubmissionCheck";
import {CommentCheck} from "../Check/CommentCheck";
import {
cacheStats, createHistoricalStatsDisplay,
cacheStats,
createHistoricalStatsDisplay,
createRetryHandler,
determineNewResults, findLastIndex, formatNumber,
mergeArr, parseFromJsonOrYamlToObject, pollingInfo, resultsSummary, sleep, totalFromMapStats, triggeredIndicator,
determineNewResults,
findLastIndex,
formatNumber, likelyJson5,
mergeArr,
parseFromJsonOrYamlToObject,
parseRedditEntity,
pollingInfo,
resultsSummary,
sleep,
totalFromMapStats,
triggeredIndicator,
} from "../util";
import {Poll} from "snoostorm";
import pEvent from "p-event";
import {RuleResult} from "../Rule";
import {ConfigBuilder, buildPollingOptions} from "../ConfigBuilder";
import {
ActionedEvent,
ActionResult,
DEFAULT_POLLING_INTERVAL,
DEFAULT_POLLING_LIMIT, Invokee,
DEFAULT_POLLING_LIMIT, FilterCriteriaDefaults, Invokee,
ManagerOptions, ManagerStateChangeOption, ManagerStats, PAUSED,
PollingOptionsStrong, ResourceStats, RUNNING, RunState, STOPPED, SYSTEM, USER
PollingOptionsStrong, PollOn, RUNNING, RunState, STOPPED, SYSTEM, USER
} from "../Common/interfaces";
import Submission from "snoowrap/dist/objects/Submission";
import {activityIsRemoved, itemContentPeek} from "../Utils/SnoowrapUtils";
@@ -38,9 +46,10 @@ import {queue, QueueObject} from 'async';
import {JSONConfig} from "../JsonConfig";
import {CheckStructuredJson} from "../Check";
import NotificationManager from "../Notification/NotificationManager";
import action from "../Web/Server/routes/authenticated/user/action";
import {createHistoricalDefaults, historicalDefaults} from "../Common/defaults";
import {ExtendedSnoowrap} from "../Utils/SnoowrapClients";
import {CMError, isRateLimitError, isStatusError} from "../Utils/Errors";
import {ErrorWithCause} from "pony-cause";
export interface RunningState {
state: RunState,
@@ -52,6 +61,7 @@ export interface runCheckOptions {
delayUntil?: number,
dryRun?: boolean,
refresh?: boolean,
force?: boolean,
}
export interface CheckTask {
@@ -61,7 +71,7 @@ export interface CheckTask {
}
export interface RuntimeManagerOptions extends ManagerOptions {
sharedModqueue?: boolean;
sharedStreams?: PollOn[];
wikiLocation?: string;
botName: string;
maxWorkers: number;
@@ -73,7 +83,7 @@ interface QueuedIdentifier {
state: 'queued' | 'processing'
}
export class Manager {
export class Manager extends EventEmitter {
subreddit: Subreddit;
client: ExtendedSnoowrap;
logger: Logger;
@@ -85,16 +95,17 @@ export class Manager {
wikiLocation: string;
lastWikiRevision?: DayjsObj
lastWikiCheck: DayjsObj = dayjs();
wikiFormat: ('yaml' | 'json') = 'yaml';
filterCriteriaDefaults?: FilterCriteriaDefaults
//wikiUpdateRunning: boolean = false;
streamListedOnce: string[] = [];
streams: SPoll<Snoowrap.Submission | Snoowrap.Comment>[] = [];
modStreamCallbacks: Map<string, any> = new Map();
streams: Map<string, SPoll<Snoowrap.Submission | Snoowrap.Comment>> = new Map();
sharedStreamCallbacks: Map<string, any> = new Map();
pollingRetryHandler: Function;
dryRun?: boolean;
sharedModqueue: boolean;
sharedStreams: PollOn[];
cacheManager: BotResourcesManager;
globalDryRun?: boolean;
emitter: EventEmitter = new EventEmitter();
queue: QueueObject<CheckTask>;
// firehose is used to ensure all activities from different polling streams are unique
// that is -- if the same activities is in both modqueue and unmoderated we don't want to process the activity twice or use stale data
@@ -130,6 +141,8 @@ export class Manager {
notificationManager: NotificationManager;
modPermissions?: string[]
// use by api nanny to slow event consumption
delayBy?: number;
@@ -182,7 +195,9 @@ export class Manager {
}
constructor(sub: Subreddit, client: ExtendedSnoowrap, logger: Logger, cacheManager: BotResourcesManager, opts: RuntimeManagerOptions = {botName: 'ContextMod', maxWorkers: 1}) {
const {dryRun, sharedModqueue = false, wikiLocation = 'botconfig/contextbot', botName, maxWorkers} = opts;
super();
const {dryRun, sharedStreams = [], wikiLocation = 'botconfig/contextbot', botName, maxWorkers, filterCriteriaDefaults} = opts;
this.displayLabel = opts.nickname || `${sub.display_name_prefixed}`;
const getLabels = this.getCurrentLabels;
const getDisplay = this.getDisplay;
@@ -198,7 +213,9 @@ export class Manager {
}, mergeArr);
this.globalDryRun = dryRun;
this.wikiLocation = wikiLocation;
this.sharedModqueue = sharedModqueue;
this.filterCriteriaDefaults = filterCriteriaDefaults;
this.sharedStreams = sharedStreams;
this.pollingRetryHandler = createRetryHandler({maxRequestRetry: 3, maxOtherRetry: 2}, this.logger);
this.subreddit = sub;
this.client = client;
this.botName = botName;
@@ -253,6 +270,18 @@ export class Manager {
})(this), 10000);
}
protected async getModPermissions(): Promise<string[]> {
if(this.modPermissions !== undefined) {
return this.modPermissions as string[];
}
this.logger.debug('Retrieving mod permissions for bot');
const userInfo = parseRedditEntity(this.botName, 'user');
const mods = this.subreddit.getModerators({name: userInfo.name});
// @ts-ignore
this.modPermissions = mods[0].mod_permissions;
return this.modPermissions as string[];
}
protected getMaxWorkers(subMaxWorkers?: number) {
let maxWorkers = this.globalMaxWorkers;
@@ -331,7 +360,7 @@ export class Manager {
return q;
}
protected async parseConfigurationFromObject(configObj: object) {
protected async parseConfigurationFromObject(configObj: object, suppressChangeEvent: boolean = false) {
try {
const configBuilder = new ConfigBuilder({logger: this.logger});
const validJson = configBuilder.validateJson(configObj);
@@ -339,6 +368,7 @@ export class Manager {
const {
polling = [{pollOn: 'unmoderated', limit: DEFAULT_POLLING_LIMIT, interval: DEFAULT_POLLING_INTERVAL}],
caching,
credentials,
dryRun,
footer,
nickname,
@@ -379,6 +409,7 @@ export class Manager {
logger: this.logger,
subreddit: this.subreddit,
caching,
credentials,
client: this.client,
};
this.resources = await this.cacheManager.set(this.subreddit.display_name, resourceConfig);
@@ -389,7 +420,11 @@ export class Manager {
const commentChecks: Array<CommentCheck> = [];
const subChecks: Array<SubmissionCheck> = [];
const structuredChecks = configBuilder.parseToStructured(validJson);
const structuredChecks = configBuilder.parseToStructured(validJson, this.filterCriteriaDefaults);
// TODO check that bot has permissions for subreddit for all specified actions
// can find permissions in this.subreddit.mod_permissions
for (const jCheck of structuredChecks) {
const checkConfig = {
...jCheck,
@@ -415,22 +450,71 @@ export class Manager {
this.logger.info(checkSummary);
}
this.validConfigLoaded = true;
} catch (err) {
if(this.eventsState.state === RUNNING) {
// need to update polling, potentially
await this.buildPolling();
for(const stream of this.streams.values()) {
if(!stream.running) {
this.logger.debug(`Starting Polling for ${stream.name.toUpperCase()} ${stream.frequency / 1000}s interval`);
stream.startInterval();
}
}
}
if(!suppressChangeEvent) {
this.emit('configChange');
}
} catch (err: any) {
this.validConfigLoaded = false;
throw err;
}
}
async parseConfiguration(causedBy: Invokee = 'system', force: boolean = false, options?: ManagerStateChangeOption) {
const {reason, suppressNotification = false} = options || {};
const {reason, suppressNotification = false, suppressChangeEvent = false} = options || {};
//this.wikiUpdateRunning = true;
this.lastWikiCheck = dayjs();
try {
let sourceData: string;
let wiki: WikiPage;
try {
// @ts-ignore
const wiki = await this.subreddit.getWikiPage(this.wikiLocation).fetch();
try {
// @ts-ignore
wiki = await this.subreddit.getWikiPage(this.wikiLocation).fetch();
} catch (err: any) {
if(isStatusError(err) && err.statusCode === 404) {
// see if we can create the page
if (!this.client.scope.includes('wikiedit')) {
throw new ErrorWithCause(`Page does not exist and could not be created because Bot does not have oauth permission 'wikiedit'`, {cause: err});
}
const modPermissions = await this.getModPermissions();
if (!modPermissions.includes('all') && !modPermissions.includes('wiki')) {
throw new ErrorWithCause(`Page does not exist and could not be created because Bot does not have mod permissions for creating wiki pages. Must have 'all' or 'wiki'`, {cause: err});
}
if(!this.client.scope.includes('modwiki')) {
throw new ErrorWithCause(`Bot COULD create wiki config page but WILL NOT because it does not have the oauth permission 'modwiki' which is required to set page visibility and editing permissions. Safety first!`, {cause: err});
}
// @ts-ignore
wiki = await this.subreddit.getWikiPage(this.wikiLocation).edit({
text: '',
reason: 'Empty configuration created for ContextMod'
});
this.logger.info(`Wiki page at ${this.wikiLocation} did not exist so bot created it!`);
// 0 = use subreddit wiki permissions
// 1 = only approved wiki contributors
// 2 = only mods may edit and view
// @ts-ignore
await this.subreddit.getWikiPage(this.wikiLocation).editSettings({
permissionLevel: 2,
// don't list this page on r/[subreddit]/wiki/pages
listed: false,
});
this.logger.info('Bot set wiki page visibility to MODS ONLY');
} else {
throw err;
}
}
const revisionDate = dayjs.unix(wiki.revision_date);
if (!force && this.validConfigLoaded && (this.lastWikiRevision !== undefined && this.lastWikiRevision.isSame(revisionDate))) {
// nothing to do, we already have this revision
@@ -456,27 +540,38 @@ export class Manager {
this.lastWikiRevision = revisionDate;
sourceData = await wiki.content_md;
} catch (err) {
const msg = `Could not read wiki configuration. Please ensure the page https://reddit.com${this.subreddit.url}wiki/${this.wikiLocation} exists and is readable -- error: ${err.message}`;
this.logger.error(msg);
throw new ConfigParseError(msg);
} catch (err: any) {
let hint = '';
if(isStatusError(err) && err.statusCode === 403) {
hint = ` -- HINT: Either the page is restricted to mods only and the bot's reddit account does not have the mod permission 'all' or 'wiki' OR the bot does not have the 'wikiread' oauth permission`;
}
const msg = `Could not read wiki configuration. Please ensure the page https://reddit.com${this.subreddit.url}wiki/${this.wikiLocation} exists and is readable${hint}`;
throw new ErrorWithCause(msg, {cause: err});
}
if (sourceData === '') {
if (sourceData.replace('\r\n', '').trim() === '') {
this.logger.error(`Wiki page contents was empty`);
throw new ConfigParseError('Wiki page contents was empty');
}
const [configObj, jsonErr, yamlErr] = parseFromJsonOrYamlToObject(sourceData);
const [format, configObj, jsonErr, yamlErr] = parseFromJsonOrYamlToObject(sourceData);
this.wikiFormat = format;
if (configObj === undefined) {
this.logger.error(`Could not parse wiki page contents as JSON or YAML:`);
this.logger.error(jsonErr);
this.logger.error(yamlErr);
this.logger.error(`Could not parse wiki page contents as JSON or YAML. Looks like it should be ${this.wikiFormat}?`);
if (this.wikiFormat === 'json') {
this.logger.error(jsonErr);
this.logger.error('Check DEBUG output for yaml error');
this.logger.debug(yamlErr);
} else {
this.logger.error(yamlErr);
this.logger.error('Check DEBUG output for json error');
this.logger.debug(jsonErr);
}
throw new ConfigParseError('Could not parse wiki page contents as JSON or YAML')
}
await this.parseConfigurationFromObject(configObj);
await this.parseConfigurationFromObject(configObj.toJS(), suppressChangeEvent);
this.logger.info('Checks updated');
if(!suppressNotification) {
@@ -484,9 +579,13 @@ export class Manager {
}
return true;
} catch (err) {
} catch (err: any) {
const error = new ErrorWithCause('Failed to parse subreddit configuration', {cause: err});
// @ts-ignore
//error.logged = true;
this.logger.error(error);
this.validConfigLoaded = false;
throw err;
throw error;
}
}
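Since `Manager` now extends `EventEmitter` and emits `configChange` after a successful parse/rebuild (and `error` from the event-handling paths further down), a consumer such as the bot can subscribe to both. A minimal sketch, assuming a `manager` instance and the `./Manager` import path, with placeholder handler bodies:

```typescript
import { Manager } from "./Manager";

// Hypothetical consumer wiring for the new Manager events
declare const manager: Manager;

manager.on('configChange', () => {
    // e.g. rebuild bot-level shared streams so they reflect the manager's new polling config
});
manager.on('error', (err: any) => {
    // centralize error reporting/notification for this subreddit
});
```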
@@ -494,6 +593,18 @@ export class Manager {
const checks = checkType === 'Comment' ? this.commentChecks : this.submissionChecks;
let item = activity;
const itemId = await item.id;
if(await this.resources.hasRecentSelf(item)) {
const {force = false} = options || {};
let recentMsg = `Found in Activities recently modified/created by this bot (within the last ${this.resources.selfTTL} seconds)`;
if(force) {
this.logger.debug(`${recentMsg} but will run anyway because "force" option was true.`);
} else {
this.logger.debug(`${recentMsg} so will skip running.`);
return;
}
}
let allRuleResults: RuleResult[] = [];
const itemIdentifier = `${checkType === 'Submission' ? 'SUB' : 'COM'} ${itemId}`;
this.currentLabels = [itemIdentifier];
@@ -502,50 +613,10 @@ export class Manager {
const [peek, _] = await itemContentPeek(item);
ePeek = peek;
this.logger.info(`<EVENT> ${peek}`);
} catch (err) {
} catch (err: any) {
this.logger.error(`Error occurred while generate item peek for ${checkType} Activity ${itemId}`, err);
}
const {
checkNames = [],
delayUntil,
dryRun,
refresh = false,
} = options || {};
let wasRefreshed = false;
if (delayUntil !== undefined) {
const created = dayjs.unix(item.created_utc);
const diff = dayjs().diff(created, 's');
if (diff < delayUntil) {
this.logger.verbose(`Delaying processing until Activity is ${delayUntil} seconds old (${delayUntil - diff}s)`);
await sleep(delayUntil - diff);
// @ts-ignore
item = await activity.refresh();
wasRefreshed = true;
}
}
// refresh signal from firehose if activity was ingested multiple times before processing or re-queued while processing
// want to make sure we have the most recent data
if(!wasRefreshed && refresh === true) {
this.logger.verbose('Refreshed data (probably due to signal from firehose)');
// @ts-ignore
item = await activity.refresh();
}
const startingApiLimit = this.client.ratelimitRemaining;
if (item instanceof Submission) {
if (await item.removed_by_category === 'deleted') {
this.logger.warn('Submission was deleted, cannot process.');
return;
}
} else if (item.author.name === '[deleted]') {
this.logger.warn('Comment was deleted, cannot process.');
return;
}
let checksRun = 0;
let actionsRun = 0;
let totalRulesRun = 0;
@@ -567,7 +638,48 @@ export class Manager {
let triggeredCheckName;
const checksRunNames = [];
const cachedCheckNames = [];
const startingApiLimit = this.client.ratelimitRemaining;
const {
checkNames = [],
delayUntil,
dryRun,
refresh = false,
} = options || {};
let wasRefreshed = false;
try {
if (delayUntil !== undefined) {
const created = dayjs.unix(item.created_utc);
const diff = dayjs().diff(created, 's');
if (diff < delayUntil) {
this.logger.verbose(`Delaying processing until Activity is ${delayUntil} seconds old (${delayUntil - diff}s)`);
await sleep(delayUntil - diff);
// @ts-ignore
item = await activity.refresh();
wasRefreshed = true;
}
}
// refresh signal from firehose if activity was ingested multiple times before processing or re-queued while processing
// want to make sure we have the most recent data
if(!wasRefreshed && refresh === true) {
this.logger.verbose('Refreshed data (probably due to signal from firehose)');
// @ts-ignore
item = await activity.refresh();
}
if (item instanceof Submission) {
if (await item.removed_by_category === 'deleted') {
this.logger.warn('Submission was deleted, cannot process.');
return;
}
} else if (item.author.name === '[deleted]') {
this.logger.warn('Comment was deleted, cannot process.');
return;
}
for (const check of checks) {
if (checkNames.length > 0 && !checkNames.map(x => x.toLowerCase()).some(x => x === check.name.toLowerCase())) {
this.logger.warn(`Check ${check.name} not in array of requested checks to run, skipping...`);
@@ -598,10 +710,11 @@ export class Manager {
this.logger.info('Check was triggered but cache result options specified NOT to run actions...counting as check NOT triggered');
triggered = false;
}
} catch (e) {
} catch (e: any) {
if (e.logged !== true) {
this.logger.warn(`Running rules for Check ${check.name} failed due to uncaught exception`, e);
}
this.emit('error', e);
}
if (triggered) {
@@ -614,6 +727,11 @@ export class Manager {
actionedEvent.ruleSummary = resultsSummary(currentResults, check.condition);
}
runActions = await check.runActions(item, currentResults.filter(x => x.triggered), dryRun);
// we only care about report and comment actions since those can produce items for newComm and modqueue
const recentCandidates = runActions.filter(x => ['report','comment'].includes(x.kind.toLocaleLowerCase())).map(x => x.touchedEntities === undefined ? [] : x.touchedEntities).flat();
for(const recent of recentCandidates) {
await this.resources.setRecentSelf(recent as (Submission|Comment));
}
actionsRun = runActions.length;
if(check.notifyOnTrigger) {
@@ -628,10 +746,11 @@ export class Manager {
this.logger.info('No checks triggered');
}
} catch (err) {
} catch (err: any) {
if (!(err instanceof LoggedError) && err.logged !== true) {
this.logger.error('An unhandled error occurred while running checks', err);
}
this.emit('error', err);
} finally {
try {
actionedEvent.actionResults = runActions;
@@ -642,7 +761,7 @@ export class Manager {
this.logger.verbose(`Run Stats: Checks ${checksRun} | Rules => Total: ${totalRulesRun} Unique: ${allRuleResults.length} Cached: ${totalRulesRun - allRuleResults.length} Rolling Avg: ~${formatNumber(this.rulesUniqueRollingAvg)}/s | Actions ${actionsRun}`);
this.logger.verbose(`Reddit API Stats: Initial ${startingApiLimit} | Current ${this.client.ratelimitRemaining} | Used ~${startingApiLimit - this.client.ratelimitRemaining} | Events ~${formatNumber(this.eventsRollingAvg)}/s`);
this.currentLabels = [];
} catch (err) {
} catch (err: any) {
this.logger.error('Error occurred while cleaning up Activity check and generating stats', err);
} finally {
this.resources.updateHistoricalStats({
@@ -660,126 +779,192 @@ export class Manager {
}
}
async buildPolling() {
// give current handle() time to stop
//await sleep(1000);
isPollingShared(streamName: string): boolean {
const pollOption = this.pollOptions.find(x => x.pollOn === streamName);
return pollOption !== undefined && pollOption.limit === DEFAULT_POLLING_LIMIT && pollOption.interval === DEFAULT_POLLING_INTERVAL && this.sharedStreams.includes(streamName as PollOn);
}
const retryHandler = createRetryHandler({maxRequestRetry: 5, maxOtherRetry: 1}, this.logger);
async buildPolling() {
const sources: PollOn[] = ['unmoderated', 'modqueue', 'newComm', 'newSub'];
const subName = this.subreddit.display_name;
for (const pollOpt of this.pollOptions) {
const {
pollOn,
limit,
interval,
delayUntil,
clearProcessed,
} = pollOpt;
let stream: SPoll<Snoowrap.Submission | Snoowrap.Comment>;
let modStreamType: string | undefined;
for (const source of sources) {
switch (pollOn) {
case 'unmoderated':
if (limit === DEFAULT_POLLING_LIMIT && interval === DEFAULT_POLLING_INTERVAL && this.sharedModqueue) {
modStreamType = 'unmoderated';
// use default mod stream from resources
stream = this.cacheManager.modStreams.get('unmoderated') as SPoll<Snoowrap.Submission | Snoowrap.Comment>;
} else {
stream = new UnmoderatedStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
clearProcessed,
});
}
break;
case 'modqueue':
if (limit === DEFAULT_POLLING_LIMIT && interval === DEFAULT_POLLING_INTERVAL) {
modStreamType = 'modqueue';
// use default mod stream from resources
stream = this.cacheManager.modStreams.get('modqueue') as SPoll<Snoowrap.Submission | Snoowrap.Comment>;
} else {
stream = new ModQueueStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
clearProcessed
});
}
break;
case 'newSub':
stream = new SubmissionStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
clearProcessed
});
break;
case 'newComm':
stream = new CommentStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
clearProcessed
});
break;
if (!sources.includes(source)) {
this.logger.error(`'${source}' is not a valid polling source. Valid sources: unmoderated | modqueue | newComm | newSub`);
continue;
}
stream.once('listing', async (listing) => {
if (!this.streamListedOnce.includes(pollOn)) {
// warning if poll event could potentially miss activities
if (this.commentChecks.length === 0 && ['unmoderated', 'modqueue', 'newComm'].some(x => x === pollOn)) {
this.logger.warn(`Polling '${pollOn}' may return Comments but no comments checks were configured.`);
}
if (this.submissionChecks.length === 0 && ['unmoderated', 'modqueue', 'newSub'].some(x => x === pollOn)) {
this.logger.warn(`Polling '${pollOn}' may return Submissions but no submission checks were configured.`);
}
this.streamListedOnce.push(pollOn);
const pollOpt = this.pollOptions.find(x => x.pollOn.toLowerCase() === source.toLowerCase());
if (pollOpt === undefined) {
if(this.sharedStreamCallbacks.has(source)) {
this.logger.debug(`Removing listener for shared polling on ${source.toUpperCase()} because it no longer exists in config`);
this.sharedStreamCallbacks.delete(source);
}
});
const onItem = async (item: Comment | Submission) => {
if (!this.streamListedOnce.includes(pollOn)) {
return;
const existingStream = this.streams.get(source);
if (existingStream !== undefined) {
this.logger.debug(`Stopping polling on ${source.toUpperCase()} because it no longer exists in config`);
existingStream.end();
this.streams.delete(source);
}
if (item.subreddit.display_name !== subName || this.eventsState.state !== RUNNING) {
return;
}
let checkType: 'Submission' | 'Comment' | undefined;
if (item instanceof Submission) {
if (this.submissionChecks.length > 0) {
checkType = 'Submission';
}
} else if (this.commentChecks.length > 0) {
checkType = 'Comment';
}
if (checkType !== undefined) {
this.firehose.push({checkType, activity: item, options: {delayUntil}})
}
};
if (modStreamType !== undefined) {
this.modStreamCallbacks.set(pollOn, onItem);
} else {
stream.on('item', onItem);
// @ts-ignore
stream.on('error', async (err: any) => {
this.logger.error('Polling error occurred', err);
const shouldRetry = await retryHandler(err);
if (shouldRetry) {
stream.startInterval();
} else {
this.logger.warn('Pausing event polling due to too many errors');
await this.pauseEvents();
const {
limit,
interval,
delayUntil,
} = pollOpt;
let stream: SPoll<Snoowrap.Submission | Snoowrap.Comment>;
let modStreamType: string | undefined;
switch (source) {
case 'unmoderated':
if (limit === DEFAULT_POLLING_LIMIT && interval === DEFAULT_POLLING_INTERVAL && this.sharedStreams.includes(source)) {
modStreamType = 'unmoderated';
// use default mod stream from resources
stream = this.cacheManager.modStreams.get('unmoderated') as SPoll<Snoowrap.Submission | Snoowrap.Comment>;
} else {
stream = new UnmoderatedStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
logger: this.logger,
});
}
break;
case 'modqueue':
if (limit === DEFAULT_POLLING_LIMIT && interval === DEFAULT_POLLING_INTERVAL && this.sharedStreams.includes(source)) {
modStreamType = 'modqueue';
// use default mod stream from resources
stream = this.cacheManager.modStreams.get('modqueue') as SPoll<Snoowrap.Submission | Snoowrap.Comment>;
} else {
stream = new ModQueueStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
logger: this.logger,
});
}
break;
case 'newSub':
if (limit === DEFAULT_POLLING_LIMIT && interval === DEFAULT_POLLING_INTERVAL && this.sharedStreams.includes(source)) {
modStreamType = 'newSub';
// use default mod stream from resources
stream = this.cacheManager.modStreams.get('newSub') as SPoll<Snoowrap.Submission | Snoowrap.Comment>;
} else {
stream = new SubmissionStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
logger: this.logger,
});
}
break;
case 'newComm':
if (limit === DEFAULT_POLLING_LIMIT && interval === DEFAULT_POLLING_INTERVAL && this.sharedStreams.includes(source)) {
modStreamType = 'newComm';
// use default mod stream from resources
stream = this.cacheManager.modStreams.get('newComm') as SPoll<Snoowrap.Submission | Snoowrap.Comment>;
} else {
stream = new CommentStream(this.client, {
subreddit: this.subreddit.display_name,
limit: limit,
pollTime: interval * 1000,
logger: this.logger,
});
}
break;
}
if (stream === undefined) {
this.logger.error(`Should have found polling source for '${source}' but it did not exist for some reason!`);
continue;
}
const onItem = async (item: Comment | Submission) => {
if (item.subreddit.display_name !== subName || this.eventsState.state !== RUNNING) {
return;
}
});
this.streams.push(stream);
let checkType: 'Submission' | 'Comment' | undefined;
if (item instanceof Submission) {
if (this.submissionChecks.length > 0) {
checkType = 'Submission';
}
} else if (this.commentChecks.length > 0) {
checkType = 'Comment';
}
if (checkType !== undefined) {
this.firehose.push({checkType, activity: item, options: {delayUntil}})
}
};
if (modStreamType !== undefined) {
let removedOwn = false;
const existingStream = this.streams.get(source);
if(existingStream !== undefined) {
existingStream.end();
this.streams.delete(source);
removedOwn = true;
}
if(!this.sharedStreamCallbacks.has(source)) {
stream.once('listing', this.noChecksWarning(source));
this.logger.debug(`${removedOwn ? 'Stopped own polling and replaced with ' : 'Set '}listener on shared polling ${source}`);
}
this.sharedStreamCallbacks.set(source, onItem);
} else {
let ownPollingMsgParts: string[] = [];
let removedShared = false;
if(this.sharedStreamCallbacks.has(source)) {
removedShared = true;
this.sharedStreamCallbacks.delete(source);
ownPollingMsgParts.push('removed shared polling listener');
}
const existingStream = this.streams.get(source);
let processed;
if (existingStream !== undefined) {
ownPollingMsgParts.push('replaced existing');
processed = existingStream.processed;
existingStream.end();
} else {
ownPollingMsgParts.push('create new');
stream.once('listing', this.noChecksWarning(source));
}
this.logger.debug(`Polling ${source.toUpperCase()} => ${ownPollingMsgParts.join(' and ')} dedicated stream`);
stream.on('item', onItem);
// @ts-ignore
stream.on('error', async (err: any) => {
this.emit('error', err);
const shouldRetry = await this.pollingRetryHandler(err);
if (shouldRetry) {
stream.startInterval(false, 'Within retry limits');
} else {
this.logger.warn('Stopping subreddit processing/polling due to too many errors');
await this.stop();
}
});
this.streams.set(source, stream);
}
}
}
}
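// Illustrative sketch (not part of this diff): the decision rule the switch above applies for each
// polling source. A source only piggybacks on the bot-wide shared mod stream when the subreddit
// keeps the default limit AND the default interval AND has opted the source into sharing; any
// custom value forces a dedicated stream so per-subreddit settings are honored. All names and the
// default values below are placeholders for the sketch, not the real constants.
type PollSource = 'unmoderated' | 'modqueue' | 'newSub' | 'newComm';

interface PollSettings {
    limit: number;
    interval: number; // seconds
}

const usesSharedStream = (
    source: PollSource,
    settings: PollSettings,
    sharedStreams: PollSource[],
    defaults: PollSettings = {limit: 50, interval: 30} // placeholder defaults
): boolean =>
    settings.limit === defaults.limit
    && settings.interval === defaults.interval
    && sharedStreams.includes(source);

// usesSharedStream('newComm', {limit: 50, interval: 30}, ['newComm']) => true  (reuse shared stream)
// usesSharedStream('newComm', {limit: 25, interval: 30}, ['newComm']) => false (dedicated stream)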
noChecksWarning = (source: PollOn) => (listing: any) => {
if (this.commentChecks.length === 0 && ['modqueue', 'newComm'].some(x => x === source)) {
this.logger.warn(`Polling '${source.toUpperCase()}' may return Comments but no comment checks were configured.`);
}
if (this.submissionChecks.length === 0 && ['unmoderated', 'modqueue', 'newSub'].some(x => x === source)) {
this.logger.warn(`Polling '${source.toUpperCase()}' may return Submissions but no submission checks were configured.`);
}
}
startQueue(causedBy: Invokee = 'system', options?: ManagerStateChangeOption) {
const {reason, suppressNotification = false} = options || {};
if(this.queueState.state === RUNNING) {
@@ -856,10 +1041,19 @@ export class Manager {
} else {
const pauseWaitStart = dayjs();
this.logger.info(`Activity processing queue is stopping...waiting for ${this.queue.running()} activities to finish processing`);
const fullStopTime = dayjs().add(5, 'seconds');
let gracefulStop = true;
while (this.queue.running() > 0) {
gracefulStop = false;
if(dayjs().isAfter(fullStopTime)) {
break;
}
await sleep(1500);
this.logger.verbose(`Activity processing queue is stopping...waiting for ${this.queue.running()} activities to finish processing`);
}
if(!gracefulStop) {
this.logger.warn('Waited longer than 5 seconds to stop activities. Something isn\'t right so forcing stop :/ ');
}
this.logger.info(`Activity processing queue stopped by ${causedBy} and ${this.queue.length()} queued activities cleared (waited ${dayjs().diff(pauseWaitStart, 's')} seconds while activity processing finished)`);
this.firehose.kill();
this.queue.kill();
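// Illustrative sketch (not part of this diff): the wait-then-force pattern used above, pulled out
// as a generic helper. runningCount() stands in for this.queue.running(); the 5s budget and 1.5s
// check cadence mirror the code above. Returns whether the queue drained gracefully.
const waitForDrain = async (
    runningCount: () => number,
    maxWaitMs = 5000,
    checkEveryMs = 1500
): Promise<boolean> => {
    const deadline = Date.now() + maxWaitMs;
    while (runningCount() > 0) {
        if (Date.now() > deadline) {
            return false; // gave up -- caller should force-stop and warn
        }
        await new Promise(resolve => setTimeout(resolve, checkEveryMs));
    }
    return true; // all in-flight activities finished in time
};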
@@ -896,7 +1090,10 @@ export class Manager {
this.logger.warn('No submission or comment checks found!');
}
for (const s of this.streams) {
if (this.streams.size > 0) {
this.logger.debug(`Starting own streams => ${[...this.streams.values()].map(x => `${x.name.toUpperCase()} ${x.frequency / 1000}s interval`).join(' | ')}`)
}
for (const s of this.streams.values()) {
s.startInterval();
}
this.startedAt = dayjs();
@@ -921,7 +1118,7 @@ export class Manager {
state: PAUSED,
causedBy
};
for(const s of this.streams) {
for(const s of this.streams.values()) {
s.end();
}
if(causedBy === USER) {
@@ -938,15 +1135,10 @@ export class Manager {
stopEvents(causedBy: Invokee = 'system', options?: ManagerStateChangeOption) {
const {reason, suppressNotification = false} = options || {};
if(this.eventsState.state !== STOPPED) {
for (const s of this.streams) {
for (const s of this.streams.values()) {
s.end();
}
this.streams = [];
// for (const [k, v] of this.modStreamCallbacks) {
// const stream = this.cacheManager.modStreams.get(k) as Poll<Snoowrap.Submission | Snoowrap.Comment>;
// stream.removeListener('item', v);
// }
this.modStreamCallbacks = new Map();
this.streams = new Map();
this.startedAt = undefined;
this.logger.info(`Events STOPPED by ${causedBy}`);
this.eventsState = {


@@ -1,108 +1,151 @@
import {Poll, SnooStormOptions} from "snoostorm"
import Snoowrap from "snoowrap";
import Snoowrap, {Listing} from "snoowrap";
import {EventEmitter} from "events";
import {PollConfiguration} from "snoostorm/out/util/Poll";
import {ClearProcessedOptions, DEFAULT_POLLING_INTERVAL} from "../Common/interfaces";
import dayjs, {Dayjs} from "dayjs";
import { Duration } from "dayjs/plugin/duration";
import {parseDuration, sleep} from "../util";
import setRandomInterval from 'set-random-interval';
import {DEFAULT_POLLING_INTERVAL} from "../Common/interfaces";
import {mergeArr, parseDuration, random} from "../util";
import { Logger } from "winston";
import {ErrorWithCause} from "pony-cause";
type Awaitable<T> = Promise<T> | T;
interface RCBPollingOptions extends SnooStormOptions {
interface RCBPollingOptions<T> extends SnooStormOptions {
subreddit: string,
clearProcessed?: ClearProcessedOptions
enforceContinuity?: boolean
logger: Logger
name?: string,
processed?: Set<T[keyof T]>
label?: string
}
interface RCBPollConfiguration<T> extends PollConfiguration<T> {
clearProcessed?: ClearProcessedOptions
interface RCBPollConfiguration<T> extends PollConfiguration<T>,RCBPollingOptions<T> {
get: () => Promise<Listing<T>>
}
export class SPoll<T extends object> extends Poll<T> {
identifier: keyof T;
getter: () => Awaitable<T[]>;
getter: () => Promise<Listing<T>>;
frequency;
running: boolean = false;
clearProcessedDuration?: Duration;
clearProcessedSize?: number;
clearProcessedAfter?: Dayjs;
retainProcessed: number = 0;
// intention of newStart is to ensure only "new" items -- those that appear AFTER polling has started -- get emitted
// -- that is, we don't want to emit the items fetched immediately on a fresh poll start since they existed "before" polling started
newStart: boolean = true;
enforceContinuity: boolean;
randInterval?: { clear: () => void };
name: string = 'Reddit Stream';
logger: Logger;
subreddit: string;
constructor(options: RCBPollConfiguration<T>) {
super(options);
this.identifier = options.identifier;
this.getter = options.get;
this.frequency = options.frequency;
const {
after,
size,
retain = 0,
} = options.clearProcessed || {};
if(after !== undefined) {
this.clearProcessedDuration = parseDuration(after);
}
this.clearProcessedSize = size;
this.retainProcessed = retain;
if (this.clearProcessedDuration !== undefined) {
this.clearProcessedAfter = dayjs().add(this.clearProcessedDuration.asSeconds(), 's');
identifier,
get,
frequency,
enforceContinuity = false,
logger,
name,
subreddit,
label = 'Polling',
processed
} = options;
this.subreddit = subreddit;
this.name = name !== undefined ? name : this.name;
this.logger = logger.child({labels: [label, this.name]}, mergeArr)
this.identifier = identifier;
this.getter = get;
this.frequency = frequency;
this.enforceContinuity = enforceContinuity;
// if we pass in processed on init the intention is to "continue" from where the previous stream left off
// WITHOUT new start behavior
if (processed !== undefined) {
this.processed = processed;
this.newStart = false;
}
clearInterval(this.interval);
}
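// Illustrative sketch (not part of this diff): handing an old stream's processed set to a
// replacement stream so polling "continues" without re-buffering or re-emitting items the old
// stream already saw, per the constructor comment above. The './Streams' import path and the
// caller-supplied client/logger are assumptions; 50 / 30s are placeholder settings.
import Snoowrap from 'snoowrap';
import {Logger} from 'winston';
import {CommentStream, SPoll} from './Streams';

const replaceCommentStream = (
    client: Snoowrap,
    logger: Logger,
    oldStream: SPoll<Snoowrap.Submission | Snoowrap.Comment>,
    subreddit: string
) => {
    const replacement = new CommentStream(client, {
        subreddit,
        limit: 50,
        pollTime: 30 * 1000,
        logger,
        // inheriting processed keeps newStart false => no duplicate emits, no silent first fetch
        processed: oldStream.processed,
    });
    oldStream.end('Replaced by a stream with new settings');
    return replacement;
};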
startInterval = () => {
this.running = true;
this.randInterval = setRandomInterval((function (self) {
createInterval = () => {
this.interval = setTimeout((function (self) {
return async () => {
try {
// clear the tracked, processed activity ids after a set period or number of activities have been processed
// because when RCB is long-running and has streams from high-volume subreddits this list never gets smaller...
// so clear if after time period
if ((self.clearProcessedAfter !== undefined && dayjs().isSameOrAfter(self.clearProcessedAfter))
// or clear if processed list is larger than defined max allowable size (default setting, 2 * polling option limit)
|| (self.clearProcessedSize !== undefined && self.processed.size >= self.clearProcessedSize)) {
if (self.retainProcessed === 0) {
self.processed = new Set();
} else {
// retain some processed so we have continuity between processed list resets -- this is default behavior and retains polling option limit # of activities
// we can slice from the set here because ID order is guaranteed for Set object so list is oldest -> newest
// -- retain last LIMIT number of activities (or all if retain # is larger than list due to user config error)
self.processed = new Set(Array.from(self.processed).slice(Math.max(0, self.processed.size - self.retainProcessed)));
}
// reset time interval if there is one
if (self.clearProcessedAfter !== undefined && self.clearProcessedDuration !== undefined) {
self.clearProcessedAfter = dayjs().add(self.clearProcessedDuration.asSeconds(), 's');
}
}
const batch = await self.getter();
self.logger.debug('Polling...');
let batch = await self.getter();
const newItems: T[] = [];
for (const item of batch) {
const id = item[self.identifier];
if (self.processed.has(id)) continue;
let anyAlreadySeen = false;
let page = 1;
// initial iteration should always run
// but only continue iterating if stream enforces continuity and we've only seen new items so far
while(page === 1 || (self.enforceContinuity && !self.newStart && !anyAlreadySeen)) {
if(page !== 1) {
self.logger.debug(`Did not find any already-seen activities and continuity is enforced. This probably means there were more new items than one API call can return. Fetching next page (${page})...`);
// @ts-ignore
batch = await batch.fetchMore({amount: 100});
}
if(batch.length === 0 || batch.isFinished) {
// if nothing is returned we don't want to end up in an endless loop!
anyAlreadySeen = true;
}
for (const item of batch) {
const id = item[self.identifier];
if (self.processed.has(id)) {
anyAlreadySeen = true;
continue;
}
// Emit for new items and add it to the list
newItems.push(item);
self.processed.add(id);
self.emit("item", item);
// record the new item and mark it processed
newItems.push(item);
self.processed.add(id);
// but don't emit on new start since we are "buffering" already existing activities
if(!self.newStart) {
self.emit("item", item);
}
}
page++;
}
// Emit the new listing of all new items
self.emit("listing", newItems);
} catch (err) {
const newItemMsg = `Found ${newItems.length} new items out of ${batch.length} returned`;
if(self.newStart) {
self.logger.debug(`${newItemMsg} but will ignore all on first start.`);
self.emit("listing", []);
} else {
self.logger.debug(newItemMsg);
// Emit the new listing of all new items
self.emit("listing", newItems);
}
// no longer new start on n+1 interval
self.newStart = false;
// if everything succeeded then create a new timeout
self.createInterval();
} catch (err: any) {
self.running = false;
self.logger.error(new ErrorWithCause('Polling Interval stopped due to error encountered', {cause: err}));
self.emit('error', err);
self.end();
}
}
})(this), this.frequency - 1, this.frequency + 1);
})(this), random(this.frequency - 1, this.frequency + 1));
}
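// Illustrative sketch (not part of this diff): the continuity rule above, reduced to a standalone
// function. The idea is to keep fetching further pages only while every item seen so far is new --
// the first already-processed item proves the current pages overlap with the previous poll, so
// nothing was missed in between. Page/fetchNext/idOf are assumptions for the sketch; the real
// createInterval additionally skips extra pages on a fresh start (newStart).
interface Page<T> {
    items: T[];
    isFinished: boolean;
}

const collectNewItems = async <T, K>(
    firstPage: Page<T>,
    fetchNext: () => Promise<Page<T>>,
    idOf: (item: T) => K,
    processed: Set<K>,
    enforceContinuity: boolean
): Promise<T[]> => {
    const newItems: T[] = [];
    let page = firstPage;
    let pageNum = 1;
    let anyAlreadySeen = false;
    while (pageNum === 1 || (enforceContinuity && !anyAlreadySeen)) {
        if (pageNum !== 1) {
            page = await fetchNext();
        }
        if (page.items.length === 0 || page.isFinished) {
            // nothing more can be fetched -- bail out rather than loop forever
            anyAlreadySeen = true;
        }
        for (const item of page.items) {
            const id = idOf(item);
            if (processed.has(id)) {
                anyAlreadySeen = true;
                continue;
            }
            processed.add(id);
            newItems.push(item);
        }
        pageNum++;
    }
    return newItems;
};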
end = () => {
this.running = false;
if(this.randInterval !== undefined) {
this.randInterval.clear();
// allow controlling newStart state
startInterval = (newStartState?: boolean, msg?: string) => {
this.running = true;
if(newStartState !== undefined) {
this.newStart = newStartState;
}
const startMsg = `Polling Interval Started${msg !== undefined ? `: ${msg}` : ''}`;
this.logger.debug(startMsg);
this.createInterval();
}
end = (reason?: string) => {
let msg = 'Stopping Polling Interval';
if(reason !== undefined) {
msg += `: ${reason}`;
}
this.logger.debug(msg);
this.running = false;
this.newStart = true;
super.end();
}
}
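// Illustrative sketch (not part of this diff): driving the reworked lifecycle from a caller. After
// a recoverable error the stream is restarted with newStart = false so it keeps emitting instead of
// silently re-buffering its first fetch; otherwise it is ended with a readable reason. The
// './Streams' path and isRecoverable() are assumptions; the @ts-ignore mirrors the Manager code
// earlier in this diff.
import Snoowrap from 'snoowrap';
import {SPoll} from './Streams';

const wireErrorRecovery = (
    stream: SPoll<Snoowrap.Submission | Snoowrap.Comment>,
    isRecoverable: (err: unknown) => Promise<boolean>
) => {
    // @ts-ignore
    stream.on('error', async (err: any) => {
        if (await isRecoverable(err)) {
            stream.startInterval(false, 'Within retry limits'); // resume without re-buffering
        } else {
            stream.end('Too many consecutive polling errors');
        }
    });
};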
@@ -110,12 +153,13 @@ export class SPoll<T extends object> extends Poll<T> {
export class UnmoderatedStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
constructor(
client: Snoowrap,
options: RCBPollingOptions) {
options: RCBPollingOptions<Snoowrap.Submission | Snoowrap.Comment>) {
super({
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
get: async () => client.getSubreddit(options.subreddit).getUnmoderated(options),
identifier: "id",
clearProcessed: options.clearProcessed
name: 'Unmoderated',
...options,
});
}
}
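// Illustrative sketch (not part of this diff): minimal wiring for one of these streams -- create it
// with per-subreddit settings, attach a one-time 'listing' listener and an 'item' handler, then
// start polling. The './Streams' path, client, logger, and the 50 / 30s settings are assumptions.
import Snoowrap from 'snoowrap';
import {Logger} from 'winston';
import {UnmoderatedStream} from './Streams';

const startUnmoderatedPolling = (
    client: Snoowrap,
    logger: Logger,
    subreddit: string,
    onItem: (item: Snoowrap.Submission | Snoowrap.Comment) => Promise<void>
) => {
    const stream = new UnmoderatedStream(client, {
        subreddit,
        limit: 50,           // max items fetched per poll
        pollTime: 30 * 1000, // milliseconds between polls
        logger,
    });
    stream.once('listing', () => logger.debug(`First ${stream.name} listing received`));
    stream.on('item', onItem);
    stream.startInterval();
    return stream;
};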
@@ -123,12 +167,13 @@ export class UnmoderatedStream extends SPoll<Snoowrap.Submission | Snoowrap.Comm
export class ModQueueStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
constructor(
client: Snoowrap,
options: RCBPollingOptions) {
options: RCBPollingOptions<Snoowrap.Submission | Snoowrap.Comment>) {
super({
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
get: async () => client.getSubreddit(options.subreddit).getModqueue(options),
identifier: "id",
clearProcessed: options.clearProcessed
name: 'Modqueue',
...options,
});
}
}
@@ -136,12 +181,13 @@ export class ModQueueStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment
export class SubmissionStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
constructor(
client: Snoowrap,
options: RCBPollingOptions) {
options: RCBPollingOptions<Snoowrap.Submission | Snoowrap.Comment>) {
super({
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
get: async () => client.getNew(options.subreddit, options),
identifier: "id",
clearProcessed: options.clearProcessed
name: 'Submission',
...options,
});
}
}
@@ -149,12 +195,13 @@ export class SubmissionStream extends SPoll<Snoowrap.Submission | Snoowrap.Comme
export class CommentStream extends SPoll<Snoowrap.Submission | Snoowrap.Comment> {
constructor(
client: Snoowrap,
options: RCBPollingOptions) {
options: RCBPollingOptions<Snoowrap.Submission | Snoowrap.Comment>) {
super({
frequency: options.pollTime || DEFAULT_POLLING_INTERVAL * 1000,
get: async () => client.getNewComments(options.subreddit, options),
identifier: "id",
clearProcessed: options.clearProcessed
name: 'Comment',
...options,
});
}
}

Some files were not shown because too many files have changed in this diff.