@@ -11,7 +11,9 @@ import { request, systemPrefix } from "@main/utils/utils";
import crypto from "crypto";
import { Attachment, hyperlink, inlineCode, spoiler } from "discord.js";
import { readFile, rm } from "fs/promises";
+import { env } from "process";
import sharp from "sharp";
+import undici from "undici";
import ModerationRuleHandlerContract, {
    MessageRuleScope,
    RuleExecResult,
@@ -23,6 +25,60 @@ import ModerationRuleHandlerContract, {
type MessageContext<T> = ModerationRuleContext<"message", { type: T }>;
type ProfileContext<T> = ModerationRuleContext<"profile", { type: T }>;

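+// Minimal typing for a callback-based `comments.analyze` client of the
+// Comment Analyzer (Perspective) API.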
+type GoogleClient = {
+    comments: {
+        analyze: (
+            params: unknown,
+            callback: (error: Error | null, response: unknown) => void
+        ) => void;
+    };
+};
+
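+// Relevant subset of the Comment Analyzer response: one summary score per
+// requested attribute.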
+type GoogleResponse = {
+    attributeScores: {
+        TOXICITY: {
+            summaryScore: {
+                value: number;
+            };
+        };
+        THREAT: {
+            summaryScore: {
+                value: number;
+            };
+        };
+        SEVERE_TOXICITY: {
+            summaryScore: {
+                value: number;
+            };
+        };
+        IDENTITY_ATTACK: {
+            summaryScore: {
+                value: number;
+            };
+        };
+        INSULT: {
+            summaryScore: {
+                value: number;
+            };
+        };
+        PROFANITY: {
+            summaryScore: {
+                value: number;
+            };
+        };
+        SEXUALLY_EXPLICIT: {
+            summaryScore: {
+                value: number;
+            };
+        };
+        FLIRTATION: {
+            summaryScore: {
+                value: number;
+            };
+        };
+    };
+};
+
class ModerationRuleHandler extends HasApplication implements ModerationRuleHandlerContract {
    protected readonly computedRegexCache = new WeakMap<
        Array<string | [string, string]>,
@@ -35,8 +91,6 @@ class ModerationRuleHandler extends HasApplication implements ModerationRuleHand
    @Inject("imageRecognitionService")
    private readonly imageRecognitionService!: ImageRecognitionService;

-    public boot() {}
-
    @AcceptsMessageRuleScopes(MessageRuleScope.Content)
    public domain_filter(context: ModerationRuleContext<"message", { type: "domain_filter" }>) {
        const { message, rule } = context;
@@ -930,6 +984,124 @@ class ModerationRuleHandler extends HasApplication implements ModerationRuleHand
        };
    }

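+    // "ai_scan" rule: sends the message text to the Perspective API and flags the
+    // message when any requested attribute score reaches its configured threshold
+    // (the comparison is flipped when `rule.mode === "invert"`).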
+    @AcceptsMessageRuleScopes(MessageRuleScope.Content)
+    public async ai_scan(
+        context: ModerationRuleContext<"message", { type: "ai_scan" }>
+    ): Promise<RuleExecResult> {
+        const { message, rule } = context;
+        const invert = rule.mode === "invert";
+
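+        // Only scan when the message has text content and a PERSPECTIVE_API_TOKEN
+        // is configured in the environment.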
+        if (message.content && env.PERSPECTIVE_API_TOKEN) {
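+            // Request a summary score for each attribute this rule can threshold.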
+            const payload = {
+                comment: {
+                    text: message.content
+                },
+                requestedAttributes: {
+                    TOXICITY: {},
+                    THREAT: {},
+                    SEVERE_TOXICITY: {},
+                    IDENTITY_ATTACK: {},
+                    INSULT: {},
+                    PROFANITY: {},
+                    SEXUALLY_EXPLICIT: {},
+                    FLIRTATION: {}
+                },
+                languages: ["en"]
+            };
+
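+            // A null result means the API request failed; treat that as no match.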
+            const result = await this.analyzeComment(payload);
+
+            if (!result) {
+                return {
+                    matched: false
+                };
+            }
+
+            const {
+                TOXICITY: { summaryScore: toxicity },
+                THREAT: { summaryScore: threat },
+                SEVERE_TOXICITY: { summaryScore: severeToxicity },
+                IDENTITY_ATTACK: { summaryScore: identityAttack },
+                INSULT: { summaryScore: insult },
+                PROFANITY: { summaryScore: profanity },
+                SEXUALLY_EXPLICIT: { summaryScore: sexualExplicit },
+                FLIRTATION: { summaryScore: flirtation }
+            } = result.attributeScores;
+
+            const {
+                toxicity_threshold,
+                threat_threshold,
+                severe_toxicity_threshold,
+                identity_attack_threshold,
+                insult_threshold,
+                profanity_threshold,
+                sexual_explicit_threshold,
+                flirtation_threshold
+            } = rule;
+
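+            // Match when any score reaches its threshold; comparing the result with
+            // `!invert` flips the behaviour for inverted rules.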
+            if (
+                (toxicity.value >= toxicity_threshold ||
+                    threat.value >= threat_threshold ||
+                    severeToxicity.value >= severe_toxicity_threshold ||
+                    identityAttack.value >= identity_attack_threshold ||
+                    insult.value >= insult_threshold ||
+                    profanity.value >= profanity_threshold ||
+                    sexualExplicit.value >= sexual_explicit_threshold ||
+                    flirtation.value >= flirtation_threshold) === !invert
+            ) {
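+                // Build a percentage summary of each score for the "Scan Results" field.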
+                let results = "";
+
+                results += `Toxicity: ${Math.round(toxicity.value * 100)}%\n`;
+                results += `Threat: ${Math.round(threat.value * 100)}%\n`;
+                results += `Severe Toxicity: ${Math.round(severeToxicity.value * 100)}%\n`;
+                results += `Identity Attack: ${Math.round(identityAttack.value * 100)}%\n`;
+                results += `Insult: ${Math.round(insult.value * 100)}%\n`;
+                results += `Profanity: ${Math.round(profanity.value * 100)}%\n`;
+                results += `Sexually Explicit: ${Math.round(sexualExplicit.value * 100)}%\n`;
+                results += `Flirtation: ${Math.round(flirtation.value * 100)}%\n`;
+
+                return {
+                    matched: true,
+                    reason: "Message possibly contains inappropriate content.",
+                    fields: [
+                        {
+                            name: "Scan Results",
+                            value: results
+                        }
+                    ]
+                };
+            }
+        }
+
+        return {
+            matched: invert
+        };
+    }
+
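+    // Sends an AnalyzeComment request to the Perspective API
+    // (commentanalyzer.googleapis.com) and returns the parsed response, or null
+    // when the token is missing or the request fails.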
+    private async analyzeComment(payload: unknown) {
+        if (!env.PERSPECTIVE_API_TOKEN) {
+            return null;
+        }
+
+        try {
+            const url =
+                "https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key=" +
+                encodeURIComponent(env.PERSPECTIVE_API_TOKEN);
+            const response = await undici.request(url, {
+                method: "POST",
+                body: JSON.stringify(payload),
+                headers: {
+                    "Content-Type": "application/json"
+                }
+            });
+
+            // Await the parsed body here so JSON/response errors are caught below.
+            return (await response.body.json()) as GoogleResponse;
+        } catch (error) {
+            this.application.logger.error(error);
+            return null;
+        }
+    }
+
    @AcceptsMessageRuleScopes(MessageRuleScope.Content)
    public async word_filter(
        context: ModerationRuleContext<"message", { type: "word_filter" }>