-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathphishing.js
178 lines (148 loc) · 5.95 KB
/
phishing.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
export class PhishingDetector {
  constructor() {
    // Capability/consent checks are deferred to calculateScore(), where the
    // on-device language model (global `ai`, Chrome built-in AI) is actually used.
  }

  /**
   * Score an email for phishing likelihood using the on-device language model.
   *
   * @param {object} emailContent - Parsed email; expected fields: `sender`,
   *   `timestamp`, `subject`, and `body.mainText` (or a plain-string `body`).
   *   NOTE(review): schema inferred from usage below — confirm against caller.
   * @returns {Promise<{score: number, explanation: string}>} Always resolves to
   *   a `{score, explanation}` object with `score` in [0, 100]. (Previously,
   *   failure paths returned the bare number 50, so callers reading `.score`
   *   got `undefined`; now every path returns the same shape.)
   */
  async calculateScore(emailContent) {
    // Uniform fallback result so every exit path has the same shape.
    const fallback = (explanation) => ({ score: 50, explanation });
    try {
      // Log emailContent for debugging
      console.log('Email content received:', emailContent);
      const features = this.analyzeEmailFeatures(emailContent);
      // Check if the language model is available
      const capabilities = await ai.languageModel.capabilities();
      if (capabilities.available !== "readily") {
        console.warn('Language model is not readily available');
        if (capabilities.consent === "granted") {
          console.log('Consent already granted.');
        } else if (capabilities.consent === "requirable") {
          // Request user consent
          const consentResult = await ai.languageModel.requestConsent();
          if (!consentResult) {
            console.warn('User did not grant consent');
            return fallback('Scoring unavailable: user did not grant consent.');
          }
          console.log('User granted consent.');
        } else {
          console.warn('Consent is not available.');
          return fallback('Scoring unavailable: consent is not available.');
        }
      }
      // Generate the prompt (template text kept byte-identical to the original)
      const prompt = `
Instructions:
- Considering the above information, assign a phishing likelihood score and provide a detailed explanation.
- Use English and Arabic numerals.
- Return the result in the format "The score is the following: X", where X is a number between 0 and 100.
Analyze this email for phishing indicators and provide a response in the following format:
The score is the following: X
The explanation is: [Your detailed analysis here]
End of explanation.
Consider these factors:
- Urgency or pressure tactics
- Grammar and spelling errors
- Suspicious links or attachments
- Requests for sensitive information
- Mismatched or fake sender addresses
- Time sent (suspicious if outside business hours)
- Sender legitimacy and domain analysis
Email details:
From: ${emailContent.sender}
Sent: ${emailContent.timestamp}
Subject: ${emailContent.subject}
Body:
${emailContent.body.mainText}
Detected suspicious features:
${features.join('\n')}
`.trim();
      // Log the generated prompt
      console.log('Generated prompt:', prompt);
      // Create the language model session and make sure it is released
      // even if prompting throws (previously the session leaked).
      const session = await ai.languageModel.create();
      let response;
      try {
        response = await session.prompt(prompt);
      } finally {
        // Optional chaining: older API surfaces may not expose destroy().
        session.destroy?.();
      }
      // Log the assistant's response
      console.log(`Assistant response from Prompt: ${response}`);
      const assistantMessage = response.trim();
      console.log(`Assistant response Trimmed: ${assistantMessage}`);
      // Extract the score; tolerate markdown bold/italic around the number.
      const scoreMatch = assistantMessage.match(/The score is the following:\s*(\*\*|\*)?(\d+)(\*\*|\*)?/);
      if (scoreMatch) {
        const score = Number.parseInt(scoreMatch[2], 10);
        if (!Number.isNaN(score) && score >= 0 && score <= 100) {
          // Capture everything after "The explanation is:"
          const explanationMatch = assistantMessage.match(/The explanation is:([\s\S]*$)/);
          const explanation = explanationMatch ? explanationMatch[1].trim() : 'No explanation provided.';
          return { score, explanation };
        }
        // Previously this fell through and resolved to `undefined`.
        console.error("Score out of range in assistant's response.");
        return fallback('Assistant returned an out-of-range score.');
      }
      console.error("Score not found in assistant's response.");
      return fallback("Score not found in assistant's response.");
    } catch (error) {
      console.error(`Error calculating phishing score: ${error}`);
      return fallback(`Error calculating phishing score: ${error}`);
    }
  }

  /**
   * Heuristic, model-free scan of an email for common phishing indicators.
   *
   * @param {object} emailContent - Email with optional `body.mainText` (or
   *   plain-string `body`), `subject`, `sender`, and `timestamp`.
   * @returns {string[]} Human-readable descriptions of detected features
   *   (empty array when nothing suspicious is found).
   */
  analyzeEmailFeatures(emailContent) {
    const suspiciousFeatures = [];
    // Accept either `{ body: { mainText } }` or a plain-string `body`.
    const body = emailContent.body?.mainText || emailContent.body || '';
    const subject = emailContent.subject || '';
    const sender = emailContent.sender || '';
    const bodyLower = body.toLowerCase();
    // Pressure/urgency wording
    const urgencyPhrases = [
      'urgent', 'immediate action', 'act now', 'immediate attention',
      'expires soon', 'deadline', 'urgent action required'
    ];
    if (urgencyPhrases.some(phrase => bodyLower.includes(phrase))) {
      suspiciousFeatures.push('Urgency tactics detected');
    }
    // Requests for credentials / financial data
    const sensitiveTerms = [
      'bank', 'account', 'password', 'verify', 'login',
      'social security', 'credit card', 'validate', 'confirm identity'
    ];
    if (new RegExp(sensitiveTerms.join('|'), 'gi').test(body)) {
      suspiciousFeatures.push('Requests for sensitive information');
    }
    // Any URL not on a small allowlist of well-known *.com domains
    const linkPattern = /http[s]?:\/\/(?![a-z]+\.(google|microsoft|apple|amazon)\.com)[^\s]+/gi;
    const links = body.match(linkPattern);
    if (links) {
      suspiciousFeatures.push(`Suspicious links detected: ${links.length} found`);
    }
    // Classic phishing subject lines
    const suspiciousSubjectPhrases = [
      'account suspended', 'verify your account', 'unusual activity',
      'security alert', 'unauthorized access'
    ];
    if (suspiciousSubjectPhrases.some(phrase => subject.toLowerCase().includes(phrase))) {
      suspiciousFeatures.push('Suspicious subject line detected');
    }
    if (sender) {
      // Very coarse heuristic: flag any sender whose domain is not *.com.
      // NOTE(review): this will flag legitimate .org/.edu/.io senders — confirm intent.
      const domainMatch = sender.match(/@([^.]+\.[^.]+)$/);
      if (domainMatch) {
        const domain = domainMatch[1].toLowerCase();
        if (!domain.endsWith('.com')) {
          suspiciousFeatures.push('Potential domain spoofing detected');
        }
      }
    }
    if (emailContent.timestamp) {
      // Local-time business-hours window: 06:00–18:59 inclusive.
      const emailDate = new Date(emailContent.timestamp);
      const hour = emailDate.getHours();
      if (hour < 6 || hour > 18) {
        suspiciousFeatures.push('Sent outside typical business hours');
      }
    }
    return suspiciousFeatures;
  }

  /**
   * Map a 0–100 phishing score onto a five-level risk label.
   *
   * @param {number} score - Phishing score in [0, 100].
   * @returns {string} One of 'Very Low Risk' … 'Very High Risk'.
   */
  getScoreCategory(score) {
    if (score <= 20) return 'Very Low Risk';
    if (score <= 40) return 'Low Risk';
    if (score <= 60) return 'Medium Risk';
    if (score <= 80) return 'High Risk';
    return 'Very High Risk';
  }
}