import requests


# Base helper: send a prompt to a local Ollama server and return the model's reply.
def ask_llm(prompt, model="mistral"):
    print("[LLM] Sending prompt...")
    try:
        res = requests.post(
            "http://localhost:11434/api/generate",
            json={
                "model": model,
                "prompt": prompt,
                "stream": False,
            },
            timeout=120,  # local generation can be slow; avoid hanging forever
        )
        if res.status_code != 200:
            print(f"[LLM] Error: {res.status_code} - {res.text}")
            return "[LLM Error]"
        output = res.json().get("response", "[No response]")
        # print("\n[LLM Response]:\n", output)
        return output.strip()
    except Exception as e:
        print(f"[LLM] Exception: {e}")
        return "[LLM Exception]"

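# A minimal streaming variant (a sketch, not part of the original script). When
# "stream" is true, Ollama's /api/generate endpoint emits newline-delimited JSON
# chunks, each carrying a partial "response" field and a "done" flag; this is
# useful if you want tokens as they arrive rather than waiting for the full reply.
import json


def ask_llm_stream(prompt, model="mistral"):
    pieces = []
    with requests.post(
        "http://localhost:11434/api/generate",
        json={"model": model, "prompt": prompt, "stream": True},
        stream=True,
        timeout=120,
    ) as res:
        res.raise_for_status()
        for line in res.iter_lines():
            if not line:
                continue
            chunk = json.loads(line)
            pieces.append(chunk.get("response", ""))
            if chunk.get("done"):
                break
    return "".join(pieces).strip()
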
#####################################
# Summarize with the LLM if the target is a domain
#####################################
def summarize_domain(raw_data):
    prompt = f"""
You are an OSINT analyst.
Analyze this domain data and summarize the key security findings:
- WHOIS or registrant issues
- Subdomain risks
- Whether the domain is malicious, based on the VirusTotal result
- Recommended actions

DATA:
{raw_data}
"""
    return ask_llm(prompt)

#####################################
# Summarize with the LLM if the target is an email address
#####################################
def summarize_email(raw_data):
    prompt = f"""
You are a breach analyst.
Summarize this email breach data:
- Sources of exposure
- Likely leaked data types
- Risk level
- Remediation advice

DATA:
{raw_data}
"""
    return ask_llm(prompt)

#####################################
# Summarize with the LLM if the target is an IP address
#####################################
def summarize_ip(raw_data):
    prompt = f"""
You are a SOC (Security Operations Center) analyst.
Summarize this IP intelligence report:
- Whether the IP is malicious
- Number of abuse reports
- Type of malicious activity
- Recommended actions

DATA:
{raw_data}
"""
    return ask_llm(prompt)
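
#####################################
# Usage sketch (not in the original script): route raw OSINT output to the
# matching summarizer by target type, then run a quick demo. Assumes an Ollama
# server is running locally with the "mistral" model pulled (ollama pull mistral).
#####################################
def summarize(target_type, raw_data):
    # target_type is assumed to be one of "domain", "email", or "ip".
    handlers = {
        "domain": summarize_domain,
        "email": summarize_email,
        "ip": summarize_ip,
    }
    handler = handlers.get(target_type)
    if handler is None:
        return f"[Unknown target type: {target_type}]"
    return handler(raw_data)


if __name__ == "__main__":
    # Illustrative data only; real input would come from WHOIS/VirusTotal/abuse
    # collectors elsewhere in the pipeline.
    sample = "8.8.8.8 - 0 abuse reports - no malicious activity on record"
    print(summarize("ip", sample))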