Quickstart
Detect a prompt injection
import requests

# Get the prompt from the front-end user.
prompt_from_user = "put your prompt here"

# Send the prompt to EvalGuard's API Gateway for screening
# before it ever reaches your LLM.
url = "https://app.evalguard.io/prod-api/v1/guard"
payload = {
    "messages": [
        {
            "role": "user",
            "content": prompt_from_user,
        }
    ]
}
headers = {
    "Content-Type": "application/json",
}

# Always pass a timeout: without one, a slow or unreachable gateway
# would hang this request forever.
response = requests.post(url, json=payload, headers=headers, timeout=10)

# Fail fast on HTTP errors (4xx/5xx) instead of trying to read
# "flagged" out of an error body.
response.raise_for_status()

# Parse the JSON body once and reuse the result.
result = response.json()
print(result)

# If EvalGuard finds a prompt injection or jailbreak, do not call the LLM!
if result["flagged"]:
    print("EvalGuard identified a malicious prompt.")
else:
    # The prompt passed screening: safe to send to your LLM of choice.
    print("The prompt can be forwarded to the LLM or user.")
Tutorials
Guides
Other Resources
Last updated