Event JSON
{
  "id": "a622f6773432433b2f0d5157d7910bbb6b0a7f75d3bfbc95d9faff407a7c23f5",
  "pubkey": "b35793fdb8b9aa17305537cb7c090b8d88ae1fb785f7ffc9992df465b84cceab",
  "created_at": 1709936371,
  "kind": 1,
  "tags": [
    [
      "p",
      "9b1e0c6d0dbb1f3de564baab967fbd107b7fdd372f05518fcc76f9f829c42849",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "588da04a36777f3e73257ee2e3f8624cbb4520ea6bcde9951ad106dd0941bbe4",
      "wss://relay.mostr.pub"
    ],
    [
      "proxy",
      "https://uwu.social/users/k1tteh/statuses/112062390066675677",
      "activitypub"
    ]
  ],
  "content": "nostr:npub1nv0qcmgdhv0nmetyh24evlaazpahlhfh9uz4rr7vwmuls2wy9pys7hskkw they're breaking chatbots with ascii art now https://www.tomshardware.com/tech-industry/artificial-intelligence/researchers-jailbreak-ai-chatbots-with-ascii-art-artprompt-bypasses-safety-measures-to-unlock-malicious-queries",
  "sig": "946c8b943bdf258a863dc0a1793c7772eda943f7afacf986c4c26b268347dd95d6e6b3ddf1d655b46de45f6290cbe0bbc4a26a73fffa94204d8c85ab07907bd8"
}