Event JSON
{
  "id": "7d3379579e4b88bc0e68a98d04eb5d4783c39c40bdf49f759cee68f270ae0b93",
  "pubkey": "955e665eba3710d9cd105c1d95375d5f802db8888ba1db2ee70ac43ee1397422",
  "created_at": 1711202429,
  "kind": 1,
  "tags": [
    [
      "p",
      "ca5977db6fd428902c4ade0462938b9e349ada138953a13e62a4cf74373a3798",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "e61ff23992c86d2a887a687f5336f76964d5e1ae9b5b3631d8934c431432fb0a",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "6569f6ed79bab2697a28b2d2275dc941a76bfe3bbe0c8b1de41aca52b511eb6f",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://mstdn.ca/users/JustinDerrick/statuses/112145362423185135",
      "activitypub"
    ]
  ],
  "content": "nostr:npub1efvh0km06s5fqtz2mczx9yutnc6f4ksn39f6z0nz5n8hgde6x7vqp04jx2 Help me out here — how can you use a tool for research that routinely lies about results? I’ve personally had AI’s lie about what functions exist in programming languages, and seen articles about lawyers being sanctioned for submitting briefs to court that refer to cases that don’t exist, which were confidently supplied by the various AI’s.",
  "sig": "1646a36b9ac689d3f0fa4a8a53a3478f619b365a949461838058e81e2731b8030c53bccc89e42efd7a8dd942aa154ee25dea32325c8fd3a60792ad660b67662f"
}
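
This is a standard NIP-01 kind-1 (text note) event, bridged from ActivityPub by Mostr (hence the `proxy` tag). The `id` field is not arbitrary: per NIP-01 it is the SHA-256 hash of a canonical serialization of the event, so any client can recompute and check it locally. Below is a minimal sketch in Python of that check, plus basic tag parsing; the filename `event.json` is a placeholder for wherever the JSON above is stored, and the sketch assumes the content contains none of the edge cases where Python's default JSON escaping diverges from NIP-01's escaping rules.

```python
import hashlib
import json

# Load the event shown above ("event.json" is a placeholder path).
with open("event.json") as f:
    event = json.loads(f.read())

# NIP-01: the event id is the SHA-256 of the UTF-8 serialization of
# [0, pubkey, created_at, kind, tags, content] with no extra whitespace.
payload = json.dumps(
    [0, event["pubkey"], event["created_at"], event["kind"],
     event["tags"], event["content"]],
    separators=(",", ":"),   # no spaces between tokens, per the spec
    ensure_ascii=False,      # keep non-ASCII characters as raw UTF-8
)
computed_id = hashlib.sha256(payload.encode("utf-8")).hexdigest()
print(computed_id == event["id"])  # should print True

# Tag parsing: "p" tags list mentioned pubkeys, an "e" tag marked
# "reply" points at the parent note, and Mostr's "proxy" tag carries
# the original ActivityPub object URL.
mentions = [t[1] for t in event["tags"] if t[0] == "p"]
parent = next((t[1] for t in event["tags"]
               if t[0] == "e" and t[-1] == "reply"), None)
origin = next((t[1] for t in event["tags"] if t[0] == "proxy"), None)
```

Verifying `sig` is a separate step: it is a BIP-340 Schnorr signature over the 32-byte `id`, checked against the x-only `pubkey`, which requires a secp256k1 library rather than the Python standard library alone.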